From 94193cac7a66d8dac1ab4322387ea5e746460815 Mon Sep 17 00:00:00 2001 From: Lior Messinger Date: Mon, 30 Apr 2012 14:03:28 -0400 Subject: [PATCH] first commit : --- IwJPEG/IwJPEG.cpp | 147 + IwJPEG/IwJPEG.h | 27 + MarMaladeCV.mkb | 158 + data-ram/output_capture0.jpg | Bin 0 -> 190715 bytes data-ram/resource_cache/cachedsize.bin | Bin 0 -> 8 bytes data/app.icf | 25 + data/development.icf | 100 + include/cstdlib | 118 + include/opencv/cv.h | 83 + include/opencv/cv.hpp | 52 + include/opencv/cvaux.h | 65 + include/opencv/cvaux.hpp | 51 + include/opencv/cvwimage.h | 46 + include/opencv/cxcore.h | 53 + include/opencv/cxcore.hpp | 52 + include/opencv/cxeigen.hpp | 49 + include/opencv/cxmisc.h | 6 + include/opencv/highgui.h | 50 + include/opencv/ml.h | 48 + include/opencv2/core/core.hpp | 4345 ++++++++++++++++ include/opencv2/core/core_c.h | 1885 +++++++ include/opencv2/core/eigen.hpp | 186 + include/opencv2/core/internal.hpp | 707 +++ include/opencv2/core/mat.hpp | 2557 +++++++++ include/opencv2/core/operations.hpp | 3628 +++++++++++++ include/opencv2/core/types_c.h | 1876 +++++++ include/opencv2/core/version.hpp | 58 + include/opencv2/core/wimage.hpp | 621 +++ include/opencv2/imgproc/imgproc.hpp | 1139 +++++ include/opencv2/imgproc/imgproc_c.h | 783 +++ include/opencv2/imgproc/types_c.h | 538 ++ include/opencv2/opencv.hpp | 60 + opencv/core/alloc.cpp | 697 +++ opencv/core/arithm.cpp | 2716 ++++++++++ opencv/core/array.cpp | 3206 ++++++++++++ opencv/core/cmdparser.cpp | 311 ++ opencv/core/convert.cpp | 1215 +++++ opencv/core/copy.cpp | 568 ++ opencv/core/datastructs.cpp | 4064 +++++++++++++++ opencv/core/drawing.cpp | 2408 +++++++++ opencv/core/dxt.cpp | 2640 ++++++++++ opencv/core/lapack.cpp | 1765 +++++++ opencv/core/mathfuncs.cpp | 2411 +++++++++ opencv/core/matmul.cpp | 3139 ++++++++++++ opencv/core/matop.cpp | 1649 ++++++ opencv/core/matrix.cpp | 3777 ++++++++++++++ opencv/core/opencv2/core/core.hpp | 4344 ++++++++++++++++ opencv/core/opencv2/core/core_c.h | 1885 
+++++++ opencv/core/opencv2/core/eigen.hpp | 186 + opencv/core/opencv2/core/internal.hpp | 710 +++ opencv/core/opencv2/core/mat.hpp | 2557 +++++++++ opencv/core/opencv2/core/operations.hpp | 3627 +++++++++++++ opencv/core/opencv2/core/types_c.h | 1875 +++++++ opencv/core/opencv2/core/version.hpp | 58 + opencv/core/opencv2/core/wimage.hpp | 621 +++ opencv/core/out.cpp | 307 ++ opencv/core/persistence.cpp | 5408 ++++++++++++++++++++ opencv/core/precomp.cpp | 45 + opencv/core/precomp.hpp | 216 + opencv/core/rand.cpp | 837 +++ opencv/core/stat.cpp | 1340 +++++ opencv/core/system.cpp | 835 +++ opencv/core/tables.cpp | 3512 +++++++++++++ opencv/imgproc/_featuretree.h | 63 + opencv/imgproc/_geom.h | 72 + opencv/imgproc/_imgproc.h | 47 + opencv/imgproc/_kdtree.hpp | 467 ++ opencv/imgproc/_list.h | 373 ++ opencv/imgproc/accum.cpp | 481 ++ opencv/imgproc/approx.cpp | 803 +++ opencv/imgproc/canny.cpp | 349 ++ opencv/imgproc/color.cpp | 3179 ++++++++++++ opencv/imgproc/contours.cpp | 1780 +++++++ opencv/imgproc/convhull.cpp | 815 +++ opencv/imgproc/corner.cpp | 413 ++ opencv/imgproc/cornersubpix.cpp | 271 + opencv/imgproc/deriv.cpp | 629 +++ opencv/imgproc/distransform.cpp | 873 ++++ opencv/imgproc/emd.cpp | 1162 +++++ opencv/imgproc/featureselect.cpp | 242 + opencv/imgproc/featuretree.cpp | 64 + opencv/imgproc/filter.cpp | 3063 +++++++++++ opencv/imgproc/floodfill.cpp | 1141 +++++ opencv/imgproc/gcgraph.hpp | 385 ++ opencv/imgproc/geometry.cpp | 331 ++ opencv/imgproc/grabcut.cpp | 575 +++ opencv/imgproc/histogram.cpp | 2631 ++++++++++ opencv/imgproc/hough.cpp | 1145 +++++ opencv/imgproc/imgwarp.cpp | 3523 +++++++++++++ opencv/imgproc/inpaint.cpp | 817 +++ opencv/imgproc/kdtree.cpp | 241 + opencv/imgproc/linefit.cpp | 719 +++ opencv/imgproc/lsh.cpp | 454 ++ opencv/imgproc/matchcontours.cpp | 198 + opencv/imgproc/moments.cpp | 651 +++ opencv/imgproc/morph.cpp | 1261 +++++ opencv/imgproc/opencv2/imgproc/imgproc.hpp | 1139 +++++ opencv/imgproc/opencv2/imgproc/imgproc_c.h | 783 
+++ opencv/imgproc/opencv2/imgproc/types_c.h | 538 ++ opencv/imgproc/precomp.cpp | 44 + opencv/imgproc/precomp.hpp | 159 + opencv/imgproc/pyramids.cpp | 569 ++ opencv/imgproc/pyrsegmentation.cpp | 1880 +++++++ opencv/imgproc/rotcalipers.cpp | 441 ++ opencv/imgproc/samplers.cpp | 882 ++++ opencv/imgproc/segmentation.cpp | 541 ++ opencv/imgproc/shapedescr.cpp | 1306 +++++ opencv/imgproc/smooth.cpp | 1566 ++++++ opencv/imgproc/spilltree.cpp | 498 ++ opencv/imgproc/subdivision2d.cpp | 1494 ++++++ opencv/imgproc/sumpixels.cpp | 309 ++ opencv/imgproc/tables.cpp | 214 + opencv/imgproc/templmatch.cpp | 381 ++ opencv/imgproc/thresh.cpp | 636 +++ opencv/imgproc/undistort.cpp | 572 +++ opencv/imgproc/utils.cpp | 242 + opencv2/core/core.hpp | 4345 ++++++++++++++++ opencv2/core/core_c.h | 1885 +++++++ opencv2/core/eigen.hpp | 186 + opencv2/core/internal.hpp | 707 +++ opencv2/core/mat.hpp | 2557 +++++++++ opencv2/core/operations.hpp | 3628 +++++++++++++ opencv2/core/types_c.h | 1875 +++++++ opencv2/core/version.hpp | 58 + opencv2/core/wimage.hpp | 621 +++ opencv2/imgproc/imgproc.hpp | 1139 +++++ opencv2/imgproc/imgproc_c.h | 783 +++ opencv2/imgproc/types_c.h | 538 ++ opencv2/opencv.hpp | 60 + readme.txt | 60 + source/ExampleMain.cpp | 188 + source/ExampleMain.h | 23 + source/camera.cpp | 498 ++ source/camera.h | 63 + source/main.cpp | 32 + 135 files changed, 141100 insertions(+) create mode 100644 IwJPEG/IwJPEG.cpp create mode 100644 IwJPEG/IwJPEG.h create mode 100644 MarMaladeCV.mkb create mode 100644 data-ram/output_capture0.jpg create mode 100644 data-ram/resource_cache/cachedsize.bin create mode 100644 data/app.icf create mode 100644 data/development.icf create mode 100644 include/cstdlib create mode 100644 include/opencv/cv.h create mode 100644 include/opencv/cv.hpp create mode 100644 include/opencv/cvaux.h create mode 100644 include/opencv/cvaux.hpp create mode 100644 include/opencv/cvwimage.h create mode 100644 include/opencv/cxcore.h create mode 100644 
include/opencv/cxcore.hpp create mode 100644 include/opencv/cxeigen.hpp create mode 100644 include/opencv/cxmisc.h create mode 100644 include/opencv/highgui.h create mode 100644 include/opencv/ml.h create mode 100644 include/opencv2/core/core.hpp create mode 100644 include/opencv2/core/core_c.h create mode 100644 include/opencv2/core/eigen.hpp create mode 100644 include/opencv2/core/internal.hpp create mode 100644 include/opencv2/core/mat.hpp create mode 100644 include/opencv2/core/operations.hpp create mode 100644 include/opencv2/core/types_c.h create mode 100644 include/opencv2/core/version.hpp create mode 100644 include/opencv2/core/wimage.hpp create mode 100644 include/opencv2/imgproc/imgproc.hpp create mode 100644 include/opencv2/imgproc/imgproc_c.h create mode 100644 include/opencv2/imgproc/types_c.h create mode 100644 include/opencv2/opencv.hpp create mode 100644 opencv/core/alloc.cpp create mode 100644 opencv/core/arithm.cpp create mode 100644 opencv/core/array.cpp create mode 100644 opencv/core/cmdparser.cpp create mode 100644 opencv/core/convert.cpp create mode 100644 opencv/core/copy.cpp create mode 100644 opencv/core/datastructs.cpp create mode 100644 opencv/core/drawing.cpp create mode 100644 opencv/core/dxt.cpp create mode 100644 opencv/core/lapack.cpp create mode 100644 opencv/core/mathfuncs.cpp create mode 100644 opencv/core/matmul.cpp create mode 100644 opencv/core/matop.cpp create mode 100644 opencv/core/matrix.cpp create mode 100644 opencv/core/opencv2/core/core.hpp create mode 100644 opencv/core/opencv2/core/core_c.h create mode 100644 opencv/core/opencv2/core/eigen.hpp create mode 100644 opencv/core/opencv2/core/internal.hpp create mode 100644 opencv/core/opencv2/core/mat.hpp create mode 100644 opencv/core/opencv2/core/operations.hpp create mode 100644 opencv/core/opencv2/core/types_c.h create mode 100644 opencv/core/opencv2/core/version.hpp create mode 100644 opencv/core/opencv2/core/wimage.hpp create mode 100644 opencv/core/out.cpp create 
mode 100644 opencv/core/persistence.cpp create mode 100644 opencv/core/precomp.cpp create mode 100644 opencv/core/precomp.hpp create mode 100644 opencv/core/rand.cpp create mode 100644 opencv/core/stat.cpp create mode 100644 opencv/core/system.cpp create mode 100644 opencv/core/tables.cpp create mode 100644 opencv/imgproc/_featuretree.h create mode 100644 opencv/imgproc/_geom.h create mode 100644 opencv/imgproc/_imgproc.h create mode 100644 opencv/imgproc/_kdtree.hpp create mode 100644 opencv/imgproc/_list.h create mode 100644 opencv/imgproc/accum.cpp create mode 100644 opencv/imgproc/approx.cpp create mode 100644 opencv/imgproc/canny.cpp create mode 100644 opencv/imgproc/color.cpp create mode 100644 opencv/imgproc/contours.cpp create mode 100644 opencv/imgproc/convhull.cpp create mode 100644 opencv/imgproc/corner.cpp create mode 100644 opencv/imgproc/cornersubpix.cpp create mode 100644 opencv/imgproc/deriv.cpp create mode 100644 opencv/imgproc/distransform.cpp create mode 100644 opencv/imgproc/emd.cpp create mode 100644 opencv/imgproc/featureselect.cpp create mode 100644 opencv/imgproc/featuretree.cpp create mode 100644 opencv/imgproc/filter.cpp create mode 100644 opencv/imgproc/floodfill.cpp create mode 100644 opencv/imgproc/gcgraph.hpp create mode 100644 opencv/imgproc/geometry.cpp create mode 100644 opencv/imgproc/grabcut.cpp create mode 100644 opencv/imgproc/histogram.cpp create mode 100644 opencv/imgproc/hough.cpp create mode 100644 opencv/imgproc/imgwarp.cpp create mode 100644 opencv/imgproc/inpaint.cpp create mode 100644 opencv/imgproc/kdtree.cpp create mode 100644 opencv/imgproc/linefit.cpp create mode 100644 opencv/imgproc/lsh.cpp create mode 100644 opencv/imgproc/matchcontours.cpp create mode 100644 opencv/imgproc/moments.cpp create mode 100644 opencv/imgproc/morph.cpp create mode 100644 opencv/imgproc/opencv2/imgproc/imgproc.hpp create mode 100644 opencv/imgproc/opencv2/imgproc/imgproc_c.h create mode 100644 opencv/imgproc/opencv2/imgproc/types_c.h 
create mode 100644 opencv/imgproc/precomp.cpp create mode 100644 opencv/imgproc/precomp.hpp create mode 100644 opencv/imgproc/pyramids.cpp create mode 100644 opencv/imgproc/pyrsegmentation.cpp create mode 100644 opencv/imgproc/rotcalipers.cpp create mode 100644 opencv/imgproc/samplers.cpp create mode 100644 opencv/imgproc/segmentation.cpp create mode 100644 opencv/imgproc/shapedescr.cpp create mode 100644 opencv/imgproc/smooth.cpp create mode 100644 opencv/imgproc/spilltree.cpp create mode 100644 opencv/imgproc/subdivision2d.cpp create mode 100644 opencv/imgproc/sumpixels.cpp create mode 100644 opencv/imgproc/tables.cpp create mode 100644 opencv/imgproc/templmatch.cpp create mode 100644 opencv/imgproc/thresh.cpp create mode 100644 opencv/imgproc/undistort.cpp create mode 100644 opencv/imgproc/utils.cpp create mode 100644 opencv2/core/core.hpp create mode 100644 opencv2/core/core_c.h create mode 100644 opencv2/core/eigen.hpp create mode 100644 opencv2/core/internal.hpp create mode 100644 opencv2/core/mat.hpp create mode 100644 opencv2/core/operations.hpp create mode 100644 opencv2/core/types_c.h create mode 100644 opencv2/core/version.hpp create mode 100644 opencv2/core/wimage.hpp create mode 100644 opencv2/imgproc/imgproc.hpp create mode 100644 opencv2/imgproc/imgproc_c.h create mode 100644 opencv2/imgproc/types_c.h create mode 100644 opencv2/opencv.hpp create mode 100644 readme.txt create mode 100644 source/ExampleMain.cpp create mode 100644 source/ExampleMain.h create mode 100644 source/camera.cpp create mode 100644 source/camera.h create mode 100644 source/main.cpp diff --git a/IwJPEG/IwJPEG.cpp b/IwJPEG/IwJPEG.cpp new file mode 100644 index 0000000..b5dfc8f --- /dev/null +++ b/IwJPEG/IwJPEG.cpp @@ -0,0 +1,147 @@ +/* + * This file is part of the Marmalade SDK Code Samples. + * + * Copyright (C) 2001-2011 Ideaworks3D Ltd. + * All Rights Reserved. 
+ * + * This source code is intended only as a supplement to Ideaworks Labs + * Development Tools and/or on-line documentation. + * + * THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY + * KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A + * PARTICULAR PURPOSE. + */ + +// Includes +#include "IwJPEG.h" +#include "IwTexture.h" + +extern "C" +{ +#include "jpeglib.h" +} + +//----------------------------------------------------------------------------- + +struct buf_source_mgr +{ + jpeg_source_mgr pub; + char* buf; + char buf_term[2]; + long buf_size; + long pos; + bool read_started; +}; + +void init_source_from_buf(j_decompress_ptr cinfo){ + buf_source_mgr* src = (buf_source_mgr*) cinfo->src; + src->read_started = true; +} + +void skip_input_data_from_buf(j_decompress_ptr cinfo, long nbytes){ + buf_source_mgr* src = (buf_source_mgr*) cinfo->src; + if (nbytes > 0) { + src->pub.next_input_byte += (size_t) nbytes; + src->pub.bytes_in_buffer -= (size_t) nbytes; + } +} + +boolean fill_input_buffer_from_buf(j_decompress_ptr cinfo){ + buf_source_mgr* src = (buf_source_mgr*) cinfo->src; + + if (src->pos == src->buf_size){ + src->buf_term[0] = (JOCTET) 0xFF; + src->buf_term[1] = (JOCTET) JPEG_EOI; + src->pub.next_input_byte = (JOCTET*)src->buf_term; + src->pub.bytes_in_buffer = 2; + src->read_started = false; + return TRUE; + } + + src->pub.next_input_byte = (JOCTET*)src->buf; + src->pub.bytes_in_buffer = src->buf_size; + src->pos = src->buf_size; + src->read_started = false; + + return TRUE; +} + +void term_source_from_buf(j_decompress_ptr cinfo){ +} + +void jpeg_buf_src (j_decompress_ptr cinfo, char* buf,long size){ + buf_source_mgr* src = (buf_source_mgr*) cinfo->src; + if (cinfo->src == NULL) { + cinfo->src = (struct jpeg_source_mgr *) + (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_PERMANENT, + sizeof(buf_source_mgr)); + src = (buf_source_mgr*) cinfo->src; + } 
+ + src = (buf_source_mgr*) cinfo->src; + src->pub.init_source = init_source_from_buf; + src->pub.fill_input_buffer = fill_input_buffer_from_buf; + src->pub.skip_input_data = skip_input_data_from_buf; + src->pub.resync_to_restart = jpeg_resync_to_restart; + src->pub.term_source = term_source_from_buf; + src->pub.bytes_in_buffer = 0; + src->pub.next_input_byte = (JOCTET*)NULL; + + src->buf = buf; + src->read_started = false; + src->buf_size = size; + src->pos = 0; +} + +bool IsJPEG(const char * buf, int len) +{ + const char pJPEGSignature[] = + { 0xFF, 0xD8, 0xFF, 0xE0, 0, 0, 0x4A, 0x46, 0x49, 0x46, 0x00 }; + + if (len > (int)sizeof(pJPEGSignature)) + { + if (!memcmp(buf, pJPEGSignature, 4) && + !memcmp(buf+6, pJPEGSignature+6, 5)) + { + return true; + } + } + + return false; +} + +void JPEGTexture(const char * buf, int len, CIwTexture & ImageTex) +{ + /* Init jpeg */ + struct jpeg_error_mgr jerr; + struct jpeg_decompress_struct cinfo; + cinfo.err = jpeg_std_error(&jerr); + + jpeg_create_decompress (&cinfo); + //jpeg_stdio_src (&cinfo, fp); + jpeg_buf_src(&cinfo,(char*)buf,len); + //Realloc crash? + jpeg_read_header (&cinfo, TRUE); + cinfo.out_color_space = JCS_RGB; + jpeg_start_decompress (&cinfo); + + int newlen = cinfo.image_width * cinfo.image_height * 3; + + unsigned char * data = (unsigned char*)s3eMalloc(newlen); + unsigned char * linha = data; + + while (cinfo.output_scanline < cinfo.output_height) + { + linha = data + 3 * cinfo.image_width * cinfo.output_scanline; + jpeg_read_scanlines(&cinfo,&linha,1); + } + + jpeg_finish_decompress(&cinfo); + jpeg_destroy_decompress(&cinfo); + + ImageTex.CopyFromBuffer(cinfo.output_width, cinfo.output_height, CIwImage::BGR_888, + cinfo.output_components*cinfo.output_width, data, 0); + + s3eFree(data); +} diff --git a/IwJPEG/IwJPEG.h b/IwJPEG/IwJPEG.h new file mode 100644 index 0000000..1e639f9 --- /dev/null +++ b/IwJPEG/IwJPEG.h @@ -0,0 +1,27 @@ +/* + * This file is part of the Marmalade SDK Code Samples. 
+ * + * Copyright (C) 2001-2011 Ideaworks3D Ltd. + * All Rights Reserved. + * + * This source code is intended only as a supplement to Ideaworks Labs + * Development Tools and/or on-line documentation. + * + * THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY + * KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A + * PARTICULAR PURPOSE. + */ +#ifndef IW_JPEG_H +#define IW_JPEG_H + +// Forward declarations +class CIwTexture; + +// Use signature to identify JPEG +bool IsJPEG(const char * buf, int len); + +// Decompress JPEG from buffer into texture +void JPEGTexture(const char * buf, int len, CIwTexture & ImageTex); + +#endif diff --git a/MarMaladeCV.mkb b/MarMaladeCV.mkb new file mode 100644 index 0000000..a7dcb93 --- /dev/null +++ b/MarMaladeCV.mkb @@ -0,0 +1,158 @@ +#!/usr/bin/env mkb + +includepaths +{ + include + opencv/core/opencv2/core + opencv/imgproc/opencv2/imgproc + IwJPEG + +} +options +{ +enable-exceptions +s3e-data-dir="data" +} + +package zlib +subproject zlib + + + +package libpng +subproject libpng + + +files { +[Source] +(source) +main.cpp +ExampleMain.h +ExampleMain.cpp +camera.h +camera.cpp + + + + +[opencv2/core] +(include/opencv2/core) +core_c.h +types_c.h +core.hpp +eigen.hpp +internal.hpp +mat.hpp +operations.hpp +version.hpp +wimage.hpp + +[opencv/core] +(opencv/core) +alloc.cpp +arithm.cpp +array.cpp +cmdparser.cpp +convert.cpp +copy.cpp +datastructs.cpp +drawing.cpp +dxt.cpp +lapack.cpp +mathfuncs.cpp +matmul.cpp +matop.cpp +matrix.cpp +out.cpp +persistence.cpp +precomp.cpp +rand.cpp +stat.cpp +system.cpp +tables.cpp +precomp.hpp + +[opencv2/imgproc] +(include/opencv2/imgproc) +imgproc.hpp +imgproc_c.h +types_c.h + +[opencv/imgproc] +(opencv/imgproc) + accum.cpp + approx.cpp + canny.cpp + color.cpp + contours.cpp + convhull.cpp + corner.cpp + cornersubpix.cpp + deriv.cpp + distransform.cpp + emd.cpp + featureselect.cpp + featuretree.cpp + 
filter.cpp + floodfill.cpp + gcgraph.hpp + geometry.cpp + grabcut.cpp + histogram.cpp + hough.cpp + imgwarp.cpp + inpaint.cpp + kdtree.cpp + linefit.cpp + lsh.cpp + matchcontours.cpp + moments.cpp + morph.cpp + precomp.cpp + precomp.hpp + pyramids.cpp + pyrsegmentation.cpp + rotcalipers.cpp + samplers.cpp + segmentation.cpp + shapedescr.cpp + smooth.cpp + spilltree.cpp + subdivision2d.cpp + sumpixels.cpp + tables.cpp + templmatch.cpp + thresh.cpp + undistort.cpp + utils.cpp + _featuretree.h + _geom.h + _imgproc.h + _kdtree.hpp + _list.h + +[IwJPEG] +(IwJPEG) +IwJPEG.cpp +IwJPEG.h +} + +packages +{ + libjpeg +} + + +subprojects { +iw2d +iwUI +libjpeg +} + + +#### AUTO GENERATED deployment settings from DeployTool.exe. Do not edit below this line #### +deployments +{ + ["Default"] + assets='Default' +} diff --git a/data-ram/output_capture0.jpg b/data-ram/output_capture0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8c149318d19794929c8b10772633088cbe708758 GIT binary patch literal 190715 zcmbrlcTiJr^za#a6{IOhhaes4By{jgAVGrkj`V8i9i$2r|4Rr6iHJ!^ z$;c@vss0UUybmBGAR;0pCL$prCjK}2)xY}yVmcCfZV5F~20dFc9&bj;sMG>-UiG?O zCjD)51CmW@jd1j5EPP@k(C31<)8ki0nyZg!VCKyRn}AA7&&bSr{~^1usJNuG>{EHgm->drrskH`w!Z#>LCnxF zc4TULW_E7=`@-V-#^%=c&hC%B{nNAapT90HudaXp4;KM|=zmYy{{s7eaMAtaA|xgz zA}0GEE&{^9eDZQE=nXNYik7N`%qk3vVT`vW%l>P~mozFMQ2SDj{zSI9f z`#;J4-+@K{e?Bdu?fi+F~Sn`jxeHp#G_iRxw| zN_Ob?db(dK@mUu^+8|r)WHR2u&o)z-f+)VoB{6$D)6E?Y3+iJ6tlq8L;@$MBB#^Fh zxmR}9QBC1Puw^A9JtHL}?hBsvjiZS8P|dW8TP=sCh^>rS{`=d0(eupjUFLEWo$#5L z0>dPeAYkgAO<7+fYx=4vhU45DiWtd$y)_-MAZFNP-DS*AF(4CMM(uA7D>f)OTrQE# z686lPDPvxoo6@-!r9G;}aL8QIdaiR)o9@Xe)1v#mONvwaJ(>Nrb?ARD1^ZKu=#|hx zcX0JubrXe~Sc;9`$#23LSA$0TyPfM!(nBmCT+n*mIaU=0t2FY<-Pd8OB!1ItNjknH zCCp-4okE|y{TI-i-%$3p7T4rHZuGrsvQ1!DQm9l5IX1ML%%i!K_%Ys3q({4bdT1!o zc4Rri#ggd0e$ixVN;+cSMDmEl;iei{DI}TNsI|tR8M{AK0!!kQL^>=5p$*Fgi)S9n z$j~}I=$x7M&hE$wG*@(8mx>o^bl-XZ!T8h1bZ`26kzih?aUE?s#+}s~F&{*8Nz>h+ 
z-+08uD~q6_W))gH^*2417Oq$>u}VRjONC-Fs`UY+BgB9;Ht2&%!}P8DiT1PabH8`{ zN7W-q=wxFUxb$UT_Xz_^48zKzBdHky5lm-y)8SmPEcUZveUOo=N?--)a#a0T`hTi= z$!W3AKO#s7UWZS2aT~gl*7|OvQC+@=y+puIzWM1#%i8Q6VkE)-l6b~X%g!I1jw+jq zsAV)*Ep&q+BX@25UGa61L*AqhDlYuJywUc7@+~6ILNr!kNy_)iUcH(hpIXdc8jE<; zMYojOeUf)2ww!tisVjvH_^sQc8FD|qTGawLGs)ljUx5tku0G%fd$9LvzVi~`t7Y-6 zEL=Y1`}ZPfx=&{0&zT%%dm`Qg&fs`St&w_IdjJkqlsJH67}r!e60B_JgRlp z%C*URujM>HYv<)J?sogT8m~c^TOnpwHUt9Cdmwn^&k;1J^q?ut+)7mD((Bf0VDFP$ zhEdw3eOULSjCrxz9I3L$)q#aw%_`0AwTqfKM7c2kz%k{PKiyhYKimC4&2%tzC-Wvq z5p8^L_vpn5{kS&vR9JOU0@daB9!pf1q6>6LB87y^pK3+8* z=qG5mf3tH`wZpG39o-DB-YSeb+Kbv^g*kffilPVA)^s9*lPikmj#po$XOc2_os6>R4G-~`>)oo(D+S&Jh5;I!j88SuCfK;Xxgq!A;jyaS-X(>4e zxJkyc8wq9Mua)`sC$0As{#|jrxbV7i$Is{((V0Y;mprkE2=ADhXsOnT=*s|>91BeM zPy5zud-&_iu1@G3I9IgY4T^Q+3L*%KMC#n+(?7BW*Y$gRL%|{$9;)T=#jOGyg+^eX z`D5enC^b*wv||litJ*+7X-V@sX0~`UF&@2hZ*&HR6J#TN%@^@5q@8Oeg4?PCobi^3 z4e99H+oMmW-07Z^yb)g9JQ308K_Q&->?2X@{4t=1LycOCWf$BR+yDI}SNbS2V<|ze z8aXfl+ZWEBnH810xtNAO`rgGqFq5q8{kC}BsYpbbR2*hmt+Ha9^GaE+DAcOxhPLT1 zAUge)(QH+r-O8H|V!*IhEApOt0O*95apJmH!DqK7OG@{7DgR1vsHY_ZX|D2)PZ-1E z=XZc+(<~oZlIn3JQ;GtST@ysSeH6PyDvxf@s875xJRjx-_=KO55ho3x7dT{QEDIi@ zJUD~inxf>{=yk1eHXLO9kH7eV4A#=W>_G?l@ph-)@2v3&Vr5c0oms-u8nZ>+w8d)_11}rom{(4pu*e&D=#-(SU>m&T!#_n1e?kEFKV)*j=e_vqM16EBHoPqcE>)B(q z`n?P}i0H`>Q`s^yUBi221 z@m7_$fW%m}`fNS?CE9P&b!kxkU24;B4X(WUc6#V03xDbi9Wrg`*<8SS7MZ|S-A+^9 zH0yhaMEFt7=DW+BGi&2ec4=X6BeZp}x#dL&?#m3W`r%)IDhk0@F^l-}Me~>AlUpXu zb#cV!D-%nVqw!b76ibTDqeCkG$wYy$(X2bl+LHz7{VNDwzw>EO^r|99(6U~7zK!oR zXj%B$#mYkOWlKlftLqV&8GJlHkVHh27ra=Z3VgLH)VE^U;G!M33L(lJ|E9Ix%2o#^ zK3Tv$uk@OgMz=&z>D5>a9ek+N4?kLyyLc_{?jA6UZ;XA0FLv191!ZnDUjzxcYcgGU zrNIAW%r7VJb?g1}iRvpJ)2+C*UMWp(GKc!0sGZ~pK4=o_OGcoWmUp_y2o;I(xv0S% zWqeWiiDtoGz`ow4;w8gT^0wo!y_Zowh}%>sc6mrhw)uQ;=S3aS(X)k#`zj-7Y7(FW zN6SwlB6gvorM29VW;g+m>71qQ;vgzncM2_~n3th@@$wy2e8P4|kp+u6R-jY4{!L{- zUX5JHnn~;KrljkRgu&3}fgPuiw|=>h+r*si_Z#V@z)lqWcOyb2M27Q9@Lu)F6OxkG 
zw(r?5)|>&zI?RsGAfp`k)p#ULKn-5(=Hf3P!ERgQJ--uB+5Y~+W14sn5zqnfkx*Q4&oPu!#obHND+Cv;;Zah`_W4a=0v=x!(vp1bPrZB#vVtwsF$C@zQlO4-&JJqbog(i=8msCcoER z&Kg~TOOc~K;yuMIF8)X6_c{}!_Pi*ToJKt|hzLy`DUswhR_;{A#?f8EdDgFZ)e9|ro7$jTRomsXMpKiO9;N9#-Ado zM5==@y{;mc(+mL{WC{=W@`5MAV`_a9a0LT?yiCrIrpokIiz|Fe1kDDRRa*v>M67U_(mLT&A}fbd@n5xo$9ZQ-CIJOEHcAZPc_x3kv7`uR#zQ+sJ2wJJr?)H0J1fyftt(O#q~qy_aee9!IMQsX3L zvBF-Yc-8ARXI@TA>Y3WoHOXZ8Lu7tB{T97=FxJwZ7tPT+2@~=brJUBQjP98+o^3O9 z7Iv-7NtSZEyljO1KJZ_N$90Gx+)!m>GdU&tohz-Z;YV5reyd;7rhR_{8vof+vbrR& zR`lap1>idKxdU%2N&#i)(W}zN0tN`BHT%$B2{nWg;OY0$gujmf-vEGJ)EmPy?_vrP?ejPqTnIqsO zuCk4bcUjNwlGguOTN#9~Gig0x{O>0ZC)323u43%n1fMEK*$%En#pk~gc>dvJy(`Z|B^)yDnljMGIWth$&y5z)rh^?y!7jseutdf0UzgPmt?)OF!BsXT zx>B{FkJC#6za31ntaNJ>&gM@|-YW6;3_ReVNROn|WgJ9Dp%Lzk*q&Xoq)u=_H?BN_ zHr>;cbd^MrRQ?uL1i}RV$DB5~@^lEiPj;>#o90bLC?gnY9p(J~<29V{<9p2|S1yMq zqxtI<(`IX_`A33Q-Ldb*ciC0Xb5dxQ?ORztds}F7rf9BKqHGPtYW{RNvY4GMt^7Xd zUTs=a=Z~6r;(6qThd%z8L3M5xru)6Kq0QrX_%1v7e^!H9-a9(s*&)7(b}9{IO0el= zG&$Q#MK+W~!tlQZp$^An7qLzsw9(T~OJ9`1e{*o*-$Q&fk_pBxKBSKLFq$j2qv{xn z5oraB^hWhOOYE`VKCEip<=Xa*5zF##$v+ZCXRc>LnaEl0Xyur9$v*e&1N!c!hC2%l zuOi5c@MqYK(qj4Mt!#<*GqR~13Se(H@|>Wqn#z2i4*4p>Kws-qN8Z^RADU�lrZbg8#VXc?z zPsAR2i}u>KNe;f{YbFD2iUEdnFS2N{>P$fq6fB}k13QdAVRBFrKhG6~(&qY*D&->d z6q;8XJW$48RRL#!Ew`P`#Wt^gt3T@Ou88>yAaS{pAs<_1oTt1L-_I25Zx%WwnkOQO zx7=F)oEp(<6;9;g*%0JBNQWs7JGFAvx90~iWttMGgesWTHS7W;UTmB1!sboWAAadm zb6ne1pi6Dwe1D#IJ)sK%5}2l4xtpUP&K;?v&fB32BhJ9TfJ#Z`7y!tT;4^HC(4AtV z1u?Pq9TcS2$;TG%v_17((LS*{yz>>jT$!(UDtN`(0$9Q(X1P}zYmr?_3Dd2B5{;>1LEVHiDG6g0xfmF}3VnchV(4A_?tQ|syR0Z({Ql;_`3Zr{k%jfr9zq95Ct3Q+1rWpmf<8HYYXosp zg?bG#lhSKTZxNw=4sU|6%m-u~3dwGJyrfYiIiF{52+A=l2lCc7SFMEXTLUk zUr|{5wI2|PtD}4x)sqpDj=h}LvCeW9RdlAC$w}B?gVk$WlIkKX^*dJY@dd8Rg;f4a4|Mpe zTnVrBu%;VME2?ZLeB6!-uK9VD>SY-H?4cC3SDnua-$a$%s=Eex7{j(usVY$G~5vUV+k?9oXDoSru&m z|2bq`5mS+aai9wKCK7y%7eM4X3%^ufMIExvxtVU%>8(C@IYqiO{|i{P&%H zeJeF@=gC8HRC4`>^f!y+^7eatdiaqys`w3-O*T+~;mWD`#^nlr0WQ!UCj6|BX*LH1 
zj3WAlkBGO}4tc#D6|!Qk2m-}gM@6@yutX8LJaGV<=!&W`UL;d5%Ptaami&gCwbsE{ zNqu;teUWC-8XSZj2q&qsSEvD7Ka>&d12?4?BikMXb zeBv^FFWzdee@V|q?Y(;Rt0{gzKC#<{o#ygI%ZyF_k-*oFZ;Bp!5w~JC8=0Q~AKFAc zkJdeQILq-&USxHjYko-RlF8SWxOPOjV8)#`YIrt7!eOgF+Jav;JtQCO1|K)BSUoi> znPxFjw!IfGBv~a%`3L9S!;H0$LE{pQ_dN7vGSz#8QoEjI3PFN>*UGn&AnyHn_V7ki z=>p1x+K%!PoljjBp%dl@5eZ7u%g0H2>Xawt@(75a#^my)CBwFt$Nq(THnyZNu=Tfl zMfkUnFuB1){G~>$pNN`*+AcM#LPiz~7K_y<&}R&vlatjWZ8*L6E2ijP9P4~6m8}-% zf%5`QUY%cGHG254Fr(9`z22U+-+O~4Rp-{6GY~Z< z?zOr1^;gZ3ovg+GWn#>4CvQB$7rT_Mj!ympbP{&WyAxEX*Q`X0PxjNkXgXF|C^bxQ z`=^=<2da*zCkQ;{VG8>1?W^mYi|+gXJ!LH~W+VLKL14mL%JeFZVn~TrFQ>*WyV6XT zIL@Rw`$5vq<9;#|UasJ%5gXf#^a9Jz%O(kXmHC5TGXw;_Rd&k%G%6E5J8+qfM&Os; zg&u^`WST|hnoFe%3@yMUEb*{hZ6AE=i_mt6yl~gs+w%?W%h6&~$GKK-aZ4T~(~OOq z=By*9St_bic5uIV1^?Y z?z(V?7n4mt%$=&=>DJM0$#~=w%ccaHzJSUZ7|s}_xe2aX*0wd~E*bxKiRynTr$EBF zIt0N4{{Vw96krhG(dioF{LL z&Ta)8f%A|{?XOj0v?8HBl zM+@9C-jdWM&Gy(%MQK`+&ul8qAIst+PiWM*h>-I#o=tZf0`|lMYGU5!_gcwzi0Qpl zS@2rBC4Ta;hk^8(2$m5sBF)Pt9M0o;5sG}XWtXTVcsXO~UB1jq>5_1B)K*nl(1D;J zA9JVQ-0=&(6+OcYt-g22HJN=r##ykH`{QH2ip7hAnq4E@`(IR*@!TW8q+$+3&y{q3 z;u=Gaq8f@}jPMUpp`l?aU{aY|?%*P5CZ%#z3;*s>%=v>r@&si&LS%4O^dYb*nqZqt zk~npklJYu=GLBN*3)Cwbg`x%DyJ|muq(T(rdw5a!V5SC_EiLsQ{#)_Rf6lu^V^2GI zGVC97;r^RiK0E_UfRK+L8_xVG7KVkT7;ep)erho{awuFe6>dPe{V`f#$TgGUbfrZI z3WdNHwKKq?Y*WycLH^K^(%mmzU9(L-neOD%vv`@FKevai%;EnV68{SCY`1ho_Tb}LP& zJSVNn-==OSu7soYV3wth%%;12fE0)84nreY6}F{wjOdw6ALme-k}6ATG%6ZiGt zmgRDzE~|fn8|O`4y(oet6hIB$=@hpXH~73-z5A70Tj@Fewuy)Mi&CvS5joIoz!!L2(4KaH$`x40Wd2*yfnEaPdKoY9xU@nj4P@h*)NjGZWBrqR^ zHZzW!169<19?hj~#Sbi(T-du0@~<2;<+vQT!k*+}b&9h)OwO&8UPY`YxOZt_$bWPT z2uH_*S*XFo+p26{pmB54t*0|TcNO6H+*#F4#rs{6yB&Lv?0G-&1MW+t#bor7mFdDi zY|S7?#F-yVsv#p4lF-iGDjAGfN{*Z>Y7nuq<05_vVBs}_>G5^OVO3gadxpZ|QE^6V z4s^x;tf@BQiubveC1Uxh4PapfOT=us3ydV#_aal|Y>IGFiKIW6pUc0mXolc%$&{0n zed{a|aA|Ck)#T~vOESBmRnX(_=&p9jIr^A?N!V)1C!$`iXF-|Yt$IVphw7atu`2~A zbN$F_+O*y4vnDYe_EEq0%aUD1OChUZsixyEJ$<{spw*%X`=ooyi*qAWs{+-y=Eg*u zrXidB%A+W)Npx>pH#pJ>lKIg-Fj-LtE}qxa~sZ)6N* 
zh=5)~oVKT|(iEVmY!zdgB2TJ#erw%<^YT~MrH0>`1`RKhFPq|tSTzZn?EzW{G9#$@^YYQRC4%OP6M+pIsQrcm*2S%< z5)ma+^^6zQ82?~5b*ND^Vh|?jF+Svxv?AZUeH!FBa3T^dO!byMy&y3-%{{|}#8QYL z4v>qhWes<--0NSTq&pm&GaxteVVW}PPg{AUNEgSIX&yE8k7`Q0gp4eu-i-cI3u!1OLJB!@XwYe*vaFo&ely zAfN)Yj`{FqU<3I1yPH4TT7Bl@W}XZgguQ~hl3)0_$B~?SzOG0UEcTU&O_}-o&5Xa=@mH=%+8SRfw!f zT7#TY{rA|4)oAgf%{}>F%v<-xQEIGgxM+17fn_)g^p9$D+;9B~zgP4;G~`Kr1KYl{ z2?Gf}YReX2ezJ^R*NqkzIjn7|Mg!3fUt`Tvdlw^RQ-Y94I!M5#$Lydn>$`Ke40eqy zn}Zg9Ca|4Tt6{5!*G~UntEjvZeJB1Y)@Q#mQ!kzVohM2!IxVbia%Ii072)yewxIcI zn31>7NZaGGM+)?hGv|VMmttB!6eq$rR4J()@>+PQLsp*jafqE8J2ZYUmeU{QN|CaO zhA5`7I?ASK+txKU;XeNnbpHg>PH(b}$zSfz0z?Knh=urjh?A;ut3_*kHM?EPdG-AV zZ#0{&>b2?$eF~Xc3k?|vTirb;@<-0(Mj2-GuKW=;s2J+5ALb!fGG zSie;!u(&ra=<_3IJM3V#xN5EZ>`833%=lw9zS+hZf@q9G76A0`ubTT+K}1jpw=Fs* zZm9eou}5%8de|4$jgRU>FKcv8hSb%?ImSpD0eXt$!f;(+w)qB zAAlCSTLDe7rO2}ry>g~jm#teJEAzci4*Xq1nOh_z+axtgqanC!`qr*h@M|@TTA5y( zw;t*eaSUBpYr1*qtv_<|(_Xsd;Y5)#EEVo$ecbLp&MI3yf(rFpn*0l>ug#5rKk=`I zqRPdV)_esZK943i-kZo?SmWY~9wA*`@BKk$)pGFNH!r%)Oh()24f$amw_=_Db3+>0 zvdr6<Pn5UP z95{E;+Tq14<2D^DV+1*ipmaM~l`u9B1|Ai>6EQhWmKuAtj37=$asbGf{|TFO6!hIx z(U?e1ePhgh+NTpnUJ}XHx|TLM25V9+=k`>oDU>)r{!$C&4@Xx!l~~{uT|<+&Y!VC^ zu*m!*`~xN=D%w3RIKx%*gZ?F|VaEB0XWF}6&-%<&p>(P_5D^?+8($)*otIlvLd7q~ z8D)Lch-ffhdHM5;bl{F;B{xM{^qATN0kKfrB(=moK~E1Tds+7pt;{vqF;R*Zz&k2G z+%VvhDSKnX`1zDJGqD66s`KIm%FFxRCXfaEY3T^eVHT*tPW1p7*H7w1>}Tol`Q@%6 zlH-%z0fcd5Z)Ufg>2O^?3|lF>RhWHughySQi2#Lk%%+;#$MvTX%_CDwY1qon=>XaB z!Ks+HP`L6b8T(t}INqd9c6o18(Ml}SW{h_|QkE0wn?imNgX9?LC;O`2|J?Nj-EulF zna~m(LOKWJ=B*@F(gZ7-@KyGfmu&HXN|M0Jw!)fR1~-^{H$9a1;RO33zZmV@?Req7 zNPC!<_A=BseY*%IB&oAjMwxfn$@}ybMfgbEE{PpDJk6aA1gt4ggBW0KG80P z%d=(T(5R1Wo1cFufh$jfKF}pOI5KmsCy(}Q`3{YVOTGf^*1iYI7d{;jgGFTu-N@N& ziinfy>KE~M(Fk(Kn+ARFtfQ7BAZXd#-ZsNxapyxelCHJ~CTN@1!X=nNtQJhh=Fj{w zcY+%tS)etWu5>BSxW=fMEW$;353>38VVS>5Tf za5A2nzkoi6VSszNWQ&UbyAWsMU;pOMzxILFufgYqgQIvwA<+64AM!8<8~>rgBrkS0 z^L4J#mpx6&`c>q^AkbPIG19?#hk2uN$NRW-aoSq*5{pb$vT~g)=3Qme_ovx 
zxS$*f)8iuio=}ovdE=aX>BW!G!mHVBqX=a)LK|fL&^h&=?bY{3y<}X@L|{3t)3LG` z>2IHiSW@f(o5vI^QP|;;Jc{cm+?8QK7tgCDu2ePQ#@lipO)QrHIfV4W-p^;s;EacK%flt!D66v|#RtH!VteIIt=$hir{jxS zEFN_Amqz)){LDQs#M35;X^MEcaLE826^pPK#%;>&C_5<>4S9fF@FsC+p5T;gTVbpy zRXGu*WbtMfz)N+@JJNSBSb2$EzrkM2(>XX)YotVpvwf5q)aIS>b17R}!}klPQ3FEW z!`O1a$b)qdn3rHxRWn;1%K0>(bDtz>p)dhXpq|hZGdU6)MOLsL&t^Zj9Ii0KL^v)@ zX_;6s%XmMCHEhUPG498Jb6ME!G)I0#bOHUpqPT?!$XlBf}y? zM99vzx@#YC4(skG>I?iI_l(Q!8NAXA%bN=`VE(}AE$?Qn@E}thSWC^>!4P^ zahu7ZEb+Y_e%?&pykglPB7UaxnGlc8X0JvEfuI$ca%wLc2sTe03c6O^Ed@NTialj~ z^>XVawIJ;djij=XSx(EHSkVqM(`&&^hix4n-Hhg70St}cpd8EKxponPpXtjQJT{>h zF3}#Z!J;PI$uyH5+}N3AQnz=(A(Y1sxz!YzWNyiw8Clvg$}~wePjj|PJ?S_5u zP*tj%N1}Q-`q^05PSRJniAJ47djOk>{bT`#Tj$GX5_vw#UQM$dPH$uf>6F5{#Ta?L z_v$C!AXwFUfWDC-8@3xySUm*afwEnndcAMz_NO#V zPix3~?4iBBD52}zIaR+@K2rw@!m5EV|7xHQ3(W@`mTRgye(sh3bm$Ulb6n)j;%8Cl zS_l5-rHkr$xtov4C3{l?JFC|UNt`%N`e5ln#v1=3ED0{W`=&#CQ_NW7olgcyV#9x* z@5L=`gN?Wll-h?*~`~LbL%NYjvLD+dO7uq{Jvbp>^#QbjGbIlj{!E zfMm79it7~X(rfR5TP@S`#z70qg2ufgVKa+6b5lN+wd+gS=Gu-GIzKz{cV!LpHR2su zX50~Rv!bllzv%sP-+NxB;&=5oomH3TTAV8?0}x0^o^QU|-V2HTjbkRw$jmr>vSaqo zkv31b$*@P#c=5o+Mh( zOM;}JL#d=)aPy0+0$VzqL{CQ1q4(>%!tPM#x1ZQ(*-a@GNT0F2RqPqXL3GY(|z1|wrSS+{fzaKVpqT&o`gS16MJ8D ziN(&lhbj3_*(;F!#V1v^igKd~f~|`3;VoyjWc}b;%C^&z;i!SkJ2NWu^=pTkAmS*z zZ;}?fE-%N>P<@ZX`De9Zy&X1tFtKyD`Vc?SHb>*0d~mh7;o288BiN&0xYc~yd_?7; zB>rdT+r%(dkLgl$17Z+ztbOeC^OcGHkJ6dO*E*O}7D>oD8yP+c5{4Q=IxD=#`G70v z-P-cOEk5GKLYYKp^t!G-`5!X2`N)-9SUR^*AJ>4*TT&k-HSjQ&jUj1>ZBF_xfYBqx zLoT&aa6)l_2uaUEl)+0x=CxTyJZDgr)K&}kH!M67FmlUPXk_Q)$vC{w8EpP?B$-EY zQ$78P;ldNrYMuEmROvAFCvdUzg5a?Rm(3Y2;voxe7mQK6JL#=*p0AC`?fa4V@t4ms zN4qKd5|GIrlC(-8H<&Y>;(6F#QTON2D-%cv@7>`cd`Kx?Bt~Tq+(frrefxyb)~s~B zz_()0UKV;0^e&#k1GebSBA?o)z)1`ScG6b_V4Xs!`t<8n?)K2i43O<%M@CNDO7#OH zZ)T7{MOra_z8-D@-K+RAwP8V}+39N1SHWr8>MQ=uN~e8&b6Sm$i965=@O{{dOoViYWl?I-wxSiocAgFM zj^m}%r|TA}**s(@EA7-@NDwB|m(2D;)Huv`&X-OIKCVSr&9=LR9xl$V+i7)OYwe!{ z9ei{-TrGbCNKKT)NgOS&l5TwMG>ut2Uf)+oeJ65DO;(%avYC%uzO|i`BjUSANlED; 
zZTMWj3}xqEexnLF(Zlu$naKx4i9vqq0fdr_|NckbIW%&e>HZFcT{SrlX@|7waTf^GtCx(moF_Upl!O!9AZ zwzDU`?6PnUaMIdl#=m!E+~uP0p~DhH^^B!$IHB$wblihfiLVnCX<21edvxC5SH6_7 zv39M-f9$b~X<{YxkLh?7v&(zyvTFNxNi$20@$0O&V=0ih`0 zOSO9wzU`69p7|TbNn`sD_CDy6Oz}OJM+!gFEwGyCoFFAcgO#~WmnEPd+bIW_oHAGH zGJ(03+$@$!R0L>*9jSp3^BBT(qJ;7n@2CvSYrqFy{j(v1PXIFRcDTtu+fwSx>3HaT z_-Uotkrb`pp0G1}wzl53BCvl{e$Y0bp!jZqbMg$&0=4^s@S`#PxojF!&kH<1XBeIo z=1;&Q4^$R`&X}*-QZ|Bi^6jx)b-P_~sLZDPCGzF=EJ^{IYvMrMP@uQa@!3?|#E(nd zMts++mp4V2BjN#%Wje|*F(V*TjNAaFJF=8jqsH~ipjSx4-%euM*YNayEX?OM7%i}P zyfOzn%b13)DB&|E0<+2?98xDmT8dXe@b|r%loJn)oxf7J%4#fi2B#kCTQW>YX`%yx}}&3%B&x6 z3aC2V__waN;e2no@Ko)*XyL8jcrHVOpdFlmsfQ>mx9m~D_jbFh#P)X4z*7;+s| z-B}5v_b)dn-DJ!)_v^<30-*n)J-1 z{k>n!?XQ&D@91xKHHAZWP|$M@qK*f^O2JHK9|*-{CM4WLjd;UNnZLUwan1_vl$hrJ zsz3HRi-0(YneD8Y9`F(wvw0RYJ|py@`f0thujJ8ltqH3dVyzaIh68+$e08(37JNR5 zxw}nSSiraC84bZ`#(n9|#S~<5A=*4_Dz;1vYgUs>lVz1EW8qTlYHi$@JbI4@Ql;ku z`3qPn-9)FrZ~~M?Zl5@wR$Bq7jkJ&g3X^4qeTTghpdMoMnK(gB=D{HSpJeQdx(A{k z%=vsJu1(43<-}5lGO=UN;H#!M#2M%PeJAo2(|YLRjUm2dt|l7OJc1RJ#B`01PwkTc z<-P{7KRcW93F7EhbFak3FS9=~=YJg)#22pn714w?6ab4Cn=Xv4#IijW13i(a)}N<3 zQhK=s=>REP!p?wb2GPiCR^MT&_M4({kD5opWW>AMWXuPt$Smq$;`6VkQw_C`Ka8Ez ztBndkoLJ*6ab7+&P%wbR1`4Y9t%3aAxdna1#(Qt%<1TAd&-3cMGUdoK_u)^KGg+dp zMgtX4u$Y;A0(^eEp%&QOvGhgPzU{S}dRl@j)*^GSIOCVDMxeumZfRSt(_43r52Y(F zU|MVN46I$)qsysn@_In{jdi zP*$5A`SSHkLQ5@D8s+D*m)uc68gWSi8I%6`$9+ca-;vnX6ba1czqXqy2stp4RE^7C zAzo3)IBeu~us!iE2p5LmRt?^zW4%{K0XW}zjdyN|agNef;*KwYLCJKQ{x?Lz6m zM*HPOF6XC(M7)E)rG&dA4W6Yw1(@#0_7#xywU97bYup}PC2qPOKe|dd*3ZCht}&4Yyp{DCb$lM}2JyXS^3o&~*m%BJjAG%{>A7F+ne zrlQLZ*M#ehLHOP5ugMQjuryba@u&2D37!ChnR|zx?rT+_q?PN78ChE93%g2hJGc34 zpY8dFuDFd07;L*4@v^pqWys4{mU0Z&a=ukIHzA)2a|;>9FXNiSeopF#`0ujUO^AbwS%%45`GJH4N^Q={xgqxK?zNdu zEQAj_8y0RaX8o^oK*ei9Z9mpBCqq|PGLC0j88Y9;^(>PU$5N#s0~L3ZBz316=vV=; z<*l&ZB>6K68_=)eU4C|9lH7h=O<(nl2V2a{CbLotQCY3ko91+~fD6Zo5NeKiVzwLa zJc509%7(iKXuIqqhsCDyYWk++iII68QSFRorySV+yvNET^CgXICHTe=bAKWv6QmgRkS1nazF5s3tn9TJ2q1tg#~@dqtN9 
zCV`z*Rfr#V9MiW_AHN5^=s)0ViYi{>tB%cqO&umNo(+_|(V3@@#+aGOeO9*C=hK=u zp1^iTK6Kck_c39cj1DG(BC_`i{Rz+Z6;dJKoQ_b*@D>;oW zGYgx8KMU@3nr&02@ewFY?2+NYO!|uEB<;nUzM|L5Cer9uLyP^ic3s5LSp!3CP+t{4 zXwJu@Ow!fYtjk3Kg8$uw|J8V?$s?)Ndp3ZSe6S2Uj;3M0+^4k>dQvA&unnO|;Th?| zu&l!BY1ci|QxCD#f0JzNvWgx zgo!Rrnv?10m6Tx<;_u4!W8o;EhrQyA=hRGY8B<7iMvzMKiQW;K|9D z5I;XLlOe`=&H#4IY<|ln7CUSKle%t*ti%p7MlFj0`LN|D??tCB3|duJ;*!SGfc_*i z_aSB>(>3ATK{TRcGP0DCWutX1AE1`04K=>)AcDGSC2|>>+z`vuAh(7B*fKu5TnHh< z&bB7g7!a1~8rV1yt6!OxZWQj8FyC-(_h%)Xv8X68ttoL$N2~A)Qp?;#1-)05xm}(_7#iL@c0)9YuCIJx&DirC9259}{^xH(IZM=xwuvD5W<0eL1?CHZO zz`KIV%QRb&lA2)~U}cdYZbRBDG+m36#uUI4k1psoQy_$428hs6De_(??~>(9bBwqB zbG7;eEsP#gHRWY}F+MJ!so_aytz^?iXlP)V9P;X}3=NM<+g#TWf!2LM>dKcTUZ$Y8 zKFl$x8G6xhIR1L+GBMcXO%?sz_Lj>j^-|M)W@4~tH2HIJC^I#Dj@ZON1*1ZB{jAP4 zyV4PkrFQj<+X8%R5bGg|4EKVS+dyrE`|g0Z9^d?QlpeQC)lwT+X;Kd7SYjM3G7~K^ zd<3S0(S0iEkzT_rh2j+@;2Awb#{Czl?2G5+PvS3AI%cx1y^S0H1PhJS7jBEl@_vwE zUOnwxe!1u-X>Pr*w^EzsTxKa${`L)7fSQ8OQyYS4NsCe%dA2iQy<(ao8=wt9Q(Zo3 z0nBmhy;O@CzkvrC#yz#KSX$N;z>J#Ja7o1W(68I4$T!j0iNu%kE-uyd-xvlt+A3S; zRIO>L{#k}zFg|A%aLRcOOvUou7l~E>%rEkwtDcYKq5hZ3d%)IDquGMBvF8x>oXZ^i zQNy>sjS4={*J`j!`E*4IWA(_&*3noNWTkO-&EO!gPi&VUI&r|p`FpgN@3sA&*|h3Z zi_0b@vvX!I=X0-*n@H#8sV<`zi__}Qq=H_U+E7^4hT3coy~uKxj*21)ln@-|4oI9? 
zN?tsGg=x6s#%+y8?*7y^tVz(|$f-mJv<%N^@eAqEfpo83%Mwj9uZ2c4vm4>T>+2(H zV(%{7m6HU227W*KU1~1~dGe&11va0~PwR}3>6S8{nhQCa8vEU5@%RI;lWE7GNHGri zY4fHyEfu{90%L;8aZTcz-X=ZFWWcRUKxEKRP0%FMnb6&A%9Hm8>pI#&6W{poc_RR> z#G5A@z-A%=z;k8*bY!>t$d#w+{mFl4FdBQ;CmY&6R5fSDN_r(dtXPFR+{oPt!2Q7Q z(>T;lV0!19UZnvQ%Xp1NBfpIP_(~~$YHqYJlSm?>y*vx<9eWKdp|Jw%n@i7+%QTH@ zT-3~{2UH01vVLYvwu~nDO*HhGZ5k#9<)XV)wMmNfp(O+1LACJ?e`@gS;Iv*{Ub?fA z#xqEQ7xTm5C-1(b=5CGxse@m>ZW3%hdR3`F!O*)$t{)M|cc@`X7u8VSVgsRgIb4J$ zh|0j!ILB9;qlJu%ScPGRr=U=^(kk?dzi6Pjz(raByCSJ9qord^E{hU+mu3Gt013{` zHn$R}w9wJJH2j*i6j??FOgBiK@#Aibq#@s20*`dpyN--~BcbDxt1I1S z(}~psbfN?18AlVRqTM;4F&I8|H5&>^so*dn;wRpX)B$wx=l&6s@jnC6T57$OI7JeoKTt(dErk zQ<7S3pzqD-5rFs6a{BH5YeDF_U0~*4D&HJA3I|$R$bX(jaB#PL4));fwJ}mp_Zc*L z;{p4*>jgA!Y{n?eiZY+frSu38+P^BlPLACS!3%wC!BV8qf;fF@f?vD9gkX$&vtj$s zt2I9~juv)o+=$+`FV2xMn1L}!rHtA1is=NMv-SlT{0<>p;@g6d8JqamR53A2g|ZZ(pP*WUm@1rZPM<7%C%o!ZYDVV;M z+RHubBS#R^lo2kz7H1llS}ct6neZpnA|g&~E+D`GVAQ*Sv`7wK7M(tHdYNucW|`uv zTuqReMQM8RQM0sN?bM=W#PdOL#n&=8y>X(9d|j5D5cDtLp0-XvX1UAlA9_~Jr3TG& zK{9S}I1NOLwPmwLV(8Og?UxM7fD9i1h+5wHn;OnJDRi@3=HIr@lNNs$v(rKsYGIkeQZm(G?AN0;5E6~~7#(bSY!?XTsyhczWdJ0^ z!iIMnHeW2qD=|HM#lD!3xXaw7QBv)`ro<pnESN+r!jcKTE7eN)Op;7^shY)L0!V z{oB3Z@br7?eQq)}6#1wRXo5iHL`>rA`8Z3JqaBN#(^0c0d+&Y=Ga2XBe1_&}#0%D^ zd-4p_j?bXwn0~;C+KkiiKKZaBAq|0k@~P(l zY+S^yH8s!AKEU|De6zgEoS9zotOZbKSE|6|MsjI9hOz!T% zL^HF^RIdD>5yTjplmcM}HP^#!~yJ=wM1b`q_Hjeq)%L;a8)cSAL8%#vZRJ!JIreb^V z*@sy&#Io|q_oq7&#TgOHS>nMj7K50(?M{V+5I&C&z=LKkyZl(jMeC#KGZRN`s<-cS zzpK+_C$}G!mkWx$h&yiEUuxFQE!kj}G3D!vMM!R2dt8hbn%69G*{&-+12s@W(vxTE#S33T@(*0NuAC zgWbr?xk5nsN`e6AdpcWuK-Vnfj(A$Yw0YXIS{sFYt0?mku844?h9N>@s+4e!mxawR z(v<1@YLwybsLq`#nsT*%YKb)z_p(-NUAFC|hs3DGx>TvoG38Np8@}7h38vk((fsbu zrQ_42Y)OnRKxGZi0quam`)Bj?fA(k&T;t{IfV|+6cOFR`3>oRCK(00!KaAB&zeWeMi!3Kw?i7eoE*pyViG`kTZ+Ryo8# zleFbkcGj}YFjI`5*6LP*T$ZOAs!@~W)moD6c9ZMAs@OapHOaS5gzoKiU9L@^@;2|) zHLdz?d&G$uk%BKH&U(IU7k0Kh8mI4E=Ta50?vaU*<0DDt@TQg*Z6N$+iMW;%F0T{*>aP}^r!zGZcP`E{g~x4Tci 
z$J4e}#E+Q~81@Ria(Ea`!#Nxro_%?%(%2`;Bm<5}I5{9=BcR~tr_-)$@$XpW(7e`Cptpx?-ZhgMLSplPU|4*^ z%;kPi>(mj);aq59BZ#R|_EhQ4b)DkeQ}&jnd09PEPS#qrqaGI>8jfm}2&Wx6DtocL zlX6^_l&yDkoxOBEpPNfU2}Rn>8;+yX9WdA(jxZ~nzSCX-l^6tcTwr$~U=LoL@1=I!$EIA90N4mJM(#_t)-72UM9kqFMq6^0i) zo^9Z0%7Rn?Ow33QGBcCW41NYxYEC$xf0jPgK~z+tl1->{QG$MM@@@6pbnuwX>eHc9 zRMeqgT{(ZbeRp@(-j+RY#c|wP*-t!UAS{l<3V1|!bCL65KrT;Q0zFN9Yp#4uds~^& zFQdXp%8N9M7y!EwjqWg6M|_<21Z^GfiM}rQpHtHA?eAsN7AatG=F>`s&P76`v00W` zc5nzT5EbFK1>PIvv1FGgZT!zPiWOa%hWR8IR(32H7iI;r#m-JnS8j57&IXlLTAniz zS}r`Q3S8KB-z}5of|8ndwUW0+?PH=i6;d|E(W3^M ztAGV^2g15stgOv+2szv*QNw3AQ4vp+bCy;B;PoV#zu^bboFkcU8S*(Z!lN17au&!Q z2pK+L2aKu7$1Pmat}2~KRa2)+mzna^j1|`_ifZZI?{#L{*wtk?2>bD-)RI?DbrgM6 zT-?99`XuU!G`Fr$u`HKnHanYeP|?PpC>#Yj#~p?Q(fl;f+@bUJ z2*%w10LR)%$iO5k+9GWrk;r@;WVS&(OFpLQx^(@z<%*|Sa>Bf2Yg$m1^tP*8wEqB^ z>Nw@terBYqRB~-aG^C+^U6Nb0x3j-xy5=rCXFRU&Df1jDC|hn+bX*eoP)GrSx6Oc9 zu>%mVjc)=FvqnigF>tNUGNp5qk-;U;anB(2XW`C;B+VM!-)!l>%!rT%NXjfRu5rOC zS10BJ04*T+X`m}QNj>p}IbSa3C|&W>mbbV#R{2gaSn?QP#B;-A=P1kAM-f)7M%*f5 z9NZ%tMKv38%KB)uT56g#h7S!MRV6z@>AH$ZrKYWWt-UOgX?b?zpT-v~F!RP+gOMrP zc{uydc>op2+tZ*uVP6+81jXc!EMb6VQcoZq;K}L>0HkyyitL-=R+TVg_RF|cFi=M6 zBY~1Ye>O0jt9;GnADfV?#a5HyevulL`%a>}`bs6Y+_@w%z_s$2k-~>kROI~J0HKX= zs-aOTwPNG0mVr`>3DQ>9J{(2G%S){G^u!naE6=I_-f(}ub6T=L;f z+@VWnYCM5XTX4x%Qo(rRfq*_-0s|)TWy%GN-aZs#F}tZjfCe31SYTt7Zad?pc9MK9 z(N!|qj-@c<24igz~>J*vM@ zNXs_dfCdx__bXz z>upu_wX{o%pT)8)VGaG`hB;+7vvbaQ23Av&2PJvN38q?js#)em7gPM1V9z_85CPB5 z-nbul9uHoX)Lncx($P4)zX5ZZTw1fYAmeDajTJ^hj=A}m?#NL{d_U75CP?)QE0`0)m^0vZyC;QL_YRsU+6D@ie|; z4dL4-a|634BrKt~_Ll5)Pi&T*dAooq!sJ!)}PDAjr|Y47tTE>@)6Vv}0!%HG;+ z!W3y!v|^i4NnhSqO)W2^UAEcU-lZF#6TZ@kF4B2FF&qMZRl&gRk`F!Cy;FyWmlLdOt3?=ka9DS#~?O2Z=md6-WO=WWw_okgLPw{Dp$uIhg%ic5!L zaNsnIc^u(`F~B1^ILG@(q_S!njNdG=%F4(=F3%_goM(GB2g{Ov@i{mE@=|*_boiZv zA;&vQWQ-1Y*uOCxk_pEEoMa4ft<4IM+Z!3kD#ZQnInD=R&}Wh8Yfr4goNCcftyR0p z%DmFER%+b3tJyAH-S2He6w{Zqi;7mV<%>&Yt-afM_1QPNA9ep)!wS{33$mT^4 zxtU%ds7pH_`D7%n-ocWVnzU#Htx;-tcYR`0d 
z-m!fp{-bel+qb;$g;TvkhQP=Np#yNn(ar`y$&P#Hx3)0C#7N2pRIo9U3k(!sRFmp@ zoP&~ci6!K5td2tvy;~T;<%uQmIl<>0{lTsiQ1L6q@~m@UV1;FogDy!K+N`I6fC$G; z&5>J1SE(6LrA<#r^UK<}KZfb6HDvX@pIy!ADw|D4F-j>k^=;WXKbyAu>Ua7tg|zGK zLS@u$CX##Trcf5*82q{AXo||k?V48+M2>)Ys@_;tCkk4*{ZGL<1X{iIl)C%deVXEE zwwqg9hbbJ4nO0e$QgO88?^4G(0ZKO)9sS+V^Iqx8uq9v4AcbMjW%nt;Ac2ag(M%;I z%tC+; zHz>HLX1(5JWV&5`D_yQ^>Yf|Y?FnmbKwYZ2P#!c!kw)RW5y-(^*gksR~qnDJ3-t&XVS{j*cz!U(1#AZSLOd3}srFN;N7*lepJ7z2W)~nyBWw;79qogh4Zs|> z?s|`uo&X(&+(ropmZ0y9f*lub>}Mq9c^U7OBOroKE7o-Vo+gYc(S;V^+^SWT2-T@k zsPeu@CwRr(MLFrad*9?SQ*Fk#C0=<-@0OFRQf;W+IKf>;Ewr4yQ`HZ*jE7QjjkzHg zk@Ay|VgX^-k^ty4S}7OE!IbYK2;&gz1fcvIZ&^X3t&GCz~iYS93Fa%b?hq4mMq0^ z`R{>{M+6)ILCy~tZu#`BWDqiiDTc~}yUSt85YSuByUR!%@rL7U~q@{kOFCS8?TQmaUwncD7pgPg`}ulc!BOmzC0wJt;e> zIK|zj?2~K$0na=JG}y%LBP6nsyN;(hzyx*ekIIOFq$L~WQ=W=W05}8?dyF12o`eyc zbf#Fsx?nn-?I-U5eoz4f5(4CqbIBZHvn5zq011u3KRy%>pyxQiAE~RJ9;J7OjZ8gy zsZvzcI(UZ~t!S4eS@NYMwThBZ+U@(+%9JqGCmG6BdE)(??xLzqE?ZN((^YSKeyLjM z>7p}<2*bWfWCwm1kU-?^7zCV-8?hj9sEH>#*z@VZ>InA*ae{mH^{+%954Cc^V}-~h zAG&_v2RPV-9!LwFyc~_#=GpjltCdn9k&i=-*(W4%kieX0AOV5_Jm$GxUsnxIl$Bbs zt0tXXJZet6rBStb%2B=JceA&1(o&~;dq~GsD5=T~b9;$?_CD{TQcLEKGXt&<9fky)T!{l zRgzXg2n6JYedq>Mfq}Jw0fsv9=ts&ro?nfGBRFDJNlnHvgz3sNf|Z+GlWrDC?><>8 zXz!u5TF{bCrD{pVT1upnvs|~cO}T3HZC!M1@pzHcGUW2agOb?Ja5`s>fK-g70VM8z zpHt2O&N1A3^sjZd_(!Th%w<6AKp?3jrU^Mzk&xhlhHgl}+ILew;Zo|dPs}?b>42<9 zBw(2UAP>AwA}1sqaC4OM`s$~$h9;d^^0!t}g-&GcA9YLKgKJwodNtU^;&2d*d8^4< zJIWHA9@pew7W-LQKSRLgGnFM!mr&Ro;G7HrxMPAj0OPkEYv#X>S66cB7IsrA!e!La z$zbSZYioPSBS?tB{odx4_7XOfEtLc}=$xMibuv)`lfAGVGP_1_6z`0N`A%7|ykL+s ziuqIak@#h(>5bt_X^f5LTxzoc6{}28L1hB7ODangak&@KuQ1D&0wTe;EPLVd+*BIH z&aCAsDe}6}l&@}GsZw6mB(#j0S}U~ClBq)*Dijr7Zf<1b6x61bmD*9~aaUeTZ%@uf z62x4xkihLK!N?r*j1CV6Amb;T;~9s{kx3{pGI|#+$>-(-9{m3RkzVS47+y~yU<8ny zpPCVGCP4YH?IU-_a#KApMhT|B4sIo#;I+3`b!~w|MhcSHU}T-tbqsU7V`)_<w-pS;5JoU-XCqJ!9x04492;=VV1fSG{$0w=loY%0=@T&D6YwUt@ z2{Fj>vXU8behcgP^AE5-=HCBrZ|$Ru!koYaE!7=YSC_hx4V;XueHOpKF~HshuTdi3c2 
z8r|*NHUx@x?S*$>5(@L?FeEDi*erSN_eQFGHGe7D;|QpRN0EXkMO+=t83P#P9JuIM zWC31{T)znE#x-lISc*-!y3I;;B`Y?R+H#z=wu?*J+PiYciz7xisW%v=)So1xoK=>V z*O_Z|z4U9L;u6YBe8-TW0(t3-6Ncasz$Ef9j+DsQpS-x_43I&_I&+@*>ztB0^si`# z;oR<}B=VVGaE4WYd4+bf0~0@(q=f~8jtN(gDC7b^hXPQj*Hg(5aLi)zw|5vUut;xOtqyff~;fC*D9vsqN6Fvb1qw^ zq??po($(7CQnAx4{{Sq<;pI-1s>&8qq^ZlAE%M);u9ww5ZC%#Qv&_EFD@XGq1G@mS zum=DRq;NUG9dnL3=2-0IiGs3n*z8FI*b$$uF`o7IV|)SKDGM~^MhQg$Bf#`w5=;O* zgPy#7(UWQZ1HfTaCgDgNZAM_k=OtN}Xxp5P*~l2q2GU0*glb9Fjecs9PBMyX4^69G zwv?M~>#|pS?QR-)yfh-A8O{>Y)`L(dk~xhfDb2}fLTKjI%d1p0}!W9ag92Zt?cdWTf9<>Q_@RjuXW12 zwb!lYG@PLdiW7=gl$9wb7`|V5sIF)^Cw&#Ry0P&jmhzPh(VT(uKliW)_p!j|&~x6O z8kHY7Bn)xbpTLuzoyg$!ueMjLIhi6y* zAslWx@H3U?o^mtWJ8_D_IX*4>MN!5yr|?vt?IVcKM_;v9;5~`TlW?jO?I23|Ip-)!fwusX4|CqCufV8_4=c?mN#&W`^T6Z)1`co( z4Ze+x*F5qn^!a54OA}c=YtAyF)HL@LX}y-0O78nu?2cuJ<%DGJ+FGmf>1*BVcXzJ( z?v}Rr?JQnQl|~=|^K2{$Znz;p$l5^xn2xQEDmS*CHOLK)pfEgq#C*U3fsjJv44e*} z*V&8kA|TsC%@UF~xk+Us2j=qQVi8*eh2aAb3*!o+NxlNxum%}tl#s#FYhbH`fCrhm z+4`2j3EX6WpzAEJ6Bi1xrx?^wykMtS+1;hOnwFa0&EHpTOa9QpHN5n*v}w(Dvr>+W zdn>r7-+O#k&>7=Z2t$H*0IW#ckbYiIJaS0JbI(eIS~{KVq%Id}V4yZmGmIYNvHA-8 zOne4aCylB(2$8ZYV4RZbo7s5v5v zC&bDMb)@TJCa(&#=-L!M6#d_IHnd7s($}`ea?Ehgk9CqWjJ!-s`F06q`9D+LIX(s@%9-!w6Gt)Th_LQFhwsi(VM$ovM za1FsM%!{1hcjJx-=#B6Tg&B~6gMzGBoM!=_Es))Ip51ofS2Y}yHiR5$*PQP=wJ5@y zYu(mP?MnLlyW7uF-aijgP*IeWljhPby)AUojGIk8Qcb;+THE6Ww2>rGRJJ)gbM6l~ z+Bq2HoSp zxq_|&WF8rC_99iIPMuj{p-o0gR*g7Roz>$gH8&{9Z+k6YkbQECySkJWmp9pO-DxJ$ zTW;N!`t?31n#N6~k5$OWPI_m7ll9;NI@V;?68TtUlWP;Twv&R{Bc>02JAeS=86Rz* zfQ%WHR#yPAR$eD6MoOdfeK}eVBw{arclS9x=`&L;3)?Ij?w~x zJt<{ansHM0e%UjK5d}BNc=b@aV3}hef6NAn>b?gUz;9#8A%x@VRqyVs9q>Pe2Y^WW0IO84r*EKvv ze`@H;q-oWr?@}}5t9#i+drnrQ`q`^)T20+6UG%=T{{VyfGg>)UYMp_A13R#Bf<1`m z01|ovc%?U4VS-sy;Dt~!ae@f>asc3wk&KM>6`p~zHj)vKM$Z78p4dF#us-PcHevUcHGlcV#SF0oNiydMn3S`7mkFe9l6NkB6d059FgY>!-3SQlBBsKBO8y` zJY-^-X(_m5Xvkq2kXHa43vrbA+-Mz22`YUStl%iH>kRd2C zG6)$fk-_Q5JRd+XDP)Ywp#}j`3CPArL&zcZWTPq8ZgQxcq^By5QM%p`_g(L1;}rJTQ>7|#aQ1F7l2Wv#cYk-Xvv%#@OC0R^ 
zcP>~33PD_L40@gk<2+>b7+(0s73Nq6VsJ?~?Vp<3;x`+CMpD3>j0NaO z2R^;Zo`aloT=mn8vbP^8$5GS~+aA80bIvQRQ;cbnKq`7pAT&&x=(c0Qw zcjx74H#cP!qJI_Lt*38ZhcT&6xDm#3P789p5!)l5UNN6uD~Z&Z*@6y2gU|qT^Jkpo z{{Rmd?mG3ZhVAn5<8d3GX*kXaA#t~i4DpZWT!p}zMrM`vvgZc_7zD2efO;Gd2c}PF zP80U7r0GVYisyw&iAHMnwdDQ8yp`W|axApA{dM%)Z@W{HzAfiCT<#;0p13$TJd?+u z1Y;Geq}fXQBq(;{gUC1plYl@6I2bt|)mGsQ*CI7xkZ{Bkp1h6&07&I`4yPpZU7dqP z4Wo3ju0FzZp zrWSQ-N~5P5YE3m5N)w*jHl=NKTCc{&vXZ46x2BuDTiVGb{$6%RsQ44ZlUZ0ZOc7?3 ztb=$t2v889Y+MFq$pqx}UJ1uF@g*8@X%hmUWqIUz8JlTFP!$3f+qqd7?kk)=&_L_% zdNz-9aM8#K*rx?S3P>b%By<30Mb~=?X{OGUUAW@Z+o7dno2r5>v>(bXXMiF_prFkR_@JT$J)UnSomj+o%k~+C3A+g^)9{!__yn;IiBYdnAq1%zTjxpHa z`i;KbYoV|jY-E|CSxyvVcG59{w*liM9AM`o1ONsybE#6CQ)}OAOC@_d$7Yqfck|q8 zDJ6K{w<=L>-93K$`jlaw;Fbp^ayF2pa7oDx$@D)lKA9EJTIyQmg}j#b^IWV~49y}7 zn}9MSSonlUPy(1!@`Pm=A=F`d8$DOUkN{=SjEuZ1lM4kP@W2zC^Z@P1&Tfr4koF%$#lk)8$qaH z-9imTyHe&&`_4)daav#APnlgMt)EMrUXy*PNpcaNdlFdro5;r*$x)n=2LV}_@S&K2 zU!$M1@50O7Fj#oIUWi-jH?~&^X{QM!XybxkJ|w(mSmFyYQcl?){8+j-4g}Si89&eh;vH7dEHNr_E9l|iXoWJW;ov?+}kDNxP zAf%~iVl1YjspT^M=Nl?YXqB3OErHndxmAr%zi%NkRtn^xVz z%IeO_{w`&WtxmN(G$Su%Q9o%?Nv$s%MiNaVmePB@Qs~QeI(lPi;bURK;1kKf901tI zB=duwdRE$4?O@~N8+rMMI2p@e4oLuTM>xRgRUla+Ah3B6$RlVhMti;iR>pEMf&c)F zsV89=tZG>ZT!0vZ&QDW;`o$S1kiOMl(D z1fjvf10as4KA0mQ;PNmA?BWQwfUJY}Y&cLeoM5pH&q4DNFnKx1Y@5M|i>MB+1B`)% zAmlK7e-R@bXPg{!T~WiqRbI;lLKIy%$J)YDl~;aL)M>fJa_;XtrgTD3mL19!J*=El zl7LVw&2ZRhx)Sy^ zX4TT1=Tb3Iwv9&fXhCB6rz*njAc97PEzKQSB{s{nR)M~mV5cm zQggeV($Oas$kS`#Qg({@KV6TR$AGl(hi$SJ1C9A}+vY4n@;S*M^PgZRUjtaW10}p= zhaW6sjB)@VdV!8X89DD?O~c`+dEFicF_K8*uUuuj1CV*a;2vjP0wCBBA=}R(PI54L z+s{G9PSf?Ngej>v%V|p7*1P6@>TUJ2wY2%(%)(r$#kk2&SM42}z2k06J8Ac&YbCMs zJRS+t*W~*gWDrPE3jD<9B>bm2=O>;=wNL*52xYpkA7$W<2a}9$J+Pqj^~O7Nucp^Q zjO3WnJAKvq_EX2>)|`Aj80Tzx^$G`E^*cR(A8u-uI7i`(V)VX_Kg;WTHGQ{CsKrX6 zsmUoxrFlwHzu8x6^5ySjweNc~<_UZkXwI$nKrzYP5ddR6gR}qu>w(DQByfHd{iysm z0(g%>wp&zmhfcn@Tw0?L*D~8g?p#I-5A=6TmTVBp85a?s*cQ=YR&Zlhz$E1t90oby zW1r099G(Z}_w7TcLqCq}mOSR)NSY}&qu?;qt|pB^L?b3ShC-aEz-0#+)v9oZGP34e 
zl6pm5^6uodPfe}b=QOMRLZigcrz*ILE1EDCT0C9_ZWOf5PerV&|SOz&Ij^mBT0e1Tb_BR<` z!#E~Q+3(dyewjT6c<3MflI)O>U)hoql7Il+)$_+; zf=)T=a7JxAaRIUWe8VQ?Lsa|B~3-TjjmsNs?luj znzr{l=<(hTO4zzy%~?CeNx3~8?S53<`##Rcj6>j9H>)*_dtaS_WGleQEtipi92^3p zAPjUBpq>e}Pn~UJiPw}ds|LvEL3Is~dTlu!eJk2+($t_W3Ca=GcDWz`NjtgS)Z-Z^ z1fG>~AhwZa;p{~#%h|?HURu#lnM;=K#oxZJ*7LTn=ZX9(M9#|to%}J_%*+e!IVz+cxjdhn zEKYK2llUmfHh#=Exz6HPFeHJTunNSe8Ot6(!Ok=Exq=xO<~U2JBapv0&Dig8oMW$C zk%3W-Dmi4`<-i6qN6HR!mO1DLQciafkVgR4buhT7N-|W`>9z3k@=Yhol8l{}n(VZC z8PWd$RdJAmjIhyaZOORWH`4y{vV>)(>N?)dZhYMr!=`x$nHqu^GOk0G&q0uJmE#4v zWaWcAOmXDhogURq|RLkp7RWTBZ zP^VILBP5(|-|t*vwY|Aywdjm_JS>69`zsPKGv(xr9>gI41J49{_og2VC_B7{=x{a? zc_X=A1_w-Y>P9j23u$qakcolns(a^*4m~)}wLEEPa1|GlI|19CG07ZfBz|1iLrGfO zt=rc7U2Sfk)`v77B$V9RcU-AE!aBumv$mbI-uj*bd@in=g}Z#G94H6z8$rkB0sU(q z3M=HbxnYho8ytEPah^HpjQdy7ZKGpuQAs1F2VCPA!N;#&oaX@452T_o1|T>*Y)+kr z-N7G1eY2Wr+g??6l5IVDbX#p^wOvh9l;qo2O)s92yVl>+=+ZuVUkgSuMXQ63K1Kt8 z2TXP&a2e+S@}$@t-Ca@ zx=CrX(D~W?CY=7~9eSg3a(eJ_-1pATm#R2naT7cJRVLf=wqb9 z+!)uO80p6&k`E`}uUwjdX;B8iiAX$T;Pa2ch&}rfe(#|fS*yh)`Y)FL>({>CtE%Nr zZZb_JZ+a^G*=VDqZ>#Fs*F)#u_(dCN#2%dke8aCNZ#f`!&#z%fo)lByqC?H1vQ4bPL#bJ*?3=cxxE zjzP(%f5I$`0+>i;2P@^^;9!A(rx`go&N=-4fd0pec1kV)>`zn9PeYt~lkMwL4JI(b zglG>qBm>Fgz5(~D=4$CAlTr9vzR_K}S-#ts1<4z_a*f^As!?juch%or^yu|G1o&Jk zfGx>A0rFVj91*}={q8|0-`=BN3Pxr92D>8oJ0A#)oLc2i9LL`P{{V3FpMTP-p9)0T z4GIhsf)rLVt;fxZaG3a_dH_&W2QJFsy>Yyll!IPJrs60?hidW zaqZTf(n-gauFYxFs@Gk7)BJ7oH3>;LC3KbAlWnc6VV3+go=vl{m^Vv|L_|`|T&So!WNV&qwNf z$b2TCu5FcwA9yI^j2@Zm-yMA_qxexnHf+YyP6>;5uQ}LH2M0Oib5?INwo?=`NHO?{j6TlRPV04RhK zlzr8Hjn3}YURHm4c3wxtUlhD5iSXy*8-195>%hJyxMI=Zi0yQ_X3y|9$|AzyquMzR zqyt~grM3;dnIHyhOJ*BJ7Y%P282Lb658Ye=k_Ja7`ri1R;rP$PKZ+_9Mm`&O*pd{! 
z%xE`^SgUO00g%CzFDy^XU&$4#D6_Fp7^>LW$B;5gt-MM|+&*2{>4A~XFxBB_5{xKR zuLw$YqS92UB^J|+FU>ZcmF;~sy|j9-Ybti7TbfdQlU%n~N%F?+?zLBYyDi$gPGGFe zrAaHe41>a-my$;#B=MXRf(2>BtQTNna0g&BlD%7kbDnwncNL!#Fkn7xXQ5yT=eRs) zfH~xj0U54>&e}3esT>kBT&VTSvuZ3R;6HM0pZ((KeN5(pQt9XyBSv)!> zmv|FwHtT8f%cv%vQKXILo-E5VJci;)ZcTF4#?!*iqpw!H>p7)OQ}#}Ge7e-?EKtlYF08~JLd;VxrSS-s<7>n%Mc?Z;z_0is&&LmlF-vi% zOz_7Q+;HB(q*_CJGy}*?kF?CmXAv{X!OMvf*<*<&C4`NA`(^Oz($>N&TRk&RveNA> zlghWXv9Y$YfdMGMvcU=&8C~~A(=?3>&ueP>)S@18kTleRIK&0 zYk#_&3<~9oZwQH5w z2r>j&!sN3iT|k9FJI->fqx#FU(sZpIQhDcgZJCYabbxe zgT($3&^2u;>IhtL zlwy^g=Lu=G&*RI)U+_u4ffv4W$NvBcevvnnBi#!HV$))+hDOj%}dtc1|03LV~#9k28F06I!3tQ1Ht@0WTPfNVEcLC)G$tD7ls7Rhx zF%}CDa@!gkl!N=i)^r;QT~GyOWhz4K`NIc|m4HwgUN&@az=lS2VX=rmIs9M!pMDyA zRMReWzZv*jOw@#kJgq~?NVPa5UoZ($`Yogx1@cZ+I2_G6-oN{j7iB zl)Bf0EcIU+{0qL-JX7JjD_g7EzYAX3#@fD>X$dU9V3Id4cD4y5aooo8NZv=6nQdTe zIaBbn#oij#G>vlaN!2Yb>@^EXj5ii{miHFdF-f(ZRiU_sIaE3;ynB#tRFeT(8RLu7 zz+$lVlc6|Lt5HcP#!94OuV?R5c6V3aQny`GKRV5LYaN#JPB5M)4;TLPPMt`?7jgG$ zMp06yC3d~-9<4W_%4xQOYx6F0ftV5-hTPkaTyh7v1ZM>B4@o-t4YUp0hs%(5fWUxA z=m@(_m)@9AhZ)cc*i=Vz101ey~Brnt! 
zBd9%vdes#?JfR9|oaZ*BHl(2lFLv>5&zG5NXU!hW`FANPC8U$+v}@0*X<7DvB9+=H zwQ_UvMR6}U2xY^tLQYxnE3v@q$t+Ih;Hl(izBjUn(12OC1tEAR3O;X`l34T@z#I}u z0}8zIo8+2Qs=K+)F>}$!Nb<^4lIYq^G_LQsMhYJ6c9Z7S-q*6V-|F<=Ubq&v^IY6T z5-4Llfe-+}i9B~F?tsLSFaYRJ2k7q%X)x(l*1J>ZAsT(#H{*6n@PIGM2g<`cf!s;y z%J@&hH}Xv!-)C2x26%k3WRxddc?p+ zq`X2{{H9&3(#YOQBR1xX^Vi}xj&*+->sQ*wrEt$B)Q+~WlB4YK62(IlD6IHk0AJlh z8pjMucD&PMYZ~^kuigE>SG=0;Xw{>S_meCt0R!cMrD(ClD*V;QL>!a$@%s?zr$ z1Cz!M7tjIQk?1ps9`5zyFqz1R50w;vrx|Vt8;K`8l30VZ{J$u5qcO(h3`X2D0rM_6 z!6XC8C!W1>bBfk5pS+ZwDaz4MjlJ$@J(AkZdD~R7t0hVJ*FB8SMhY~aV0Q^DtMc8<6^K;s+&IrWVv z!}hn00$v~xeVy2pW{v#A8to<1Zv>v<@Y@bHJXDyI&T$VeI)Ep9dR7EF{sxT|#{{Y#Z#oF7)Qy^6lYPwCVM;Ja) zv4(LPk&d!P(j0@qQmOoB?0qsKS28KU0l4I30grxq;DSyM9Gpa9LM_vsIYz5bmBOXr zb!Ptnc$-kOv*n($)n57^F^$El*qWZwFP9IZj8kqYwAzwZySv`quJkl)Ya4>P(0=3ZZ+Kxun$n`x5ToOn;^y7ifD>m6lX3M@yHZsH zfb3y_$p*_g}j0 zIJVPU?dP_Z*YiSb3Z_s1A1K|hZefgr*X6iK z1QCeX@7azNs45F`N}RWGleD&T?rSpaI^kkK*&H?jz{$qoql1rg*CzyGw5}234AG3V zn|Be;-@HiQfC7=hIr{omWbEslX9I$F6X*!%KG`GFn$_WO@Qg7LjTeH1Y2Hn$Q@^{V zNy$_o9LK1>%6N_q6cUOAG?{ynBtKQn~(fvTABYx5f=XOtQ=O02h40Y$G zdRBmCOfN!52aX1IfH~tMAP)FFhZVvd;D-!@t7U(_8)+qnIp;lnyHxl_IAM};kCb=M zCxST11KXgls^&?pZ2XhG^tG-1H`jB8QgKtXQ%U=&yWds4@2$RFueohb0LD2#LDP=d z_u%C8rh~Zf0^{2Qrce3*0P7MiSKN9Zz;W;W4@y-4=Xc6ZF^+w^k)O}6u%XUsY2H$A zw&`tmEB^q)_UdIQxir=JQcI$<(Qew(+G%@fexr+##|Pgz&*c=IWmpq#7sf}(KoJB* zhm25$qovtsQDPt<-6bF;H995!7K{NSR2T!LTe?e-F}e{($%d0|5JY|Ve&2^(yPiGg zJm=i^?~XTJzi3t=75U8OVuZz=g+UC{4TxO*^-_=5xlnSd9jS!P8ZjG_3f78-iON)%>o9BuUNG44;HWo zsxp7&5MIT~i;RV4Q^AzBwG(|Kiyw%OTr7 z$AvP3m`tSDbZxhTw~18BlmsVVbPwkc;kRsvo5-Dgx$1g!#Q3L>@Zl8)#}hEkp$aSQ zL)?R}PFu}xzhOGt0#_ZaGYwC}O7(YJ&3&v}aik$6(-)l(&DE`fzXMBE=^`?z6aN70 z+pW)k8PGLIxvHxEodBjr-(Xd zPRnly;v}BFvcTYTlYFQdMOJ^98vo#wm{IY8wjo~dMeeHCpB@sNx+ewT1f>jBc|PV8 zs~3B@N|!M!8!C66h7i2ylv_v-`M|ys48EaCmrG5mQ)1f#ib{W&Bja0YPd|Fjg_ZeK z_gDkJ)cmMt_EPZ-uUj{z`lbTRT5IM8=7OW4;q~K7Vr7yv(9I4-=Er*3b{ab zfxaMU;yMfd}(LD<-A7&#s1=7|}&LF{=UV^UN>V83w zHrniU1B(6FPzXpmhz1<0RzIOuOEwb?nx7t; 
z^R(%FH|^guUq978t45__{iET0&L*PY;iRI+&S5+(TFpn>dK>lQ&3XrPMlQ2QvlF=wlmXHmh)_Jp@J>Zb`3U;pz zjN1UvWiPO#gCe^V94?w{&@dR-!NH+H3vf6p)9cFG6dMWz#4?WPYe6n`QM)NA1>ebO z_kz0cN}PSLW`M!BflM20OaAe6yJ-5+(%*xQ)>aP$-xahFhs421W+P@rm1~>n&ypnXd^yCqB(w%PEv@h0{A# zDjffPo#S!)ZuGSxuz!6uu1%7ZGNR=WQqucW4kE}8p{=Cp$E)As+uhO&fyO>NI$f+EMCsktNVj@F5mrS$yj`i zIeUPWPpXo32gw431CX0 zlr}}l@8xeENhN ze5XI`9{y)`UGS0N%PtCJf6-SZ`wefBRZQL?E6kw?)ThMRP&28Bj7XkQLPdqoq3v{hI;`_qW{sI{ zx@w?`Etfrd`TY=c+-Fy%>J6=j7u2)OkXG`vQxJw9GA9=E`Hd*Ucpts)?|xc`2z!6adZGKY*$YDYHPultZKgJF}*9()iAHdF4Zaf?wi9l4uq}K3pcv z%CmNJW>EK}JcwL|l9MBQadK-zb7tYYaa{&t0H_;;7HJ}R(Zm*3=1+8#0vw^jfDlS<^`a@f_;($>D{|id`j0*6e9P_nMBUo{*{;! z2_!X{#y=Goj(f2AB)dA}%3(G56)EGol%Hi(+2QO1 z4>~N$>Nj0{UY`dan*9u=^tPAn;a5|=`gyM^c~7v@a9vk#bv3n<=cL`btZigXQM_Wl zKkh*+-z%hIIqQ38;#3Sql}r;h zn4QnvI1tk>+mF>#yv(C zUAMNv!~Euyh}Muyd1ohx$9QJUmqRp{u8>Yj~(w?*!eO2}Kd> z57uz5fk$8EwffedB2?W~UGD$7cdueK+?H+?+BXN?JBbOUSLl1Y(lnIp`Oa+fu5d#= zjqS;gb2%}e34S^R2;unKIc{6^8~)_#-ixh$PuJCD?lTpUUs=l8<)=3fa^$u4zK$_y ze119oN&dQJ`Jw2`+mEh)unK*K(+v1?&C84zo~iXr3DAqCces2139YC{X?)&O&EmK9 zCYOIf(G>)vbS67ob-JZGVpyJopfw*pZmXQAv4f%cieibU#>a z2#*^f1cKg48?;&J{J8b1jVU+8TsKZ_uQH}E#Pva=#l%$+?MBHiW;HL2nPLI%86QHV zValDh&9Aq#7?)V+&cyuK@*!PZY`zl=dM6cv_J!kldR?qM%)UHwO;J;dNoJ)3{(8l_ ziGNHs>z?r%Wvk}@gF~z@T7-q2sG?;AC&?yj>Jby~q~3KJ8F`5i2tu|Za%W44KTjKf z2JBf_>CVA)A7uuO%Y1CLYRJgJZ7lXBKyJQ*UILiWeO~e6yU)+fAUV5S5Gk@U3jUVg zz$Bh?330g~3Mmcs9H2)Kg{}TDz!arE_8JfIh zeCmX@AL6pLB=?$&ld(zfv95~#Zy_)k=%<14*sN)x-TLR+D+KbL>UxfDx@0w&FYz)N ze2ps607AzG4~F!)OSZfgdKZY^MIZ$6EB zCQviI+IT9O+F!xHr@xg!N2S-D_W9)uJ!{wbZ9RE6u`S(PJ)brRMRl}WCb&WYDAMNa zSJ|?mwn+!tk#7Lh%Ju$1nrK#KEsu@AL)n+Oj8WintV23EN38P1dM9gpQPs#|LkiK}uVT&lqx4aLG{nW;Mf*Gj2if7CwKDaE8pAl=EEMp%04Kr(0753Mgf`uz4lrPX&H zA6MrJ&#zz1J>=ahTq@DS%L8i)~xZq z?XzhkY`StOI~AE0QuaUYlUB6QQ4GH_UR(|#YBpEru=*8vq*%9QK5H%vMZFDm4W{?< zc78BCCBpZu^ptIUM#bQA=Bl=@?s0W@a!1^H zvC_$Dt(p$Y@VahWpA=O4y~47$is56Cet}Kjtevr)rQ^ifW6a%zSG~*q%i9Kf_g8^k zyl)zDwEI7Bj_$p)OQ4yR{|Zx9+d&MH8w!sHZVQ1QMK4*D)isP%uD`!h(5cDFX3nMR 
zB7DSe>nwBX2ec#(n{n z2pv|Zqz=5|NMWw@y)C;ic=lpHd{tw)_aETqk|$>5z{-js`zrN$qJpKh<-D$JjjorM zbXww$*83kJW59R~wx@FU<;PX)S-%FPa=e(Oe z)BEki)*COvft8$5a zmA!+tJy-q*p#KM`QpwiD-VO)lLA+lDJbU&f`=kDYorgEY2;F>O)YJB7-?DTqz5JV$ z3^iG9aiy#}%=HCrYuZWIH7L09D7*SPy!>>zwYG^zvgwju5eUNU3W)0lrlu@fY}70~ zD&HtKHQ5_#=BN7sk2oZJ`Ex6`O3vu(lXRVcl2Kgwe73xRhJx3y9)+v0uWRn|*?*B6TYP$Xd~7EsD$b@9$K3+EsK$P$9&8M&&j zwiSXOFn}E5JcsY>2hOQFqo?nheu1FuLjWY$owPmpPbmfoCVuz8;0z8+hBI5OVVrL9 z9kmhj)M$ha9f2Mlc1K^fqcWGmq`owvuV2SkS^05y;2;u<@p8*}#r9(@U7kzu*s8TL zV56xk4$h;!>D9^zh0@Tcxd zeJQIcDQ_+L5p;|MmVyjTqjHnqb&NyZEfQezb<}=zr1szTSFCwv8Rk}UW%~7S{|UXm zse*fH_k{%YexqknGsEP)Miah{rS_Ut$r{3^*KnA4D-Cgv;MTM z5u-rHt$*!RMxKA2@VhagM^AFf70b=OC3HZb`ki zsfuz(_h*sCrM91M@!tEvPp8D&dgzCBd52LbF-}y7zm_&(SkLh)(W{M?)%F1YobJk#WGsmq|zMPa_eJ23=&^6yvLM)=GTkKBwI9_QHrbKD?C7 z-964o$z^YGbr9=XVlb)H426qKeuNX~ohp1y%Ki&kE)RECZ~v~@C?onQZ*WIP=;yY# zxA|~*V?7!xv%p(Zp^t?d`xRXZ+WJH^yT9=+0X>JzFdsPme>W;*X9BLTU3ZJ+)53 zvY2>5aZIDtP;{gS5tVgP@1~?2CT)0Du`^ueS)5xWoQiTml)&n$ zg7ON{?#QLzg|oInwez7irA%*azrOMIM=`q)s1MdgS!O(t9+L++0+T6RemH%JZzr2i zGDHnyX!;~iWBAxvLeB&E`78Y!vLPEyfSu=L6gv&w)Cw*PyT6VTA$$axejN!JdRQSe zl{Ed+zlBQbE}qj#bK+`hpVW(3${Yv@OLW}OH2P1OBFl*^LoSND>!uf#))@NO*4a-X zhY$YahOftlUmt-{7JStxCDq_J2tp4nfIfAMRNU`1cfnfBS>8l`e61)Yb+Rxl9PGcH zRH0lTnchviISudpoX;rT zZm9dYJDIn5yTcda;K+EEB@Bo2G2{G4m7lRe=llDt=Fw05kLM&R z=D1k|zjm>q^IwHK$DEa9t_+ZXavySU!VLn!oj*`EDO;q@XROUpj_q&8}) ze&^}^51=>v$U7Rxs@x5yU{S7{aVhaZIt^F+avq~ui@Q|v_^B6X4x?9eiK_TNKpN%& zgckTjf=THg;JoXwvWp|F!5jue`KREzVrHvd(?LsT!< zuGewH$)f{NeJg$B$IlPWKEDPB;Ni!AO&`A*boIW)cTG271aYQ)zLH8HDHm8GU$S!5 zNa@pvZZU7_QGnKqswKEOe zh<(-Z_4cZp%HL|3E-2f4c4QwaBKjD4iwcH0i>3~-SRN>`eFJFvjR>LMqP)1Pi7MDr ze(v9XOP{1_8YbES{ZN(cu5M_Ro$^%#rkQ?+vun$rpmaEvlC)`5aC08L?kOVRb8oo_ zw@9lpan)Z(H{a-&Rb9~9(#*vzbI0*?b$LPlLf^0e2vQWO@NW3Bc*W*zTwQKav9qK>s7}G7Xyab zsXDUo`nA;h^wq3R^SsTX58+{Q7sZM(yBMZ*6aZMMJ!;&9yZS7V5#r!XQA(N3r4$)1 z{N8`^mWy6^7sxH0)27AA3A+dbr+gLiH7T6P3hky~5Zx;@6CV#6y{#L$3TN6U+S^)> z6u#?P6yx<~r4Sl*S!=VNb0TL;12g6>^Tu>&tP>cW_p$#U04g(<%hs+1OzrMt31hVb 
z0vih+u^>RIHV#om(1Vh96etuD#IJ-63+A6P^FR#G@3>p1%S_k*%Bufkd?9;3rTZ(S ze%!tO%**BM%(LmB>)wiGHP#tRQI4V7QK2~=YP)ynD{rhY7k6#r)ev~&Oeb+J2Pm+C zm97{>PuaV0x1i?ik^n+Bext^CmAq7A_|z}cNj*zVu>zvC$K+vSptDsW#+#|xeHU@) zxwBQLTexKRGEN8}0tn;yZrb92mc+%u$)pO{<6zcwZE&)we?M%6CZ%5~Wpk9nqF22# z#EHO(=R95{PEqeIIPa9$!``Ygl0PxrzbBTxg)M0mQ)}2!f4k$lTf*d0of~74$=`1d{g{ zfWxyNWi6zIz{T(p|G3F1(noxi^7@u;>@XiWES5ofcSM=+^U~F?s(y__XXWHHdr$d* z)W)*0lC&Km1$3xk`|wU1po+ql3bc$ zaVe9$NrhJ>2^&|)eZcn6yyqt` zuaRL)qmUrsNrHoSsEyRP|m zA}+;8tI;+1PvC-KZ0y>y*Yoma@pqHE7by`hy&qmv+X^D5tdE70c+NmFJ|ubuygz#R z^@eb1hCm~jr5Ah?#O*&t;d4IXnZC<5Vh>xPxItEsepGyblPcO>ufxvU{NDTzkMXsr zo#FY@UDwUef*QL8C#?+~+u6885!v&2=S8$aAfFTJXd^sQVSmf#kmdycqyrD;(c-DU z8Ad`SvoPWxV#!CNMUPkZb#H2}+z=%6Nsd022T>0vHctq@WaL565P~g=cvdD>ycjtZ zm&97>`ys4@gC^G+POOi0=Fd_SJ6>a5A^V%ricb5*AI`p{;v@XdNIX~ycd02ouFWa; z_Z+D*ggZQ^M+S5ft42vdcj{wN-~i0(LoNcXp=Jva2d84wqe+TH%Y8 znLF)^URB2pEed48-3(Hp-{fWz^-A_W*4Lu1cFRUGUX5lzTrwC^19byAVXz7g(8vz? z+$rVa?N)gB#8C0IlxSaH7GHn7nslsz>fe@(62YI-bzY5S8Skd^fB%{3G&MvZ!rys` z*b>iz8*QKe18{Rm(Eu;`kGb+i?vOgPrh@UZSKd)K{wsc6>$uQ<3J$R)IginF?xz|X z)f5B!Sbp9dp0s-+S|;y2(+U~y^$Doq z$gs#0&SyVkYkV7p=dHOPt?Kd*z&u=Lb+4fO*QteRgO_Ko+)RpHNH`*vk>6TOSW$@F z`0B?_U^6bfji53YceWcWT3a=J^To5q#L8Vp(+6qhfE_-OSi0h8aFFp>AQX&#VYVOm zS?X?0sl#^nCG+M%zlthc#_f0+zeBE#p>{4#yMz9az8-eeACMXgntGHx&w95z=3Isi(MsPrSMxtU)MGp^2 z<6deFmBVc2u1UOy*`9nzbe!t(y1b?QZDk)D=y(J>?ygYOJy|l*&c!-iCG;V+pZ0v* zOx;|!yU+XP-I{YCoocWzuw)}x{ySAIYh64k`!d6H0T1Q&n^gYmnDabO=EQ@67Ncc(+j8Y$RGpAIm?M;49rS_Vp7T8&}k`>~N ze%*0#zD9WXL-&BD|LFpNg>m%x;fPkDl;>QJ%PyNyj>1dWSE?($ph{#peREv-k8P3& zTfDobmEhMhxQJ?P!*=7RaWSzup2YCd*of&Ab7;y>I394!$sI}{P+;91`!-|SkAnDt zLC%Vr{;1SZ@HGGk52dH07;o;MJ*P*J+0}XS$*EnVY9XAYB%P5Y%imOf-e(%PD?fij z)Tt8KiNDC-?l0G+t$9ouyPmi!OGs0AqyGAQB(6lU{r2oyb@QjTkE8e9mG2B-jb;=N zMdV!$cfziORPHk^5MPBDZJ8H_ZjUziQ+Vn}7k|b6_)hdsu7o}5Wd}OIm06WmSTt!W zEqARDC=WmCJ64(K22CA}!um4qKC#v8VhDZ64x};~*t+-{l=YE_^~|j@%`540XZG!Z zoIZb1}v~d|R_R;?AJu3#NIXQI+ez?pY7Y{I?l~GQaH|=QeWNF&HAAVyEq-ZR^zVOvSY4OqcR#lrg9=o&V3@ 
zKQ(_D)_D}hWDOD;W+Y#H(OYQ;Erz^08Iemeo>97AfLEH7CG}jz69pz6Mekm$o>GBx7B2iSpt>l=Uh9nqWOvj%#LT1{wxk@X)>rM zv}hzVxVtp%5~h_N^W3e-zHRflME`nf8PmGw5<=!EY9le#yyM+P!kd~8<$b2^WA;XA z%~=W9l0KjGg8@AaA-J7c7-Rxb{5My(J5(6FKoHkazY2+k@6!p4)}e$T>dRcG4u)s8OAmVqZIvy zs=lpq{h+Tmfb`5uv)#hz$Rm!-Y}u8*{Z?_|F$KAE4DJTiw5Ol`Os}h%7-6^!!r1s~ zN?eY^k`DkR6lHRzEc8C4S=cf-_!$g{(bQb-a;1w)omskq#k86iA`O_eSHu~2Sp0u7 zl>wYf`_$+#{-g9L|5+t6*$G5Z@+fdCfkvj~SX9W`63sUQ*PWowS;2^~-N3x4tYvjp zKAs0|Uyznp%u~q(XeOg)^-CE6vn0JIqz!M0Sz#$KDLHTfg_7EF!!}ogiXKwL0e43oRUdy0=e=K9MKvKr-SzaM)bA$SoFhozI~zG z9O!BYbc35Oe z)IdB&Z}SV6?HO_x*vk0RUSp=^Xo&9dd!o6k4kz=Nh5*mvKREd%D>Tr>zy{#Y$=w3Ee=={t2iBF}Ulefg8Th$=0;6^2!1l9F!ZItF^a9vNs5cSbuPUq8UIcZmitrbJv z`009<{O20wS)^~)YUDmY7+8_na55R`Q*tu8)dNoXb(h^R@0P+ymGo!Rv%FgZ6;c%F z;K5opiL=l7=@s_r<8noom2z%%kx9_ zUP_^4t{UwOs2Hu@h?b_cJlPG3i0(LQq;X`^%D(VR>(l% zk-E;cDd-mFlq;bZR-mQjdLth<6Rcv*p^)xrn(-Dc7HBiLXpk=J8ZJg1$XwU641EM} zK;A&1!}bZ?BU;Z7QSmRgKQ^ho6TG*;_-&JSunS5Dq^H%UB;sQ-hW4bCguix%lzx2M ze})DEs2saVI;hfAG5Ww!&09*X3l63`bciiH2% zDeGjQJ`(msBGMYNy`iW1{vq$X#IH3W-+d81lAvv@ufpN_x?}10Vk1gPVt#?4=6*oI zcIs&W>W*_}t(gt+XlkZh=)2gktdPE+aoW>G*(kFUy~Xu$F`iP=f8m@5468`Z6$5*J zoCJ(O0`-L=NM`d+ru68Cm+1Fs=r~DL%D=WVGZC+sO}QGPEvl}x0>R<_V=G6+_Z<2Q zw9Q9FqYP)!ulXo(xMmW6{P#W`Z1Jjx<4pn67ObQlK}ECK=dy2Fo`8_GIkZT#Q9MaN z7b**Cwzq&a-WohvD}VQ>qCQ9RkHQ&>H4hy0Q|oaYF1wrm+@x$d*f55OI_iV*`~$o$ zP`f2cCkHwR^97J_T5$Nv5jku?o&oIQcN$HApHex8qcYBCUIXddz#+{1j8EiwF!3R% z8z!wO#avylub#}=l8gX>Pf58aqdE`L;QU2EzQCWptlS1w1_PhI z{R4dIZU?}j$%?MCg?`a`kB!~94r5K69j-m#?56@2{J+8uM#RU=tl-d@LCuQkK&yZ| z8uu`3ucfCEhw@Rj99!E}XLA`TC($>)H!HC-5ulvwXeM3?>%;0K0DDKdTK_^(p){5) z<+yvD-s7RunnTx1_#&D`fqCzRXb^a!)Cmq18TE#3g>I;1hmKk`4H)_9zDsxa462{c z=)^=mwgDHx>||Caet4gE20trs@e;2n(x^ngXfc4pySmrWjNPinU41nC$|8aQ?__K> zSVEwk#Q<2vpE5fsLzOV83W5Hgwi@*w9Keh7xMXAxp$6|sF+{d2I<@QRx!mH^$lu;t zkU>tG-xoP67<+H*mtZq#kZ)H#uCZuc*`9-eED+HXa{Dhq0CrZj(9N@>^>{TFCcamo z74Tlj_V&-nR^tUPXbJ%YQrbQ}`UhYUR+UXl6KNtJNT{+MU=&?3u(AZ-Mx`tts#LLZ zCQQ+-vF700L~3noE$W@v3DO|rybe>Els%oo%LUGtUF)Z*4x)(l?>zil)Aawr7Hc!c-IK 
zqU5m0U|d+Rs2Xg27KDFnQfwIJoQgN)+lzc%)$V$w*$!FbUchnh$Wu?WKFU_b@v_f{ zLs?IO!N`EMX_d_5@d&(TT&x8qkB@B(d(4MAKRV(jluq#G@TXAw;y9FpD{4A-CBUu+ zm2`N1xZ@!e^I4NM$S_t@a!%`oe{$scLfQ=zA)?l()|+#mo16dV8!PwL@3CdH>cq#gq)l*(%UDg_Up#8V@?^3S}=!Z*D* z>fP_wlw}Z9lZFgz0)Z+QK@@0PesWOSCP6@#9igmnw43v#0A`S#c8#&`0bdH5)yE*h zq=|)dgb5@n!E%KM{&)A|Chpr=8HbTK5@jlfnN9BuH;@llPVY4SB$*_IveuKS5AN&< zFFctJI=o{eF~ShK9rq6~qk;|k7#DN!XF*~RjPYgSX>)BFb;Li2fn1AhL4_r~h!Vq4Z}ds%?KgZs&y#$NS35$&z@w-2ScJFz5Z*>Y{rL@Uz;KSgnY^ z+)}`?fO1%;xPXNU&-SeF7h==%hqD8Jopi(*0q8%`8}Lz{ zX(rUp!j-^oKHM*05ompKpWb9>%QEJ#7E~FjO;O1^TYV3qzut=eHNq0I1tg*VxYreN zjKUhuJ!TpfcX9%k&6~XwxASewiRMAUXq^4t@0~jiQqm(Zkm}zmgV6(xr)LlprIG)L z?kvYjL!FB+)*8j5`ayM&xPZ7}oh8zgib88LL@Kc2(IoJcv5}Ft@!Fa@Y_6%k;16Zd z+H}_IYTgy~jXX2Y#R-HbcT44H0iM>Jk0*@G^PWuA&GOXU@n%7+Pl9;x7cyOO3QSp=Rt}8mfFSzKgPyI_Cz&W_HtNBSsow zT5@kSPw&g7hS84jX0RAXRZZ|=^QZrsy-B4t6T_;nh*tAw0iq%d&z>-fB+gx#Oi4e1 zkwMUSdQ7 zUu*;*=A4Ww;i7pI_D#0)5Q}qvt=-^3Ub&K7)^vAbeg-?E0&J>5dF31K&u%KkL0Z4W zMRy6A2H|ale6Iv7*3sbyM?KDe)sD?GYFmPDy5ETI+I|}55G#4GKTX455P|l`9^5Z> z^cl`8o(hvoJ+`fYoV0}5?>5Q5E=cSZ^%mxtWYs}@kxtAr@~f|_ZF_n;zdLEwAvjB? 
zD-dy!s2V;^0ptg zN&R19oiS>o^k!X6As`uV`4w84K8etg$n*U(I=k>cg?#TzYoZOrVn&yMsaU55u9Vq{ zPo4WL=IA6wk}acCV8Wf zm1W_?HMb%z4UGa<7tb=~)JfZ_Ewjwb+(mz1dAjlU05}MeF%SH>O;9p-Eb?sVHl47S zi0w*uj8<@d1qW8CNhvq0@-2nb><+1R4DiIi2SoH@snFk0!a8-(mza2~@0OHQU!^*` zRBsKH54=*n5>4u&B1axzZz4&6*c)2?Hy<;q~ zx(lir*iRsg8lKM%SVc8UIcECTz2vea9KkoFaR_qC)N$qfsFl{i-ViT{aIgBUw!cmi6;}G={^TDm?u(jsJBwbE;d5?B z$`iUxd$zVxUD+RBMSXlQ!V;wP6R-qAC|yC7I} zZVK)-Zw4zgvpNu2Q{05AW8b#M)c9l-LX0E~1G1`J8 zC7iu7f(!s|+66O94#;!rBpcAP=LSHP{4o;JagzmtAG{XRS#}hXSq!Mb7gVvJ|2Zu^a6RA63H67*2~rd_n8Xl=(0^SMx zlOf|J_p$<}NIs#^dlPFL%fl6cX6sIJ8>E4&ki_iPx5vtUNhKFy{uc69`L8%RoPG~z zJ{X|^!oQ&XCV`dG3uft4!rQ4C&{!rIUECxePu%(>>M$qVN#x;s`~VFkVh&E>M&KJH zF@6}&p}xb`CQW@YahK810lRI|Km`ZKVo?Xq*4?NGLOv@U82wNt?i1*idiFQNZEfP* z^x6Cn%?PsBkGhO`q3SDqVKrC+sD3J)J?En)5zHLbyd^#CQlH)B)FNl6kDw1AuOMGs zE@ewUX{0NZ=m8ulF((;eA9f*(Vt8nH5ME`S# ze}H>Ad&Z;R_M%2|Zj2@>@p9Qk2>+^(`?z{l{A|v#WlRYnJKdXE^;%Zf@OJ+4vpeL5 zY0=SC*^cFU&0@!l<@55oCT;#ZA1rmKkWv_bVh;wep7f&fQbhZ2hl}eIo$ji0#8uTE zp{Lbw9kJ*)y)|ru4aMmWMiV!c|N;O9&z`xsHjT zBdvXTVHItsXqX7KBWse;s7=P-kjnKeJjW(_aFwWhhw5g&O zNaWfQlcI*Lz#wF2a?!oMyuSG?+ry~HtU!*sRE6G>cL|rtDAQW>ES)*IW9riarzl-d zW(5cAqNCbHeyT=O;UjnVN6KY)uMrpIU&_2@A7sDvHn6!d)UTTv!~amA82kCX3(grG zY%xC%v|jT64*e3TwJKL?WiQv$)8hY7rKCiL`HOab&VU4wL4#e9$COTF01!xgZWZg=8v5h z^b-4S$lcy1OmC=}Cvv;&gTQ?BwiZUyp_C*0m1O4$)J>zk$yg@-n)Bx;J1G99k_?YA?`l6Y?%!vd?`M2XCZc{$fMOv!q_z0IUl^X119LQI?+C+|Saq_@XHqWR3#G_BJo z2Mv@AX+T`BekfgSedQk$?Hl$|F%+iE+88x?`)bC^eR-4H-Z`FB%WP1Xca2wt;rE1X z1&sb-e@cfh=jvHJ1)6wJDrCSuM#6Z0DjB3-`lj@fIZuhS1_WSZFf`r#u%3)h2`1A) zBp2YWm6`d(Mdiq;xyPb+%F&H`MVi>==WC4XjUqK235yMhkh+b!>Yrhe`BIRWD3>R6 zY>lsosOvasOfSm^{uFE0BE${aRe4AWwb5pgY`m=1Oa<@e-tL|FFfGo1_924Q3G_3g z0npKt0Iojtu4s5=&NjT>!n8)4^6Nb_nTN$!i@JtHv0QaxTFry&Qxn^-cANNUSH0Sh z8&}+ukR#mbR^Jy?==^fDR4&q5ND^gNfQerRT>U%hC_?2wMu@*N)-Abs?RE#@;@)0z|!K|H9YvHYw_V?*Mn z3x3XnPe?5iobG#)nf>Uy&i;RU z-*!Em2o!pamYui2Zf~gpGA`A|o%%J$1!KUs-?u>Pm;C+t z$F`xyQ51W#ND7*3jq^GAxWA~PwM<&vLQ>RXzl&hw3XH)9c7awu$++rpK+>r01-O!o 
z`-rsBTI*WTHYERayeP2#WGyim%CuxREK>k=>k}ec%C!D0Zw#IxKOP!EHc_Xlaxo)n zWLNNtD>X7RS$I%2<6|^E*cYfDc0C1k4E9I=Dx@tyC>z6U2QyS3kt!8~NX>=CDKU0( zjkdijEVH%gvKu}B1{nNJGWKdYWE7$Y6Ad&a|Om*+J@VMUnS9oAtw1R(T0)#g?V)&Hmxdg@? z`obuni~VlRE4<2FaC+xt?LHbO>=6{Sa+>RstbqBo0di)wH(NP?D
4~;(>CDoI$ zLio(f@ktG7R(f!Rf0JHkqT6a#?YHjIL)4AD8zk09xsNwf;|!mq{_tM(X{aI$Mbr<1 z%hswdlv(8cQgJXk&Wquv%%~`@iZNyY?1>a;hyHLZ8U`B{V%A?^2b~VSBrR8l9g~hq zP&(D6D%Vo7@ti#Yr%EZrkH;gPj2li+)7g)vjt&!fO0rtQX=%;YZR|D&vzGFmy>hu& zfy?54|J_o)H;>f&T9;p0MNU7O_8wgy9OhioWmkDkEMySY_yB<5Bx7Mb7vxI6Bh{C_ z{{VX#>l-v~vNX|BW__-l6qt#)_R;Z$5Meb-Nb}~>#ZKQ9tGdTj88CRH9H1Qa)PO$n zyL~XhXx|7mY3p>klxfl3)3YNZc-!Bi&)DB^ElI4d&5DXBATAy)KRZ@@l}#JVx}RM48~kn!yJ<>Jcc;de?Fi zMtwxLV)nj$sC4W0R|Z=bPUnbMEzP#J?v~8|$#oWg0AZHkBk?qKfx}|MCO>q;0oPQp zd(v2JRDGY;>P>;pRlQYOnkSGXUG}@F$;|)RMkFN5b0@xpU$(yNYK!#B6;CZ!v0j@i zLLw%oo>>H|S;2;-suw$Yri%?uZ~{w(aIE z8i_@v6}gfKDL$@IuB^x??lq%~ORi1!PBz!xt5mwKc`2@o?45b- zQMp{=#x=6(8rge~@9+Npg?r9@pL1TX=kxIxOL`k;`YZ@LFWGmci-KH4n$8$Xc!=b` z=bVY0>EUHl{7me}1E~|?3XrmulxV%wB~fk+rWr9SI(g|ias1G*w2NQQYR_1^duqYF ztAA#4(ppUkET+ZWqw?&ylO{YgJg~6PFj`qyUH)wUH!Bt%CPnC0>HgFp<9F)Qq}U|P zsEEZd3MmgFB#>8-vw5oRxw!_hx~eS|=8TgzBg&!==C)Zx)DOcril`+cjjF)yA`@+5 z*otjeFyYnGw-}xPZ-VXkIL!`3-Ukji{lZ6?z&5kNZl_8t~1qP(~@-2z+g^h1pV^z=$p~1Hj{(Vg3ffr@y($P32 z2NA8kAe|i4#b>%@+HDs0wvy>m-%Z%4ntxYN)1lyGlv4gCkiXY8K2o87&Z6`De>o= z!@?G{n@!S2a7r{~0ebikw$k=``cCC|GZqD!p7iw&HXl3=zQ7gHQ2u+kBZrP3RdB-O z_{EK{D)VTXM5$wQN*a?Sf3%76(U?%{Negg-il7ync=woep0DfWCyO{ZzPGq75rbi^ zk0M4`#H{(`BN^;@Z~I5n0ckK-Zx>f`&h-B4vmlT%P^~oq+HH@U$E*Gwzs8h`9qF8x zxd(Zix2&#)C?JQ+kla$yYE-L)L3Xmj*&YHiFOH9qkBSZftDndYIN* zfwJ&5ME&-itWDRJASIK4-$}QxzjmKyNEFu)cw>;vgzzCiK)(45`nr8`Svid2;kvZ6 zQZXp0-tAcpvQ2qRtT&d?5YT7BAlX{EMMcKq3z&=b_JpXKgiCL9*#t;$J@b6#gS3gs zSn&fDa9HWVrqT=&q$UT)hy4GIW^LSAxKl77;-lpEH2n1ju4O2wEhD_@I>$GaMGy`M zCL^(dP>NDaZvL-rU8O7)c1kURAz+Pi(6a&QyEGH&qs0L|7KaKuOz@@+<&s!h&K#w! 
zeZ0Dz#5zgIY@$zR7P);rFE&kkuhQDjJ^Xmo>`2%!mFr-+W_H|V)zY-BV%+j$q74EO zp8E}`5Wy?A133sTO-ZS{M%6V#AaVhf$Ft!IL((`f661~LU|)>-y?7P00e7Cd;Sbov zOU3l?$x}#7bLl}bvpftb#!F^Ou}2*(V02Y=bKU^wjM_zos$7A`<+l6ym-`w$g47^~ z5{8i7sAF~RqPWmEI!YFcX32irt-@Q&0UACdrI6jOS;T+ske`Qip) zklYuCgn7G?zPx~)srQfUFrwquCdWIf6C%zE6iubVB9mlnFR zyA_6)rN&~olS?`|2ny%ndC5wTLWVVrz5sghI=Ot`$g3xIUz*5v{rwKtq{-j-QYb*2 zjv^~p6aB)PPbS8#DZ2We0j@G(Q!s^1kB^Nw@Jy3?tyzcprW5XhnZ0sF7#@5~7H}pg z=0OfX!T&{JAu;kuBY;!V_tSR|;65y4S5xCinayq15$l~*h(SW9@4HM9|j)jK{b{VXl~V9H<|X|Vko#D&la$}-u@tHZ!c>r*9f4`D7A;VtsQUQU2*;M<dB>2`aD%{`ok|`DF%v3H zw0EJ z3@+W7vfG0Qq)`Fc0T|&!g}|F!`p(B6!653<>c~5bdp)RA0ePp%diMe>zvR!28#G(S zZi#gEu?C5fK)E*AM3J%F8wPPlzSGO883}c*O9HNfP0l>oAsdeJFUFq;jv|G7&_ZkT zS$VFc_N*snu*r?>js=EY2Vsx-jKQ;!=uM_c0j+}GUgqadLLPz}0H%3U^&h9p%RT-y z#PYDngY0`!RK)iAzNk=tV_P2;%-c&uXn4^VXq!R2H=M7!7HdYuOfR*$-AdY)>O~{Zy0C_j_>Q zGNga};Bvgk)qLPdecPmwZOA|VWDfZs^%KlB-4-Hu7;>A0|MX!H`Sw)`j`=>aw|Y$z z!0&hkc|oj$VG!ceDzNl$IfdBwjAM{QDzgV6?Xj>P1*w8JEbx83(e^TTQ*4HN zm-2(gE{+~zB9naLTZe!_FXdHJgi0vhT3kX!c@n{R16axAani#jQkVY0Qg?p-0N}v3 zIzm;q(lOkMcP`R~Yry}uz^0nM{TjgfU?P18^lA$6KJa?k_Cb~m?Fl?+P_iMj=$>m( z(WrZkNmE`jqnByLR5*BBlB2nOo~;8f4V(-3>LS5$-VPK-!DF)ca6A zniT+#M;<%ZGmc7`S4}OrG|7Z|_)I@h=Q9!+LonufPU7fME~vFqeT3F=KDCw^JtLGHFanw2T|`pfz~UqN|Ln*>bh;HrON08)fARN+Kp??8Fvr` z2gNjR!8&RdQ#yW>?lp{0Y1Nsw*1coh8~f4Hynd@zU%t>o0Nj_1G%8BH}i zc<@Nzy(1x8&nSC`*Gpryk|IAF-ibbc}%;M_Jjvd}Y{8KAEzb_1p+gUCv2UaiW`g#Ed)ikSaueIw^Y>)hZ% zWB8!his#lzp(_tppdUDP^1-7Eb*k3JR~ZD=y4`MHB1dYuBvP0PV#>{fR%E|`j4D1j zNfs6BgW6R`{kLv3BHv^@iKjys7=Q`af4(-Qznv)g%}W_=C2by(J^lDZnAkOyz}2&3 z(sRXCOM%{?)~!y0{&MaX5ta)vV=Lyrov?pL9b_KJn2Oq=j%ZbMJ-(RQ`JKouBp}3uPz50&&GLW-%=Z{ym4y0IbX;|(D-g=2mFEV=PIwsZTQXV&-Giz;5e1VSmh^DH?*D|W$K0-%13AFRs`ty1K$XqQyg(|w zx@O^^Y$qcjG)EMXPcE`Mn3QKUP%c@z!hZv*R961N=}kjZ$=|F3&rh@dZOJabPWryY zLkCR?7ld^(3Ix==N{qdiT1{m2wI?@4Cf7q|LhaoPEgZ9Wlit%@Z4PeHJrXbY=9TAs z3ZP0A0%!4)#Ij8K<5bjt88Y04oPKz1^}Z5DjkE`w%rs=vx89|59A@WM8*4$QrXbS< zAGqyM$(`!ql8~+f-y>+hAc=*V-0)ma$5-Axv=7zXSW20;p~~N4M!d8hSFZO3qV)$# 
z)Ql{IbzW3ejq&k|QXtoaqNe_jzl`sz(h~0E z{nwzR&l%RhKbqC3a=xrwr^RY$5+kfDl&_WQt zlhw$Z6opA3F`Wnzqw93&LtmblD874hVi;>)NPF&mT^fuSF@bf?G2a#n@=U-3$rxGOm6UXb6*d+IC zP*OJT-)4?6!+ISVM4j|*afLq6ek$3^OUmL1Rq8a7Sh>f2R}Zw#aGm_~$f1$-8`%AN=;*09Qy;ZBhmz*@6}|+jY2cUp zAEvSbV*77UB7CV}v-GL-XtIBtvGK^s#t+WuSFM_8&6JE1c0{T3cgZ?yk(OoufGKS* zoT8Fm(=$O<5L2p{hJr*)FIO9*&sYI!)=BB%GSUwUW|Z{boxTk@yl~xEJds-H`S2qA z&rn*gVIxXFfW0dXlHne?@R>!1g-VWbGUIWyBIm17)q&|3*cZ;Aw~SQ%^2Nyx04>>A z`T4ZY3dhvqX8F4HC*?&(2588DI$G^&(EC$*OYz4<(j@`N;*emD!I) zC*A$p4%?3={|8`cYl*d8`nHk?W1!4SfcoT|JQb=om<6k3ex%ZP5vH zUaVb4)>_FwJg3Sw5e6|_fX|^dwg9D%ccfr`ljD+Gz(wPzYYXA}gM9(C^U`{fUe}Pf zZ&pQ@mXH#@y$gaCLG6S_%8Vu8LTXQK^0xA;*hKmitiJo%`sqQ>< z@92Z<;Il3#vPKdTCHi8>Ji6~sj9JQ^n_z09qzXD8!%>`o=-oD}HvOLqrHH{SXBCX0 zy&&xSmbU7|QxWiCLxrbnTf4r4ez)3^Jkzsj);%;YCghl}hz+hkkm_gi&r5CEKH(@b z6g8hT7}&#h2>u@cu$83AWk&T-nT0N!UQL}k6_CZDn5-pLT2J%>!IN$Lnp zB~noB z-kqoF@J{NasBt#>mSRo{o%KBbF#-x%KO79POR^3j19|4=84f6|Juk@iCZSBIp+M|S z(N_Lx9$Vo6c--AGi4y4y*lP5FZRFRv&aAuQs!H1`nU@ zgT+zftl^~qPY-kcu;D#(fzm&fnrWK1ja!!Wja#in{M)Wz3dX3Z)Ql%Gz5Tp{zXOGf zDyMA#!3gh!bcOc{V7N+lI)j=d6?cz7GMqv{LDdt~u3mrT*eTE3n%~mTqmG8d=9%lb zwSB99nVO;#sA*Ur^Na(XgG@SwK2~9m-S2%!my2k17Hr(xTlK8Z(+ZP4Y_ZceJ*`W% z+w8lX@hmJWs0e%Xa*JwSiaGHam=SPxbdb-o0`e1(=blU8Qm@+YR{tEWF&M;kV@*5= zsPJB~9iO*PHwcA-9TPZ|UXJ}v*6zEUd}1)OhZ2bLzQX zxIqv6EeOk``2TX_`or#*H69>oi5}~#Mx|`c+~qw4g7Olq^8>i9_gt~_Ck57m4Z7O9%X zzC_{K`so?{chmzTaOH|7#y+sASYV)ll-3+9D(Ws~0m_6A!pBT>tYbHERn}|xD3Pc{ zh2X`ju%42;(n|8`4#MYHeB97+39wDN;ZCw1~4J^`dt z6TrdyirAs4-nmH8rR4#mCC25?Y=T!5T;;2$noz)V2>bf@n|0D^20|mmF&)LkG4ID8 zf~DG0hn8~|iAawO*BBgI*?WJ&=(mo9EsphxK7Mal*>N+-H3vK>fh7x2 z7ga+6jFi|ock0q-F?K;p-8O`B(<%urdch@qFy5xQL_W$dMa>ns=u5q-PuqVxkO=pZ z_1m82&Ld&=;jiMHRl6Hua&kSHUqsAc4Wk*(n^m)(a0&mo{DuyN!*j*GQRgYgg62Ax zyrnO3(d-}wT8l&KM~pz*)eMgvFZTFM!o(aF;>LgpA)^lUp%?9vKN;V_XjZDco^8;| zhS!|-{S@z1sKs2gI_1((&4P^KUqi#%P}PRNljd^k z!;@9(jwLpc91f~*BAcjr0aBFSJ(mVZh)YTz`4&)ey+$(z z!}Sy6rPD-dNZQq=DB4UVQg9je4fEm?c%a2@%3RxG`a7CprJYAk)Bgh`GKDgX-Cpw6 
zJARa(wIm|FReqX$6V%jA#7J*9-gB^*f*Q33_=a`k)s5C?N&z zEWk0ZtygIa8*Gq@J2PnB_aa`*!ZvczhC!Ta6v7B~4QlX3T_iMe zPaK!f-k`CWcQ_=?r9>E zat5ei#sZwNik6)`Epq@S6`#`;Hu2MTkIytVyBIRYOAUwv8?wX@t~Yhc^Mp@d18n(> z*pFd^_Xhg%^PD}V>i&*aV$6aS!Z!_BHZ}ZJ!hB?SKU}U=dL2ooBQ7nd71y3UrjYAyH{T6(aR>WKFW=IAjXz;gYX^q zNN`kJZF_nd@xE=af`3;|Fn@MdJr6Au7I90kSS#7ZUAvxBl+#_Heyc7x8$7#KV`&ZZ z0tM;)NZgWEQL%J9B){54ZY4q@gAi zRgns$*Izk(>Sve|rvk^ITHQn`0SE(1iZlkq#HPkJ?L=3ia<}Ly`(V{m^4GWJq;a)8 z(fYQ!v$&`BdTRMW(*=3yk^$jcxEdWR46f$<%+=D)lphRbPSfr21|ScBfcZ!&K58hr zr0V(5;lH7m`rjP>F@bENMkscM8xZ*w8VCC>zLBwqRB>+y$w5fi;M_P7GIJJWXLI6dP}9W|+JbD|4i^1~|kC(f(>c9g`JNO93UT)n@!$6{Jl zMS4ouWpj^d)~IH6!jO3tME;t(JVIi75H2_H*edP1K6@U>vHvzzFF73$vn44p)++q{ zOhu19cb zz^~I+m4u;aJ3auMw4RxiJP)p$Y69+kXzARVfCBEyF8x;GjHe za4av?Z=}clYFr5vx^`XbF|r_j++k1U28S@4dG*!(zLkaz-`;r>KNog~%W^X0dv?op zXEFbZDoK5J>(?6@4QWqN-0w793>V7;c>FaL`>p-fQZ3N+n_QJR*Nx*{5QEa#i=0Zp zY(56d1ca=BC{RthZZDkKuUq9T;suFJ0hPWH_g#TKTYk5-fF?^l_F|oaF7dc9DulC=pAQAjA#C6|%tx_XjXi|g2!tG4BV@nQCd;G~)c)G;FH z%?_>n{G+`f^Uqbk6kluq$f3DANiCkPvfFz5dpVdvx>DIuP2i2L@;Wkv83jbhSK-^i zapWKAr3((8wYe)cyxorw*K$9x7$w0MMR1tvLDGCX$I1JnEFrM}-k0Yd`wrJ@*pnkc zf{uOB_dGU#EIvuS0eo>hp}46TdYycfHYQB(AkLm1)W zanYjcvi((?GIVJJ#t)y4@!s)bGyM!XATxYXXu9-{xqel=cm3M4V6Xg0Cf?u#<>N+q zzX$Cy-%b)dUfpfK#?|SzZ!9zGzho5fUM_QqK^Yt=4$)g}m{lUZYi`8>m|AP(qZrvt zaiDepl>TGyX-v7OcgV)ta_}?5+R%Z$^Ka&};}*jn>*lq@Zo?l7S&+s?SQ{U| zv$dAY06%#0phK&z%|JK#?+bp(SJ5|s%lKi9&GD2@XpiDW=Lq=QYZhAIWo}|f3W4VX z1lmPiQYAH}C>toJ^%&H=S)TF$)8k^Sk8JwhJ0QJ?a+5JI-vG``Yn-*LQB6u$c%OXcJG%gRmjF5D|$jsCs9sr+2> zuiZN(Y6kHy$WQ7MYSGf!k6Q}{#I&#OlF$|Pg9Y!=!Op5$U*USk2FFj38JgBd>z=OW z&-spOe~Zb(&J{yV?&>C$1^WD``gnN$tt^Aex7nr{%-?b@i-shhQ-h_mJd@g_jDI4B5dP)=kBhKXL#Qo z&v5$an`QnYQC@$*S>_0zu8@%OAcy5!tm_|!Tq?nTU}fRWnTy82^aZ60-n&=A)uaDS z9LC?Y!r~`IuA|-Z)OLlZ;%f$9CexJ{F}yRiHy!Uo3jgHua6|b96=wP@S81iEWJsnu z5d_{8^o@AB4iprw;Fk>-RLArU5c(Ds_+6x1+7W!S-nD|oUx{-5+Nar`OJ6{7y78A; zVSfjNKIqbgDA?Y-{XCI&;(acW0uB$e^{7-9LO;r9p_TMcY>k%aJp_ysc&b0ig%hzn 
z@18xF!dod2)*`}+Uup2+sW_z*4O#9SPGNYe+-kDHk+k-|7Z8eWw_#S8Bfe?Yo(Un7 zI(v>jKOxG2n;E~6DoVUNI+KAz*I6pb{Nht8A8U!$Ec}F)E6l5%aV1%+j%)Tw7PC!> zrtvO(L;S!FQoy`CzIQFM z22P=cHPRJ!%8;86?|>ZNN^V)jc=o46t1a_w`&fUo-Z7R#Jh;r-qQbki2Dt>>hW~fz zaf6yBf=-XZk$guJTh+_?R4?Mq?Z}^)2IVf{;7FfnWgvtuL4iy8OBKeuPu*2!KgJSfdyQ-1tZFNp>%%)t^ zB;Bq4+g{ITb|m7~pZxo}x5dty(s=zlst0(hDZyYUL2-HYIk_~}`*o%;EAV4Ibi{BY|X7p!Wpd2WUC|A{pY@%-@LE63hPac4#w zeZQ#eoAtaZ(u{yWP%>~0BwQA2m-pL&wX>H;a)m^JL)coQd3IiYAznP7aJ(O3iZYpy ztz2FOuc#Wd87bMiEfzbCu8rOn9MGQqQa?)V&OOc#92|<6g^+nH3>cH>W3Bz}C!CE9 zYUcuJ>=H75YXNSC473z@RQc?`G+Ii)3n#8*RE2!+6PB-DN(URX;mp@E{!F~ndaax& zsZ5PYszV|XZIHec;u9Rb?xV8U0F$ZDETiT}Y4T}K5f zi31{C)BLg^CBs$fSP5;s1adlmBR@3=({XbxQ*Dhg^T;c%_=-cb@SF3~v5WC>RZ^21 z)M=gyTQTCmkVu)|QeF>rtW!s3?Q$3Jh9FF2SQm&#kAxep2|exwvl$RAsIiTmSI-!u zGgf}#!!7?P3Gr4_H~3%?I^+|l|3=q_>K5)crDm2~AK+|U=aefd8=9R`xPhikO)l#F zOVVwQ&OG)@Fg}-V&bPpN3j-=Ah~Pz#js30O6@jEEJX3{Z&3y7en!I=PPeicad}tnu z)|u?7#utUx5m1oqoKH&7`7w+c-p2v1!N*pABe7oUeAgNsq34eLoBWWT>?FbAnopk) zT4``|XEF0KQQw!tlxZ!-mitxz%n{1l#9wdOg3yCKGrK&K%Pw?pm86?<^$M! 
z?9^4fjqiJv%Q(z?sShN>|DRYLp@_D)>$s(SK&^Cgqz;^0eP+l0n3~HRg!S&_ieS2G z3j;Z^*Wan!43N%<39!CdV;!(mvWdT?^@T8L?2#=3gFTtu_CTHauYF=z!c+sOyEow^jswZL4kNa8c@C>5sg+~1tSiSr1=90%+UDq4;>P5z`Rbn8v15IT zjiZer5=|4l0*KV1*<<`mM;z#Wwl6^-@IiRJ@i%xGBGu(nQliF3ldEUd_NK82DdaAR znu3OA{M^kasB8LxQl0M!Jz8Y2$DE-UgNme&wHx{06x5KkW+<7auqUdpLCwMGOSjRA zt*ta-Rk~5`7Z)vSoUU9-Vw+L*CMp87+-&ICQQy_uHP1rVLs7l#{!XYsB=-L|qj^)z ztB1i$-ScIFb-(;=7sA-a!pt31ZjJH`l9KaVU!$UNO8R}YkgI=Z6lR?!S=^L5;xU_6 z=a&+&QRO$mj*rt6^GZ3coJ1BmZA}tHW-ra^51Tcn(u|GiOLc(BMrv^B?9ZUcFbN_s zs*i(l>Bn8_ZLXqaP_lQU{ToFLRtI3Bcs81c;@9<(K=W-C1NIP1l~gOg|9kheMKF`w z7JSioikN1Oym2cfnUM;NXq_KRuHxK>X^IGR4D6nNVAVT z7rJ{P_o7qyo4!vnJ2KKlL5JeE;yvfmc=QW8*#ng4{!Af(c&{KY=Wjf{IjfiTq*N`x zos`xBCe83S`|qy@6#R#%bKBU?OUszB+7zv*tsft#CO+=#{05--^`@T~mIf}nEyS2^`j`1Wma zD!Cvzb!rZx(uV;H-qn%<*u;7NEGkp!<81%X<9+WUs(GTwY}vdq#VeW0Hj zNkxJXdJk3<|8TXW{P?kHu{*~O5eDL{fALg2XScv)e|Dh8MWJLVw_lP1-Mf*l!#$%L`^2P>XWUg`-q2}D0* zKUk@PW(Am*7dIM?Wgf3T&5^>PJx_Q+Vpgk3_f{(D9I{FA*~cw61!x8~HxZ9>Ay!BBKG`+2O%q2b`sK1kN^A zN(3a$Ro#i9ad3P#NP8_R36(cDhx&Y&#?SB*0Du6om|aSwOs8LaGp6JSw0z$$XM>T_CpPp8WuGm zkk6ZDi#`VxNe0Y2wR@W}f3xxmrd%$ZV%GlvmqrP7NABPyaD|9+pF~V^EzWop7r!~} zZu#UWSW-KmVSazT>tMBKZ3};Y0Zl`Kk_9b;KIk7w3Zk%0%yJZwx8m&Y-f* zkH}VXM}zkiSM6`(64{S+lRje-DRE2aloprakRy{|Un=(E87zzlp%7QJIO z)_rb99WI}q(Em@Dk1twZ z+)-TGxDpGb3w@k6Pi$Pu7`Ke;?fF(IT};sU~0HTkEbK~B|H zE67Db{IeCdHkZEYZ8Y0N^m`gFK-VBAAI5jBM?hGdN#dvwhvxpPdc#NTk2)b{9{^Yk zM%@rd`klx)+0x56tPY^G{`W~F1#Ok$1??fk5vID;QD3M}-$hi^yfvl9V|A)oQK-2Jz9n%C}u1(&=rG0{?3z{<`s1XnP|QrxNGU2PN# zH|L&#tP(rq*AgYEXp*pb)Kv(`Mbx7GY$?nQY}XkpZ-bG6Ds42#GCqUib&7Xf7fvnM zUNYobRXs`vlZy0j^~?r@4r~S%7iJMF44ZQA9>BWG8dB<$m(1nP%Kg3W_!d}+>gM9$?XaOqKg@{LXo$e=$mse;4K-pXoN zNbUEM!B4_>j9p$?bX;=-VPZ(labuOH|QBLFy$5fV?bx&=Nb( z)Tc(r*AN6w>FpYDGpqQ9uE>#4>kUUR(s4oY(D_4w9kvDT$0r0QTYIl3r}WbgYVRY3 zw zuX}Uf#|zY4D$&m8rFrj!Yg`wC@!s*36zzb^+00~5R$SEv29pOzZ{`;Wga}UbWyHYD zCfodB{+Aqe4fXK>1W5*D?Un@(`tx-HAeq|6Nq}RK+RS^s`BrL(V~f$$;9s?y%$c#w&? 
zrv=@W%jnp@B9Pka}ZYfCyG7i%jRU_1}lzyD~eP`6T2 zytO%P3K=Pb!A80-?oM9e3eEBSsJ}BsHY&UsEdK+{@Cv5O%k(K)q+bs|GNeG1WYagT zcUe@?Zhou2m%V)@ZVna==AIe)HpznW?F+qhCBJO5#paG;ceirv+)XUoMzRcZXq9hU zG~GuVru6#HG~4AIQ2Q*CyjWvBtYI@7oEUUBGSU~-Ovy5-}+{{V0J{d9Xl~b+X>3y6|Z>lBYP4rUdl9G-g zchsH4-jNFX$FEWxMbNcRVdCw4-s1d&?KKVA>!ryO&)aq(mHql=}v%A(+{4bxPl zV44~=m4;;z?>#q`UjysmfRjvb@4x-8uUCrx`8OJ%grXeBZyH zOtNi$vZ+g2tOEK^AB3>83Ttn$`C_9%#`VMM2MfK!4%VtK|A`X*4UkkbK+O|1N`(nHup+_aL%V zxV}_9^NSFqufWX2d#M)Do?d_IL0Yx1VivqK)5}j?(-zXy^Kp@H7%LJnp7Yoa`Lj{4 zgOWvk1gC$xm1iN^Ima|vbv=LYzDB39q?WvvWR7Tcekb^+0irn&_cLL#2>)L}ewt|6 z%*0*n&4Re^EADlIg*uXBvokBGhZf>K;G9@LlR)L4u1i-UBb@ekUw^;Ik=R0XN4ifh z1oDj|F{}%+1tlYu;Gmn4)NX@7Ajfqz1OP7;4dmUodMTx0(CR$o>taycL6i|OX z2l`d>2)URZ#I>B1?ZaaFsa6q|n7rMX`8T$7x1#*sILizAZ}Con53cK2n74-8@=-J9 zGt@AMh%0J5xgoz~gqRaw0iOT4T3xA>n!{kxYutBtUdreTL&CQ1<*qFAiipz%LWJq7 zC}emLZey-4urz$OLTA>eTck;Iz)EJ^cXrHVBj}*LdC8ciguO?da3>kvq;D=**f1nE zt`7SeIAzlKto9x{Sv5`E$`bo}{yCb(!{6$2QkDkP5iG==mmj#&ZHBn{$x_{*`W|zF z%(=p|fDdoolffmA?4-I7&(5ot;wplfq7`XY$Gs^~Kr3eg143D5!sG70mR5bW3)mfi z5Ur&27gnFdmZXRfYt_{V#!%sJ^=}JFLC@I}Hl-W(rTKuzb}IaKgeLpT38$1rU6PRI z>93h{8T$S2ctiWXg8w|;+8n;WXwFQR@5m4hotRSz{ETM~`~aI=Rq1H(-RyVNPhQqH zJ0A=7be;Ye;lz{hV(m=*-BVG=WKr9S^4#XJV{k+8%;;)!eEGv@6HeVnNvF=tub=P5 zI_>>El{;G7u?zI+wvrqDii@6mxvSlt^7q!0?mI~X&15r@?TDC%&E8KvyN!#ta!tqt ziG(zou&NIiTuPA_PIJ>0OJQ! 
z)ix7q$aHrbu68}*HwAXO3o4Qx1a9~AYAi}<4M0Gw7ZA590M}2X!}$1vxnEwJ(f0rV zNuQ+n9$qgXV1<%KJmop!o{{OSzL^LnUHJX=1u-y(Hv}+tBLH^x2?4*(KY-o91z{k$;}cm z)8anZOi@4QR-I#=4XY?Q~LDQOX3?>Vcf=dNfjA_~RrxgXVcJXD%5*A?2qZR`gtGhiQ_MA>Zt ze=#4t+Den(?4o_X>@Lv6YyYr=^T~$7QtTK#eR_!I7xnx?*ZUui;_#=1K42vy>d@+;#QhbNQ^s| z1k#7VK}pJpBn`(BnI))`|0~>JYGR&QEb+$zKv+bT@;UP06jl zp3U~#_Bo`!y75KaRR@)(abD=!aeb~sbnl7Fkv)S_KvrexwS^dm+Pl3xU?d17XR?@Z!7A*B8x4p2abS6peRGXbP0P;GW9Wf?ell~wx1aX>_+{HH^w|VM zBpI8>`&cxJKt_450WFZ@zlV=1zm{<{saT}0Hj@0<9#%FvWIr-+4XpRAsu(3A2Zhc- zZfj~{j9!J=BEdd_>qS-{+Z{u2A$+Pv4Ye%vm_>&u;>>*LN>t1r5K~3(=rbNDXKLj= z8U&n*> z7MXHA{te5s>`9d9OZHCGnCFJ!gH{@}uRYQGkSzdg0&aUjC{Q9sM&L_~;p2zOk0vH% zk>`8tdoacfXZroL=C z3b<~~HV6+2slpHhywwn_n_%`h-6YSXW;~!OWe~Kxhe>+Z3#x<+!f(j$V~#e2FZzXm zya#e0c`@9dLGvgbx4+rxmcP6d*csPR2)S(Uaa`~_M}}YJ2wKXYn^L5DHVuWGEi(9( z+kUFh&o}sPIH5jSqUm1m8P>htSJ?pHG_Ch}QyLiiwLovQ5Is#A7D40gg~)hJ70e!+ zQl7h_qneS7-jTFJYhkd1^3wnzZV&%sfXx|Jb_Mz`f&s(1E!(p%oLm&m$OOLt9T0QK zxy2ME`?BF_(;)1CeFYQjzlaTbs+T5~W#yt7Z}zbcZM3n7PH~aP3<%(w(dVXFcU-$h zj`Z|>;+q7XzVQiZn2}E2s+#dtdN~Pa;=QfrMIrRPhivw)urVXxzbQQ|aPa&YmWeV_ z@i9bCYImY&B~G|z8aaC45e%eJ)V;-}5UMpDV}in-;^*Tk`>FFmF$;p2^IoNKwwAz< zwb8xtFuko&OHbeI4F6;QFyCPF-KN(tMO`X=i6mr654?ygtXc)eePfnZ(x(%MV~6lu zH5VF6EPZBhsC-6YDxoxp${Akla$Z1P2`q*+YeKP63GS`NrK#T>=evBc6#t^$d2PF8 zYqYAe;e{anVh91(QXBi|?5=MJvu|DzBkN>3Pf1T*&r!-cx8%_TTqV&$^sb*UrzuL{Z z`Y69+6{zzn^|_Dt9iKlMVk{bezmQ=swe0Zsg&$~iw4p5>jbZG-n+w9I`lm|vn*ul5 zN)daO_~BGX!Tqgrl4n8P`0pR>>iT+c17nZPH(6DV0(TBViXto%%w^QQ-Tx5u?@`c$ zKfI~Ag0rTVo0!;VYn(epn!oVJFx`2_rP+}g`EMwdn2tmJGU?zBb-18pCHp6|3{S=gUklq9}B!BYC$h4`jgD6c`t-}#v5nzR9K zvOa>!K_0&yAKePIG{oE}+GWouA>6O*h6eYAHi5#LG6fVywxL3F>rvnb9$fH}FUco_ zHZ@jPJ?+!XxxaONss-1$cA^o&9QqXN`0TP!GBnE30kl81?^YjR7T-ojLP|`IJ|gpM zXH#ufZGIG{$Vsn2Xm`odi1v&}|KQ@@;O~(Y(xOp;l75D8t{^;yUNmd4Q^;(9=(dpB za<}z8Z-=8BeNz)J=WRwvPz1F{Lralyu-EA2{LdWQG}KI8iG1?bPGE>dL$hv7-Mld3 zcxzgQt|uffHb9?YPo9sgCvT7}xGdm!Ha~}zxRFY}NlQ!R7p2ahrTdiafWzH`C2C-j zO>N9->`i7OJc}qiRyYR7v!3`MWKy{iRw6JuklJ_DD0%z__*%tLv#TZ@je>CnN=us* 
zZDIZcNH<Nn-CuL<$1*1y1 zrMpexv*)qNjt+BgxN)h99RQjaw)baxx5oEb58F2i3Up3Z59AK}HYsH|u&$&A%>8K3 z`FNsR)mrVTpPD(G3AJ(^^JtEal6OCH1nsAcjXhkP+4$3{EO4kxJ_+x9O!51)niAKh zTg%iG#v>vcU+Q}~Og#lGiZ@r@Tl&1_cl_IyT-u0zwE5yaF9WZ}v=iz$X*%u4s)g$Ad zY>5&PlT6vtVY@7h7=Z9ynX1PGw)2=N_d#$!tMHiXyqf8{U2I5AZEpio(k%^?{-=X_ z1p@7+VCvCrXA?G}i|p6>TMvj;+8|Y-SWW0r@c+pVDqZ6w$S}_UHVM;&4{Sn#6V&xU zR}J<7q`N5ormm;Of$;QP)Z#y9gf32L%!#QE<}b$jRO2wtGybC++76^%W%cRiB_r=| z^Oz6Foc<|G2zlI5hBn0*o*9eWtZGx)$Kx&~Eq#toLyHUR6)wskxtYe(#1 zsR|Fc@6Q_Bks%erc=`N404eu}7k@J&O)CLo3x3?)j}6E#qUSEMpM!?Uc0Z4*X8~n> zM9FNf#(E!%kkLR_(e|uox7Mu^@=KUhnf-&D<<>9Om5ud;O{ZH=!%*VUYPPK<-36#L>qddRVH1bg&o>&ujvdNO|7O zUFeE~y0r}VY}Juqq`YuY72oJJC-eBP9_P7pjc+n$#XTEmo_v-gwnv5c0&GCkCwI{& z97*;=K2LNXrw_YREFm>K=B$|zp3K^)B}A`mU?zHEA`cM+P3m; zsePK~o$K%Z?{6+SV{#=)LJ05%P4^Z1&*|#oPk6@?P4$o;=B+ZhCZ%E|nWbm)aS};* zo82|;0Scq(1l5^3tao1j!dJ6J=f>w<-uDVRT<$09KVakvvw6H^g9WjYv1btv!!`YA zyR^R=KisWJb-8M1VBt(!p};>mX4fwV@RLnbU`GEeGq)LhQBg%y52+6i=+x74(y7XP z*68~}gsYMwCu+ztd{U-BRF1x>^!D^9ae8lO?p*#oLR|3+|bCQ zAoW9b%wq!NCWaI{IIUMq?Ix(jRIH@pL-(t++vu4D|1TlkGo?d+v2)XwEY_-8mM_*c z8~ls@v9S4}>5B@u{{U;IPwheS7q%+BUggf!ppmEf4p*%TBH0RH(Ld!-PE@^({jvp= zX+#3Wq1XNQ=+{F%sXY_!Xx)fdMXRC?x_Fvkp87ndBc;3FU39OZ1#r{JJZE26ky&17 za+j6;=QC8Nb)sdgYg7HZw6%ahNUcxfzO%P|w^Z_%!ml6eY@~|H-hp08whq3I4=fvY zr-Tfvm4@snPS^@+h{Ued23>2Bx!1KZbE2jZ)axykhxT!|v}P*t1owdP^0CKyVls#lIPwd%`MWfhin6Q){f zicijY!@1c?3!lod;?D}TguTYmHlGS0{d`CxZ#z=W*}0!{U&l4Ile?D74A#g`^ZTBq znGdAqG#Q>cmQ-u)W6jWh2iK(cKp%uS!z9^hwMTy2-WvXPoDK51zANOzrJ~jbh==Hr z6W+Q!M<5#l-jV!`rjMS)5d(p~K%iVlT*sfqYh56Z8l<<6ZxOQX&n&|lBHp8-p5!pB zi^%WpR~L4vL3^!+;Lk^0*6P_iXiXiJwCJl$)hTmRfEPa&HENoQZ~h5(80bBXbf0Y` zrxryJ`Wy#@aCmiv@uam~K>U>U!PYf{$iJKol6W>REZ(1<(dYF>74vmBVLg`Qk%WFp)WRJl_kq z`qdaaPX8!9g_-$ipw-9|lA!H}+J$nA%2cmS&8q0K?stkX2*PFB`}0>n-mU)X5kBX1 z^{(-!>9<#3scMc)HQUv!DY<=?JPC!51I2ENzs>VLkCR?8N<#Dsmo<;_YnfGGg2s;A zL(Ucs#a9xqCN2vVyLBpqD$aw~FTIh-ozmpSX-(JAC!tk7$+xr@1w|L^Q_A-*;4Fr9 zM>z&2&n9Z;c|aumE+q6X>JwhC=76k0u}xaAG4bJ3LN(+;w;DNlrh*@Lz0CAx=naG9 
zGEK?bYsE#sA8r0;EyAPx`?fC^TljFoHnrXkL2)=@>c*8jf6af{cnAzWr{20eF1i4A z>dt+CT9vUj&b9pIO$Z?9pk1HSg+nxoWQjGN_Sf=`Zj|XaCxb0@j9Fn4pQiecv_v=i z0TxpUb<+SzAGkHoa5P3*(X&gP7xGK%tnoa#q0ws+=MB9sdAG-em?|q0*?V%+DgD~`Qr<~xn4VAKykon) zWw^Pmb6<+`h(fl9znH%QjQxxqGDM%Q8IWRKF*0QAF;c8m<)uH=Ypau_P#+fgJiZn% zD~~si>$WDRs1Ce?G`-KwKJ`@*Ik(~YiF;jYp|5Rll{>p|HbwYjzkV4MIQ7SL_Qw2B z$rI0S!EpPdPLq1MiL)vzHd2FSk^834Y{H}jRUa8+r47YQ*nR(>& zk!r!T2k7q*^=7bVw69|KX$f^JrHV`aL^{9RO8(>i@b^F#@urCgnH9G+p7mYBPx zW3;{&!=vSfJJ(6!~0T_V3pxG+4>@64o?dBf`Q3j5Zpc#z%9?g_@v=F$k{OKtUKAVm>j|J($rHyTesiTTfx2%3@X6^Pq>TiNmM}|E`w` z(8kO(0k$;8r{vD)Z zJ99haKfumpdrVe2ZXUHL*8z3AB#qqNQe;ixSwO4A*=C5#e}H!V?BiR~8*rUo&u%M|+vJ!1GwfY6l|mTtN15rl@1OIkcih{z-dC*bC4s`jug*|XiB$EZFaq{=VE+$089DK6 zD@*HA`t$&8*PG90t5MF2B;5(zy(JZMx*C0Nhz|N`dMA5h;4)bi5YTOB?&r<>uGGfN zOUJP@kfq4!O>)MawIe5yJbtAjJgbEoMEy~vkAdBQt3d|RQ8sB|qt~8_@tBgwNaq8u zz9uHu&S8st^Iy==>vhhX&R7}%mqX?X57)g@jl`VpW~5h;3E3*UIE#fR$9&{-LLCGlkaeu z0`>L}(~?TO1&e~?XeP~Za(22eUiWL^cOLFu`Wx=y17}LE8rN5RbaJQxlzV~KXpfPj z+`zCxi6{_ik(>a1F7zkQ-x^05A_|3=!yq!k%#d~{+h^8{(~Pl#gzq}>l7LQnV^7=T znC3Fdk$hQY52b1~qXM&gOrClei<%H&!p8cpXl>of+zRqRrSxb{i^XyPEj_3*%^dJdk?En zuG;KmRKJ?9!H*t!%-gUg{8@5W=0 zGJRt8X4J~}ok{b0g&|8m^_7tmk z^A|&|n>Gh!>Yqw_@&N8@k-K}0m#S!oe7j9-j2q0o(|H1j2)tqkY@jZVz#^AiAE6@z zoMU9$ZXkQ+6w2+J@5A<9a+!_TR>f(O!`SAc9ebv}g4}eo{31xR*vxK5vs=!%=L{;2 z3**h>$fDfW2)`*_NEx?`v$m!og7O1~{YD_m#;9rLmGbqNHuNgb>K#8q#ITZS9b70{ z2Y|zRWdO2ALZY#8G~Ku#rJ36nfRlXDi-+6LNDsUJov>3_mxFwNT^+5)&44fgWK2 z(P%j$fDn_ccBvmkR`%FHP%7yYYR1}wrCms8~^BN z+V8TmTze4i>op9AfsQDF?tS7k$zv*HDu0I|fWxE6-vmK&Pp!FigITpzd07AA(^xXa zZqJX@^&D?*t37{xFk(|eV)PtqE`Q`AT`bRGT626L0tG=`W|E6(2wu%N#A}iJZ_=xz zG+n@Mb@=|~h1;q6(n_*Lf3~mHz3nxhJxC8RH!%`?`tv(uLlGFyOhUqB2wo0vo2zv$ z6j`DcA?r4vs}5^?5i}PI2keDr2=DLcfx!hXonD({svsYdj5b>it)wV^aAO)qb3_IC zfw#*9>o1WuTEaMPwC%WSZodC;Z%o^cPrSln(b+7g{?1bpiaDilg2Qn?wvKPSl)Q$h z0Le4WWFY-{bKs^-KH+MVsn5ENCS5DIAu!;2e1#uksjx)X9rFF|^dcfy+3mUknP^L} z>7k;`ElaWj#~ zhw!zXLVGxEJu*8kt@OXg9<+yku#wq7z==lw0~BGMURBalv@x$Rv5AfVkGY{sMw&c* 
z{YkHn8bR3H5@(Br^BI%fw8_`Abh?g(Ury37UQ;Q-#u`CWQ=nc>o)v(*F?;S3I}LQt zW4c+P(ATbkT?GI->Cq!6#!hobCwyilTojRhJxtw{1aR^7m>YLSOB1!nc=$Yhu(WUf zhsWpjlM59HRvVs@t1ZyUs6frp)crhWD6Z<%e5yHB`(?(^Zq>L;(!!%PPP7eEAKQpc z`^wfrN2dqudS%TuGqu`U>2J+Jtql6o;C=ivSAo3nCjrfHJ0tx0XtTF(*L zI#)6w6$n(~Q)1YOjZJFz53fQp+|Q!<4q@KX?|$S# zS;v-}1&V^Ub0%HZf!r1LXtc%9_7uFoBwY#?;P9ZLH$DQnHB&ooUtJ_Hl6;oD@UHi@ zqb}aTnW20S8v+jJ2?P)G-*T4fVbbh9z)j)y{po6zPu$PLU~PO&_T{UG2pbanI+<02 zfeaBv=KTz9VBP^<#tV7{fBHakI2U{9sV=}HSvV!xFj`_Po}6N&5}%rmXI7azR#}|B zAyfa*m!8M8vyZ0hd|=)WrhkKt2i#-@DE)%bBs4ANfaglW~Qj}=V_5QVR-v2#!(esZ8}@P+KP z_l|)5oEA9Vp0;cDdRXwmNYNlUg^v7MEQJB0n~ zcnc8}I-)-;WpJQ&DLvEh7TyC|rJD{TY%|vjK0=mG{{zTg879TV{w`&T*9gEW4*LUa z*)KPkSu^~UyWu|Rm&LPdX5v3k&1ai?-L(}v&a%ZPH#IjJ0(?;5`inYh{4mjyYu^%T zWR=Xx_IYg6YGmB2B|4->LhPs5i@)C)xD-hWHsM140Pl_RRgWIai-(NvqVs{L8{;x@ z9XqCh>VawP0J_X93pea z2zVK1Eu7&V5StEZKiw!%jDT$?#n(7> zyybO?*mXg>SQ<>y*;6*9aSZPg_#Xf^XztZ59DYF~FTbNn3WGBBU7ch`P&AgHevuxdic`Dw9Mh_KGjsP8&XP=X=iY3!F;44fY)qakgoCE+^}AM zvS=|SRKT$6s0@=tv;rjOq?`VIesybjY>#(b*2sJuGAz#|)-1IEtG1Bu0CojPDLMxF zOFPN2)|nV6yI2R!unD-9+7IyYJKl}&g)N?Z83_K}c}RdlvUh;~g#y6P-I9HqOs!^x z_ovQ$_@}3Ac2Dr$Us=*6mT8fP^MFwcA-Pu(7MKYB50~2Fg<1xy*|a`_f&#hNLaqM2 z?Ga=94bLAR)~HUl>y82+C$4TV4*f_pTQjPjZEjT=S#|q$ut51V!(yhnGQwp^4usUM z928;aStXrZecJ;06Hm;;?Gj9yy(#cm(bGM{go(h?Zi8?;0DZ^^n=dl5$8~JR9ake^ zA$%7(%j-ohYzYUMG1YHqUA!AU%sC!7iQE%@={vyKE%)o}K*3z9;=P9$0?Tyn{JSAY z!bo^+HL*zgJ~62|@c+nAYL&n8pC9zHCcZTi_DpA1O$|CMB9+w2aS|NY0&DV0q^E$Z zOHZ{=gZNwL^0Duh!N2pQF(FQ#h{tJK5;0XSyly{?*nGaou!uIUQ3)eXfO$aRG;t}d zVjS=g9GqUs3AnF>>fz1!&PLHr5-)WQ#aba>bADo?JvxF}LRe2nULj$7?vTFYxQ8=e zzrLgJY<=EH6n#^12S9QMz-fh!rR-yD0snkvKwvI%Yhs}KAM8kT_14y%cyZ4wxmQOD zi=@OH3$$P+rJ7Kbsl;w_?Dm}O+-6Y9%0FGM*g#uO?qQ4{pj1m$iHV*FV8v*u>U6nh zwqpU&hRmAinMl&RXTK08wISKy4HJ?&Qs`rEJtT)sl+5D81WD2)V+M`AR1^jc5}53a zITIoY?(Cb!(zLms4-<0meSH?IW%xe`8bu~fwvG~eb-&ca;LNJ#*^THAX3n@q*Efw{ z9XEfV8Us#b(A`>2=(723Ok$GCgq}~dbJFaN{bFy?Us{gsN1_5PneGSKq>|4JztYeA zl3Qw2+}?)wc+4%vPsl(Ewj16zq@T1pq0KZw&VqexiMgNNT2nhQE9m7bttR1O2EA<$ 
zqDKGSjyA0@ahCPU#Nm=h2rYY;uT!wE-Pl+l=osYyx5wR7#@ z*7BCOu$Tu#RTmh}f7ljQ-q({lTDoo%pB5>}Ju$IBd+-mpUW=Kdphm*H>E~^nU)&*D zhbF@m5Gp?YFZ(6zIvDv9-oQeC75o-2DvP%Q&;w()k}1AWn;{Sn zeN^o3S|%-LMp-hC3VcX4c-)Qgu`|3SUoR2pJv6V9khJ_77i^e^Lhzq2{q z^57oVV*@8%B^mV2clA8cuhr>ldWBPjM>heesnHwR7g5LRM;~xf8Qq$u@Hnxj|K?`L zDtCh2el!@%c^y9bIQTfC!^UB(AXc`>g}TBnj@sSWVYb~M1>g$K4SyQx5ky+j@$Oku zC6dgc&{mwcOfp|)Kg^8%vi<2Q~D8TY)TVkSE>GD2L=|9u|&*X;P5EQEqVL-cWJRI)5b=QM;FrV z_I}6ng#e*=)U!FEygKF+=!R zP=PeU9`GS<@0gwH!k~d<$TR;*?50&r$G^3NSFR=1Dc7CUwhW{lB3HHnK@Z_cvC6~7 zMG65# zkV9~%mf23ot+k3)F)Mnwzq~K|Oxx8FCF5n_wbaFT{91KQ!T{*)LkWI=!@EsMOjIf~ zqDj)QWj~IBvWjgfkg-r<%^#9XUduFcOKBRlUeNKdqP%02d%-6!JGp?WA4%%oP&?`B z92e-bhCJ>-w~~?Mp|_UFV6bj-wb;~iRi0{IzU$iE)0(S4H^2WCg|L`j4D<1Z46#nv zJfQUY?ebw5>9240DnZP-$-hACle0%N4rN<9B)=y4IY=~kS=0LO`q{7=-c~>+swAlB z^;3Y#VN91=wcYm}}t-8Ic1W#LPF0bi|n6{j9u8A46-HuA8h9!&T?( zw-%n#hcp1Y=#PAO^^?J`YeHXhO(aO|Q0|ND!uP*#sGVUTJu!j&*MInDb?n|#Qg2oK zQGQwg0!2Rsz0C!0-}uck&Bmw2fMdNe4+#+o)3_^mQfOGDfJqhu_L$tQ)h=0f|CYI6 z<@CF=rYieOlDsoaTIIGoq+ie1)XUAKdy9kWR2rvjd2UAidsdVymsjkt|7E$t=D4|8 zm%D0;43fA?MVHQp%;?4LEz4sGHal`c&CHDN&8FALAs zObl8Kgtc2F??|pu0CuG-(M0BT{eteXj}4uce{2@0qk%ljY}kL8u`)7Q}4p+DR#_ z5Q=OD5o(-tWntPa0%==kPW@TnpQGUuV1&&YD=Rgt*L58$^$6LPhw0}mYM3x@5{y`c z--!v$u2k;wiFj=|Aay55;m2%W2Yq1{8krMTEPQOcgn&bbMkk*(SlgK}> zF~8LY56!#&`e7apJiWaT=*m6r9%!}}?gKNvOX$qhUaV-IoRWw;X+)~*;{V-5l78l9 z>XCU6XFeTvv7->#X}bT>_3ax8P3g{>N1oC!9=chx1x4_EAQP@}kSq5Wzg>Fnhwh5n zYE5Y1m$VH5{&DpIbeN@5PyE*;mI{%qYe5RE>a#pm!GbSFS6z>9Ds>Q-1%WW9ZBOYs zb^O*~i9IGcL!14b#-rqeGaAni+iwZi-@kdq47sV}W}Dc+XZv2s-O)+P#6SXVLN72@ zMELZS4eoeQ3HQCjfb>LQW$ekrLOIr*`PE+B?^y%bU`bZ7p}NE5tfvKf?!%YRa)~Y?Bn-aj0zR@bOCp zCHl9G(qWt;$xEKS1$qBm-D9r!j+{nKkmnHB_2uBH??p-<5c@q z7b`MHW>SIiCuWbryd*uYLT9rQ)t4EDGoM3c6#xlSO?$l?#%0#48sE|BJ-!mP$`j|H zd@IF0NC!YQYfB%zsD`xqq?aYZg6vx5u^Gq4*&ARr`Lus9MZ$k#E)NZ z8BLdyUWPwY(ozJG8UlSoXL-KZy>b_liV&Sc{$UMd4Pm_aa(!<=JQIH}__4&j^Qb0c zkU9~6c~6%3kM17McZy6D(RXUJhIT2mXTJtkQ+(Q6QziHD>|sUbL_qMCO~DzDqu5Qy 
z(oM#U3ZBhBU0>$qA#4T@rTHYAQ@0KvYvkWo#66?ECO|0-$Pb2@e5SsRZsQh`5pR&hVsFldtjV@u2o-&H%!f;UJY$fuff<90I}MyJTB-15>mgEW!Lxibr4QB zCv5G^OXyhPA?5WNp&kzzcM(Ov0gVDNH-x)JyGi*=FSzY$ z3gk|ACe4ls%&1TAcn22)TiR%$!-^#L)!N%+Y)I&(&bOkGQ0BP}Dym~NejC8LkwN?Z z^t>>)=w92N!lU#VYt^4bDa<($yJE9z?sZ;HzT zEv;~uGA-Gc`t1p8KQRc(MITpXkS~$5XS5~>6%f4l+@*mCq7mpw-3s(1FKG^`MM0eL z;TGDvb#a2l#E7AD9y`M1D=t}LPMUmoLcP5weRuUt^O$Q%s$0(2jK@RG-n9HGfEYqX zY-+e7>0=>tbwXwCW;)%A;X!fIyZ}jTV zCqzFFGA)HOq`Wy|Xc&H^`no!eMYxo>grt1ke}@T-7Fql1=5N|hm}y^za0g4vdM{dp z1A0N;{1jVAKepbb4X8Y!4X$wML7)A(ZFo#==SEeQqW4_Z=@%nutUte6`$(;4?EK2M z|I$R0HK!HE-C*@W@=|2aJ+#Na0G(y6m}iNm(Q#r%$m)6DK#VU|{JNqZf45tER+*bI z$L;mz;v36F4~gV$rTEsaVaoT{hyz&`S7^!Hr(-rn2t^@a1*=wp~PyVPNPd>GKY*ZKSOf0hlF z_#~}nSng_~9oM^ASL1V+cO^N;m9>Lk&X5#Kk$oKh;(>WBkoA~$wOKfX8wgoF06_nN z$+#4$B$m2Oyl?O;9+G6c9~aFlL$&{_r4c@m$!px zrlM7FIU`4gk1af#=Xa7rpvCDfJ{W{T*1ntCViPkoDo#FI`&uzMi_hD4Sk%h+*e?ut z2;uf6hC*cQ-D+sFyV$J}_uE$E6*rYi0UK=EZl=T0Yu}TEH$9K(MKe}iRv&c3x@py? zUO%#OOGJujDQ?6@wk%xW`|!rveOEzwKe1_#Fl=}$7;A%ZdkBK$xI zs}p8pz>yfH%>=pp^tK&uOth(l4y4|YyTH$tI&%#-&+4M{oV&Gh?U+gn za!X4VRVx~{itT;-S9n@dOoLJ%v4pjA23UFSBf>Le{biv;GOWk9et&a7Jd16*85iWS z5E&Z3Q|>HrCyK~?_r6M*JKFNuJHycP-qL_cddqGX^<3E<$wv>+{<4p>{?V5f zyZ%*^iL>-fIN6U4?JZD*YrmObm!IijJ)B#}r7KnA&stp}@!j|x`Xm+J>Fav@h@82P zIC45NZs)>7cW{}}58jnK{~j4o`o&~nF3+W=y89`6M$vZgKT*_NiN*k3EMx?<0<6Q_ zr+?vU?h9qTeZjmL82#Ov;C$Uchi-BM{sW+<$~`}%CT4zX68!_WuB0 z*MUBeO_8(M$J=Ah2DvBaF3ys*W&)+><>~@>*XtqzRY8O>oygyc(~Zrje(=JRf3$;o zK}BtYsPcD0)yjq9YC`P%WDLfK)F{@B^Zu?X^kC0G-OEHdM3vPr> zwQ|kIT&LSHf8A9mRnOn|u88Ia6;o==X_UF8^8Aw^Uc%-RRXRBlI&f{D(W=d+(_P^GO-{0p!RIhBgaQJ}kZ}c*BHGXp_hzU6Oy)F*Yvo_wt>CX5bi~TCapa%)z?EwQ zKuB1?hImE`?Zpq2sAM;va~d2**LRTiOXpR+4Bij>+Uj0$;jmAG6@2kK{cFAPD7-K`b8pD(NclLt-^ngHCL`@D z)plgPU}pBEWW{Q6v$xW`44MQ|7zJg#986%UKlE#Ir9tnOISwd;zgwEFFzn8$za}3D zoE0$DoB{4aBtKQ$a{hgl#)ZZ)*&J5o4_l7qR0Nga198t#j#c|uRh(%E;p9XvBPr$R zd;4@;9H;(U+08T1)2SiAe1KK>VFrYM&noJ%j?7Vu(t8?%n7QOixo5W%A*@p;>4F<= 
zkFF$A3QR4`VL{v<1yoqBVJ^8Z!|{S@G0A)}NFEV-0~j+Q@vOV`UZ0qGZ9?f)QJun8~`Eis>zIh0`-qPd6zV+vRt+QMd~&xpKB=T8dFTacP)d!yWaPYre`pw zqyoU8Foql$M>0v;r(vsXaQz2iZ}3EZ_Tbvc?mzSz7l#kTn@Jb(I6%1z6HIRw8g%IU z!CX4Q7GnPF?l$DJln$c6aG-3c?+6<>m%AbOj*y`4jqk?&=9nc`Ej#p$S^OJ3kZbxM z;B$~g-cASzImvj^_%$t1+Sw5l`6^WoqYaYz&y5pge7 zbIDpe((?Q$kX}cU>>98#P__S+@~`yWw}HVAzNksk^#IkLrb>AG4G1sn-%TcsAlFeB zB59by?_nNynlRf$jha4XL_JCJ+2iWgC?|(U2~xBJ$y#be&Z(NdExwEFGlEE&Rn0wYu zw+el4ONK*$%eTq=_(OIAtZcW*hoWEh^3R2FgI0^IRyX3++PU-0jX6_mXL42TSq+I5B0_n1gx^8{+w57@Mqi)(!K)vQhEY*7R5h{$(pM&O-fxl$Fg zeLz@PYtXFY)YwbLv!EUaJ%c4uVvyYK29Wclw}-?~R{vLWl);G0Ll6hAfewxa#xxh} z$Dik`80(CsWqs*BY_ySxO5*AIm853z!XNA3(opX_?tG+<;c1OBgz@p@cfYZBG4;Xw zR&0mB7abDjkjQpK{9S)P;JUPy`W;&GFbLZr%2*6XGn6XIRp$p=tKk!VQU*7bA)bap zfsl3xw*2&Yl*IH8*zWX@%i`hQ;R|q$_Fxem>+Xx+_o*V7kiIObZ`%I}t}~crR?lg& z5g&QOIZV(2`os*JCgY_Sq&oJ38` zxfa9`10K-8U>-q>c(%AGaYYWflPA>;9Ae5p`~s$3I%X7B;ji}a41@81WO!`SK%1x5MQ-nzf`a5j2+0i_nd{Lr<;Ooh57pAoZF z{-$T1K8K@tbI&F=!j_IceKMS@9$7$Ot0btPR z_b~UuXRoJ+w51j|o!<~Ugiwt?H9Qu~)9`t{*68(ppgi6-Ib2=X*nGo--Zfb^tl0hh zF4pm}U9qWM-&94 z3Z6#@Lc!#@0ul4ly&iFW6y|0qAPS72(sY+-z-< zT+CzGgbUYzuVf`^yDECBZ*xRs`Z@XR>MYIR5+f@;fP@{LjGK56X9ZZ{&VYTbDAgu3 z>HJ7MM`V@`Gq;SyDKlW-#J(HZG_JeS`1k0B_d*~tXsbyg&pY!SZV#ULt7ExIDuuc| z&~R1V3^l;i?{tZOB~pJJzyt;Q9Aag_)z#1zTZ*-ieN<1XlFmp$sNld_U@E+PK@u+N zXA6sw@2^CcqSj7CQ&L~BIxIaiLyLLj*wW97S}pOAN2s!V+I9OSVDkJ1;_@!u8l+=D zZvg!9G%~$i@jVfm??Eh$;!Ws^5mAmzcN~3$`a;oXy5ca?nCgZi&-g+=)>((rG z@0wvFqB>+5lPO7cCdzKMTzlf4R56bJX=fReki9 ziEQK_Rs`J9W@4qKoB$?}!P*qsXwUrH2m`>_4%YB-rqj9o2i%{r_a=1e^b)#zRaErX z_!*L{?@HU!)e*&^*WyrqXGhOz3Y{b|0##dFYNqcNSGz!2yCP*udXjI;$}fTM6b7@~ zN=$*OFU9_;nU6T^g}pe^c<(c?{D0l*`fOWZw9OR zBzKB#4c=a6f8|a3 zdQEu2g|2=4oRdbd&tIXbQKn=*E9QwRyKF-A$6D4y)s$rOakZNE=%WI&A{+UrSyXc+SYs0IQ?g&OjZiK92MlBrn zG5bGinr8wvjnZ>S)|$Gl|H%KESt)f_1zGVB*Oj)_u*B4l4Ca-J_E2dDS$VAn(=z7F zSgP{jsym$>`YL;$m<;|@=1S_1$u@W!flmM9T34F=PR^P^)-QNXOEkQXNfrRClgSX# zu8hQd_|aUIzfZgL`+X^2#(;OXu(rILM`J+z#k+XmaM1WtcQtz!y6IUA=ds!`5fs%g- 
zu5$>vN_vKA6BAsf2fz10Z+-U2<%OvzJBZtS`apEU{yo49vqD4j`uCSm-w(kXd3+Ty zyI7a+D~37pI9!)l{VL{ReRa%_g@#i%%tB8;M=hoRbtZGsc6fAFn{lUAcqtQS>X*n& zL+l>uM*?q^AW*03Z@sbVVQXgbgKA53VTw!XxYs@-KsiEeI*m_bZvBUnlQ*MhTRyO> z$rYiv`!Bt8tC(T{T*_{!*Vf(7lob5O_|^macJ>|Nne9)F6kt(uB35*obFH%Le)LtI zbk6h82ND}x;yhbUDtt(RQ=DeG zU~2d0JiiB`kKzxLQu{Zk$w4{NQA{LnD~|!5n?6*nvw)w_1tF?y7hDxRREFk10LFh3 z(c)9TTosxukvvN-aP;|y1}{^2e}+WoYh<&A#269drqBCdC*X*fCZeMVnhiscy);}~ zOX?6chN0F}7sDvs?s_F=vm0{w;tF`Wc%&_A2a9V>*{c0F*bVznYKnF@Q;p2#-u(wy z!WOsMFe_v6batZhLlOy|*2K0&vKRFq#V=cwN(&V&VXQ<+kbh}qp5-BSf(nrKO*okv z;0^#n3>|hpls{9g6AJkq@-RZVw`JnwT0BwiPI>ArjEM(P=}~!sS_gM{=<%Hu{(4`C zCvj)-u=FEdYadpSE0%w}qaSUs_B9cxtEX`>v7xcc(+>vgc}y=fXa~F52F1$LJ{Etp zhsQr9@J=2LqN1G!j2~W!|2SUsVBumdBmW6_kDzV)4?w{EdKd@waMz4MmL4E%ky^B} zDzs}R`9Tw$@pte&**&t_(^>oI$MMi7##!9}HenR{^khZmY*W*=uog0oUKt35m$K^` zETyO7o=uf*&n{{X`{fpJ$r-YAb*MGU`^=h{IJ=z8URA8)^7uU?v>2 zjEz~i`L8I_GbGr8~1X*e*yPLFBhFixZ^7amZ&c4tt|+gv^wwn zc6M3VuUaAhskjjq5bQ~qf)#rH4;4Y`zMRLna$Mno&Oq8pAcN0hPj2+~m5`mQo&mw+ z^2CsGqaQB=oF0dPnxst3cPJ~6pav`gZYKqT{G{M|90CqKJo?sD=|U2#Hs`HHB~mRV z9WTt9ecFE_sHr72I8EMMv9q=I*IkvgZNI~GicPAZayaUER%T#H3z9ek865C(yaKY5 z9EwzORGg8Ja>bZrfO5GQ`!QkZNb5E8QR1%HFNCc_Q zM&NQd1SkXyk9>B?-VRjj;pZl=2Co>j;U#!A&j$VZD=)QgHM8iZxm48eHEX(QX=Tc{ z(_Mbc*8Aw~`sAz?wvo?n55G<`)BN?Rih?;EdL9YmgY?hxG=MG&eN1-C`wXtOO`98uDO!kc2CzsWhWNlJ71emb=mwn zzKh8((2Y@1eqsi40V=r8J&zxceuor{>IU48PTA@8$ozdecc&e!c7)G8Ht;#)?&N@Y z=N+-fT4KkNRH!U*lYz#1N(?)*k-NHy zPfkf;k&}_>&~+8dUW%&?8&cnwMSUa6pI-j}jgFczrll3lE>`rG(M?*{>!)jZXzb52 zz0+*`MWFbHLBG`XEgmgnPP@|EP|_txbvrb>iQ;MPWid2v~2AvGMjqZ-1bRrb+2tVMl*7CFzT-|R&`{q ztdeas($d>Ln%{HQAiuYkQ3_#Pjxs<49X{zeB$7@*1oR}3E2o9C3dVDiLk@Yy4;=B1 zN$uaRYs7S&AIE=gg5vW30K?l+hnXFfuBHnhV%P@ZV2xPg<-|l5*|-G~J$oEe$kC*7 zz8yk_z-$Hz20|VNPI)~+&H>1-tiFv}a7wqdrz<5dX5TzlyKNjJquJZDvLd|P+U9ih zS~_)GR&8(YuKgFL*DP!sy6s*#9Q5R5^ON{eVS!`8&vTr97;aC@I4j3qDd-kPz+k*_ zyCcx!fzLzSez>L=JmVPzEMtE1CF`RTyu_ogk&2{xi;_}Kp4O{^imj(Th{{}FgfCRXI2rL=1nEd z2QFlmpJzv_R==i)T{tE0E?#u)%dBjzYhO#W{JL3rq7;ylSLOtfl1SU0I42wv$i_L~ 
zb*pLt2jv5%a50gSj-H3#srRP(5;Kw9l5^Z}M?=T*rw{f=C$31q{CoB1(zCre%I-3S zM4FVLqfu7U?#b<~?RDL=Xv*5nwJn;zy7hLmc1x|bS_t@P+~lbWLBRl$LB~Ik8SZ}y z%+#ln7%kd2WhAH|F&W$!8OAa)53%O89SKG(e&{E*I2rWkkhXWDIc0hO~4^N zz+grhiR9;k0LaD<73bpV#?$0&#l>DK(S0ti-StZQyI#p(ZC1Ut>-}^(-8Rxo%T$wg z4gu;&KSF!`NI2&wrCe(#Pvg!G2m?Jjf5!bj{*?xw zGb;SY4bC%>gN$$vJLj(*dm55GE_NgLg-00AQVxBvFfqnB_vW>tQ?W+w%0_tOXd7|Y z++zeDx#u=U71W%aqyRE8_>A-TkLg;rM_#U zx8ZX-9Wj>a5?3sTmgfYFkUha}{eV4s*HqTg+p3@gg#>_oMmZTgoDtOI0ggBn*K1E0 zA9;rZobW-w<0t4nb5`!wb25TSUAQLz;D5lECy+tou5;^B+!S`gJ7dk)8*+;PKLy&6Hm!MP>wm0{})x9CQQm90Avm z`ByVa6#&Lg4t{Jla=7k#aqpZGaF$7n`1fQIj5s*PR5;HW&QuNv#!X#F^Fj-f4pVnZ z?nx~>yIo&v-{ex(?)s%0Ep^o``gh-c$4`$=J^TK-{b`^s;lMZ_Ui@>^5J~!GtZc&P zInH58c-VxHOJzYhpUy@L%j!mLZv=%iO=aN56 zlh>!|Sbi<=rLT@WAL9=MUqKzsuZ;XHtoU+BCW_8@WSaWbn@{s0)Df*5E~O2VDVu1G z)nwe;HZ~P$LaI;Mdqk%hI6G^y-pMO-C$5%G{(nKk*{AkCd6i~#xm56Y`W0U@<$Q{8 zjGr}@iZ6YWNAbh1U&t-q%{X9uwk#Mhbp;Bra2*P`A(fF_5LMMkBCrpKb8ZaI6c9-} zl#(#b(>qG{0373{I@h#(PWW#(h5S>l_*VAH_e|D2Ie9*f;vFMUwQCFAKT)-IgrwGN z6`+ddB@@~@DYu%?$0fcXDH`w|A=7WohnPW)S`FZ#b;*)4>dFBs6Kkm(Nx(T7#%l=R z6-twZR~j*>+@((LJdk{|jgwlp)x}?Qx6}Lki8!u#wpEtmaPwci!%~!Gc-_iSgtSfG zUGA@KA4Yg~v8-EYcQ+btjU>i+@+1@d%F{G#p+v2b@V+E+Q^)=+N5R!p|<(m6go)vO1M^VU=>s+pFViyLN{9d1O|UhGt;dW?~o; zd5a*HQ@FNzmh0E+KMlap_{GIvcRmDgJ>Pv`C~pZD{?;WLFfkq zo<|&G9Zhj!sLD<;li#Np#k6(uOg-sFpRwc?tx z=$-7E(O+}ZEcMwENW;tvsUIs4agKz5c>whpT=XZn^$Tn0^;>jlWDEj9tIeJ zgWa+SJw|KgGpJ8#AT1)fEP2U0bKBRqIUP9qcitcIbK1p?;zVP~IXK)EN`t%Q#yt*6 z&nFeG9-4xsMvS8>l;oRkaca)o(QuP;lD^F}*4G_%2vVmg+nyHpno7$?l6JN4ZCdj^ zS5f;-f=_d3Wq8TTqR3RJ=))=#fCI4@VsnB{0Ip8k#)D3kX1bc;;@ZLCGIK4y?a34;05=$gk3PQxFWZI>GWoB@x<>27sBtm3m`g_5@4|G2c`7%dq5V(#{G+g6+ z?IcD$jAhywv_K84a=?;u2tb|KN_BBAeB98WrOP?Wodqc9y~N+VwcV(@-+S-9Pwd<~ zi=jzlDl0fTjxPTI4ZY|}GSjM*8~c&wz8~3 zVMya>`A%>E18Bv02Z-$=jtSVe?;RvNSd4tiz^@qHyDWfb2RlyacEAUb=x zK_U{;x+})O?&3p{hdg8|=a5O@9E?A~aNcU!y54s|wnSnwa?F87GNS}LzYI8111@(D zHu3)Hyl19p@RAX=1zB)T5KD865*q*ri+O;us;SJFk1Bn3%f#AmhTWhE40fgV*^2`DNiaOz%$0dKp(pVx`fyZ1;TEPfkqq*``LCn 
z5up8)WRi{2jWtauS~^L#lyp|ROHEgw%I8>0^{K&9op^gWb2(F7+WVdZ@e+$M`<-ZFH@ci>xTve;Z>k8N04KNXe5S3goaqv)p{@rXaD&K9y@_b2r)+;#;JGX_*Z7TM}!JBshlJ zIoZvK{XBbJxn^nVgFEN#1fLhUNqAOYN$IVU7#a1Jm>Q(hKRIM-IEIm@1I_*q6O zq*Qrs`C{B!>Y8iKY<&e>H7sl;ii&jWREqYHjI9+kqMVm6Dr#1`<+4rcnzg--NCU!7 z22M%gILRCk2_R#>Imb@8>Nq5o7$UD)>W?f(a~H_x z7$jiyz-3YO9mhM4YnRvcSdng4M+h1j0cQEI8aWe_g4rbeyb-vLKqZHvj+F3{l%-Be zl&Pl}%F~)}m!e9^YrB0`yPq+R=ff#etth1xqq^TEO-~5%E$cz3PYN&= z3`nf_I6uX~Y>naZ z@{?a7UbJh~f~P{Ac{yK}E~P(p^7A?KE>CF{bge5j6{9{U6YI*pWbsoxxo^YboVWzEa)ILl3%ce%P zg*3>IGCqHD;Ykc|NWy{%3y?_Qo=2eQf29xFHq76C&R?_*sX*R$!i{|3D_|I|^t+Ax z5W{GTa3cUOnNLzqq+mTHe}x_tf;uITG7c@Z&m(5$4k5 zd>%wsDG+G4VT!)lwlFBb6O13czNHjJb?a4C3Za#}t-tFSA^CII@57zFa8zM=9x)qj z9v!*PcT1v`&(0LQ_Fygu3^p99wPxC-E+Q(UInS89bCZGz2OQ%( zgM(doEDdSURAp9BoNK65lalr`gR`<%a|wg9-Vo(QJmthD8f$4_t9wnOIrf zS)2?KaH{94H$QjrJxM+M+L%eu#8jLk2TD?u93GV|8AINLWWCyLtvOw!uGe$sv2_)C zlw_0UjRfN7Efa1x(c0^;s@~^C9LV4WV7LvCdUhv{RCO6Ek)E|DoXeGC(>#u+JdUHc zwtDm!0QyH?WLU0C9pZpuwvHO;3s{HZwUwX|K6w_;y6a;DWCxh07m zcpv~rUWePQMLoxsrH*<4r)cZbuP2UpIpdBqP`$x{fK}TV9Z+Gq^NqxVv=Pn@2Ll=6 zwBNkjDCJ)O4l)2F;I0^QGlP;(amFi|`!9MfqfJKgbEhd@>1dN~S9jXW?i-9L$xXD= zidL1|dnfK%>wDd|_$$bg?nYw3uRInRZgYU3kViZcK_`L_1QN>rS=y(d`ImD1yNFZC z2a(6!=QthlSqx0#Hr_A+{oc?syFQ!%Fb^5X`~>N_m7^s<;0{WXIUpzjvyw*QbJXBu zno6}iGlU@pC{>G!oTa5HJA2K$yS|z>lXuwF#!_C=oRySS+@Du(gCJQX+=2lLr2CG&I*>d44_|Xe41ahMGC2b$ zE$fmxVx@{rq~$>UN9suj(~dgS;BMrMa!%vv(}UCbeiizY_tK2iT+_UCX=szy%IV$P zb2Tn!DMnn)+R5o_a@)Ud_FW#OLZEj5@_OTg&rn83ZolD1&IuU|02{H4f^s?IKI8-M z)}CU{tsC!XB#jFIo^Mk%bVf%6_XA2vblo}Po#jMPWkC9D#v-Ie7hb)~NtYhTM< zHziJYcaJQ)yL;abw$XZRx0wM(IowWpC+`lYJ;2W$IP7YL)N3MbIE)Z(0az4r1a)-<&LRo z=B>KlwVsYDPnI!FO78cSld^W(Yj)T2ZLMuBBn^-VP`%CxBLH!M>U;7!`WdA`z1V|{ zu>g)h8TA>-0B4GuNIR631McO4aurR#w_CTU)-j*F@0i#XGerO4fG1mwc}FP3``6MsbHco<@0Gg#crz#xv{B zPI17wCnqD7BRTZ+}GC-|@l11B7Gz|QQD z0pws~ITgzaFPc(GB%E%b-z=L+b2R9AQA^r>B%_8-rYebp%^sefk@oL z9S8@7#z^Om_|G1ogU=gEo4UG5FDS(;+jz-or>CoE)-p-isU+I7UER0YtG@j<>NOWD z@|9ph5~QvJ5;K(wdjbLFhddtBk`$tq2R#NKJafUp`==PrHm@TC0+#+!TUI 
z<2?HGJa)%E`K*;0MxsqAB%EbFSt~sfeActO(Os`Dpy4|tqNQfkrKdA#-s^U*y8i%8 zvLD>G03<7Q8R{FS<+3PV`usY_6qo8`%rv4UyE(~OzGY{ zhUZfr5pM`wc;m$IYx6|#&3H8kwEK0tlv}ODFv}&>*3jTGM6*h=&2X)aOg(En)Rd{gG?lrzn^snLlC`wmx22bnr{-UQaeE5^ zmI^rFnHL!X%7{p=pijt^}5X0-Jt z1@_~ep@9H&JBB|o)B57B(f4I2yXkhhntu+FX(Y9~YfZr6E{^wNp|QEopcui)1aro4 zayida#aPou`_aa7gA73!$WhKqae{dy=aLRO#^`+4$YM8R8;?BToPE*`Ksn=%`K#J+ zRlZ%Ie5#$X$Zfdb;NWyTe<~`$#lqStO6u+}mMup{l4(AxXY)lSos(8c>elY|zvcM# zvA-IU%(w-%HU|gh2Oi@&_3h6?PDR^r!yMp%6mSR1bHLz%!6UAF&e(H=9$iUBhkI;6=>zcb7j-+=v$FCom>&d8+Jh1Dx zo^VGzp8o*nKb2e908*oOKzIa?T>k)3*YmE4sW;0Mt!?6ydnd~5ucnsQOIXw1gK}3_ zvPs_dc56oMYk$KMgi+f#&-uqnR#Cw?V0hb+z{jEfgnpF$xyLa87*>^#-g6 zu*!@8EDo}ETdAbaAng)in)g1@}e1*>l0`BgFBv73UmFke)A38dv3wc$ z%i`aKdcKu^r)W3#3$1utSJm#V^*A+M1H@oYVR>nNZv@M89kF?==ZZv2n>C8s7l=k8 zMNz4OrwHL|slu|U&q8gfQBZuX#-wLfEk$bAvxT&~-QV`#4fvwGa{1)gnyRdDla3|Ii4|-r>)fXu}*xl_QQ1?2dtkA76_WE<2FRSxZ~$S=6Ekz(^B0ILn< zQ1XKW{K}&vBMsE$NGEPUD?QvlxP>su5~E2Zg##=}k1|r23pUg~X2k={W3>k7QB9I* z;fbeLSAl>IuPZg%hC2fyFcGwB#adSLk>a=W7^6A}+pe`Ii1rgorCG`mQ>V@C6fUBY z=ekRsJ!17&(&x+JadD+;wBV)9YH3DZ_eFVo+CL}nYqq*`H}NcpN+>%U4nYL&UH~{b z1Otp59!FtVK7GZcsJw-d@L5>yUUE4dK)~dl_#LZprz$73Mf0U7CY6v9QYi)&R@y#D zQqsV`+&MwAqkxu+R1d-T`r-8TN*#E_^+iXtcjZrp&N1B{Kp00|sqXCMmcEbfn*hs@jy4tHdB2L*CY zNI4iiPo;e8;hz!9sw&TJaz^3v07*E< zaz_L1lkLGESD{Z7@fB&%aOH)$qZ*NLlvldcw@P=l)!OW|+tR?!lqJhFqLaPSlWP3C zySDGH?@r5O({3h-EsL`QlsM{hl|Ax0B`d~*RQSe2`F*$;B93bdN2eWXQyttu530cq*juqxj%I$Qc{w4 zx4S9IGL-DEW~{w7J_8Y&VkkMMTiQxb-f2r;g}Au8ML4Ccn_X6p$I^+eUd+RF?Aes4 zNX^4C%FIM+zFz&Uy_?I}RODq32Lk4hy7s-S>H2Nu)T<4~p>1yjc5KgsHNvcKBxXSr zrB#V-X}*~>z) z)phsO^(%FhXm=WUk>>$ShUhcIZB?*jc%-&pGs`i{5O8C0xrp+!SXP3EvaihQwfm}* zP+GX_O-bsK)n9&!+C4Bi#Ol_E)@n|r4oA%udAQATsK(sxDZh(b?WNt3`Xt{7v^_iF z)t#1xZ!RXWw9_RwlU!Pv?`OA(uJ0Rbq)MWBkVoZ!U4uAzMZ}P(lHr5rRphb<{bSxS23>)ahE_%lrC4JJIXUN%gUxf| zT~!H2ylVS6B;@9=QgU{>e9}>AXx{y7_dailr5UKW^HiIX<#Cf*X{wgDc30JD*G>Ln zgHyh_zOkMH&jq>8dDgS`MdywF5UvSi1*kjI<@UdiFL)eFG}pe?4gyJtkafuutl zLx*T55qgqgnxtmq7{M)4_uOA*xA-Yml{d)>aD5T{tLm?>UW;R}@x6|dW8ohr 
zIK1I|;r{>7nU-z4~Ga*>Q$-B&66XB}_@U4}+ailO>O?7W= z1Sp9eyg|gp5fxQJw9`!Z0YG_jMfd~buN5J+(`4>YaScWb(@JWfn}dm(d_T1kx~?n z6b~fK;bI0brBxY_25&O0YSp-zRihP4FQJ80rHPeE!AfmGs%@q0-K^Vu&zYppfz0XD zo^49Ij1se_I*O%7Hqvrj@o7TVSGts)((*kuFh+5=*zueKNFL+crzaIFI8XrG2?H1e zp2v;=;PH%~UJpoyI4h2vpS_&*=K~|RIrgM1!H3ES$3H0m{#(D7;a>Z%3ck*ojXJ!q z_^Qc8`@7jV$*orR>$azDlI2cLZK$Y9IwbwyJ0!2Y@7d^?!u(selTi3M@rKBuZ?t?d z@q!qcPFYt>d4N>}7Ira=?qH;6sQFLx0c|3UGR()j_nLj1DGFEb+s0J#036_)V3CaT zU(~;i!^_}rh!){X8?OO)(malKgz(;_I~F6T#3W?n1dQ$ggZ#7F#<6Ku3%eUM>m*L(oUV_g1Og9>19Fk5Wx`~Kv5*U;~00iZ-0U57$o-BBR zIONQ3w=pr>obx}*a4>l&NXK4)oL7WK1e#TtCn>pE3C_@}(lQ>X!vHVcDn>G;a1DE& ztt8XxT2;YNlH9hLaQjpNiLN1-sNJ_C<|7OVZdWYdG3}#U6-hzWmpmnjX{fYR=8x(QUQWo8sJ+H_K7M$}TBcwB@I6`mK6((VoMg%9?k@FAPklGtGMyx}&<6 zEhed_#VG;GZ)HsGJhHNbhGMn*n%EbYDikuuaVjy+;!3ebB(_kn~l?ooKjM8TR64z-1!>SqOtVdM(WK_QB>rg zxn~{pv{SZ<`m1TGH0=tB3WYmxPT&FC+wUBl6W9gETpmqYF&r}z!x-t7$mllZSakmA z2cgYpM(@O`u1esNRNxF`;HWqp5y{CT9S$o=Wc}J_xEKYnN!#15Nj->e=ZxfeQmGdx zB>AJSEcJ||HEkX4`F7POrSGoYGWX(~oK;OZR3m$zth9An<8qRSUZWf!ya zo}<_rhtDttR8~13#JJ#=An}qjfHfpc9-tq7~L!TH+H3aUhl4+TSh5P66EG`dO|MhGPG}a z?IfRl@Ao!FX%$SsDBXa;PCc+oFU!=DyMf0z0A>Q|6QMci3BvC0zy~2m$}`C%9xzDS zEyx4Kuw-z!$RilzlZ=DMZlq_mQ&^dpBb6>a3h}hCaucvX-i1&Yp+dp zCS8FkQ2>oCO#?i98(S06K%$=xIqQG@6y}+LV3Py>Hh`ZQEj&*SnW8T5O?b z6#eVnxBA<;S7A)JB%Vgnc$s-$fcQbcAkTv@7wPa>(m~9E()=e_w7@WHKVd=-7BW=7O&RV*?Bo> zT}8CxB$AD+yLhGquDt^F3L{UyK7se zyK3*V?`=1-boo=9BDq^i%Y9bKeckokpc5&)01o0gz&!2hbB_L>Q|TkyhWn&vKQYNu zo(~&A#yG&o0QDmRq?JzG0R8i~o`8@D>C0oc9P$k+sa7moIRIefX8>TFp1pJR?VMII z!%F(aa`P{D-HcW2n^wEuwy(@|_Ps}UcdT@_so8p~X?J_<`c?9o7=i&f&#C0?$mcwh z!2HHDNI_n3*e4%%03Q7PbJXY9as}~&(+Afdjz6#KNXV~^!#L;D+aA979)qx|l_*k9 zQIc-X$-i|cYrbtXw?%V!Hzc`qNvm@!>F(cKwR>-O=b=_G(UX|T$v~vwV3i}O=O=-{ z)7AAI!Btx1mj{%0TI zP6$Z^jyd{|&(@s$06-W3`tj2pIrZnCr!|ki7|K;&y?=EmyDgg4-m7CXUfu6x-=5kcEsWg>7y~1WegqNu5HZhMQXNPf5Pilo$0QNQr+&YMOpBJ`PeG5o zI^%-ip}Qu!mFm>lGE0|Xy)`Qx5@)XCl|RNIqw=4t!A7K!V3v$o!rI$=^vX&jSOVe;i}CPMP}D(MOdzYyb{=WM`=9o}=mqKGiD12UE!I 
z2?w9${e7xfO0MCI1L{k4IsX7XX-0&n7ir4RSt!Z3b!%QSZ7sSjR>qw0X|(3rwe6&J zNojk%9j>?4>C~$+AxKpN9m3Nn+nVJs;gu$YOxw;uOeh2?2j$>%_j9=JCnV=NHRR)zsU-?(5^l}O#q)Gc zG}kS+-KBKww9}%6;~g%qt9qu>QL|S|bgcHlDPn}ZJ9QL&AuoM$=5 zr>$sB8Y3nZ2PgPOJ08Fj@7JdTJkn`Osb>%w8%N8N$-*y8cfjriS)3_&*_-ABg&$J5 zIL~iW$F^}?b)m~mI!aQsVA_*wnY(C``zy--0FkLC#No*7GID(}oDazV04lo#age6~ z@(9mf-r4FsdHPjw0qc$iazQ7z1CRmwf_{~1+_`;(0s$PHXOWT6j{R~D2jDBFn{%h` z)R(k`d8H0%S_wsLt!HgNEq4P>%@wbq4W1OAy_|5~=aa`7@6Srr)CO3tKp}I2NZ<$d z&*lK_nxkTb-$DTcf(h;p$HycNt?%ESGgmHSopl2858SL|gO(tiHqcc2p1)6XQwYh? ziuTg0H$7d`af@+QNpQI1DHHq-gwdLLe$d8<>LRXL|5`Lt4k zc3N4*#oevCX?5P?y_)OF?wi`$Y4~c5MNEK03=T2t*Bv+~Ip7~}a_z5xM*tpp>yK}5 zuX<#B6;MeBl6q&L3EQ|C(VH&$({t2Mrw z`rmC$BPS-k=_jW8ue06gm7h-Dg_uEYtAa`75Hfpbsp5xBJ|_GvhD}b&>qVOKORaapX>cdiG<(f5)>M-0>Flyz z+uYi;H#YH^Tg?JBX8ka@jEiYORT#iqATEne47$ElX)n5?4_hsR-O zDs}Mnl;da0ryp@joYkcnMoUXd5{>n7cGvlB@V(!QG=B`fxu|Q)e+&_{S~PC0aK;#- zc{k2vNQ8y1OqRxm}=2#nw+4To0NH@2P$!cQPHVEOQw#Z%He{5roo}n^BGAx-OjVjelgCC5o|sJl;H^LNcK^ zkw|tz5U`v+^CF$nmD?wtNM7a}3*B15jhwMtq;b6SJ=Mf;MIE{VWOkb2zym)bC?(`9 zBa9YhTJhfv-oCHqBut1c5;@v6P(sHvp;Yhy0i|NOBxQ~UOLN>^+}uxVWovP18MTr- zq3r3>>KwNiT4W3O+TShUptcqou`$g9_cGl&gEM=4_9(ps)Ii(GEOVWb&YFGYiU-}&|A%HUe+s_6g|ScZ?akBkrj=hyfDbC1a}ygGOOlFW0)^# z@Y`L~^*JSiO9|ymI~xJMAhB6MJmYBuv#~shFjH4u% zEonm!8?=(BxuqzzWc}V`_UWPKO0`y{N)U}YaE;|sbt0wi@~&miIYUL!w{+66>-ye- z4dhQ1h4e;Z!AiJ^rJ58hWHJPm-WDW8r{!>^n0c8+-NZrR!33boZ8gM>=#i{YNjxZb za$969Kyd5x%tTlpHUT2Nmf9^hPL|v4@RM~cWrpRA=ZQ%q#>`a+3WY!p${85|1O+F4 z;_553g4;>crL+gm7k5%jM0Y0&u%L+}WGfI8Z*t~9Wn~I?GRM7IG$T?~Dd5&2VWy=Q zIrBkXMY%e6jpKIQTzS*ePWZ~a`C{!k(p<7~btys5;e7ndc)RInw*48yX%?26e0hop zp=mH%$&F%(NC2>nBH02VY?4wiulJ64Bf8QrqfpJiIKWpgl^$UE0Akq71#P3LT#yI{ z7)xU)g67shh|#2g;j(sNhE$L;an3-%IQg+ydPa$5rpPAQ&{d1Ahk*ziXHOj{8gO%(! 
zW*oBi10;j^ky#og$ha>O?sF0XJh08q(4mgj-~uvE+zvjc$UY~0Q?k-5FSKOUlEYJ5 zn?0nDF=ezY?{Vd@4ZN84h1jndb#ho-LmD0M@w$_zQc+NnjGUcWI7KM@5QXg9jh8fD zjcWFO-F(iD8wk}lDP?)&Wc_U}TQPStEh{hQpvtg~A|GRo1j zE5FR}ZiQcK0#)P;cXFY@QTf&5y(YuWxVw@i0J}<6vGXbY+K`~B1Rxm-Mh;nluXfgN zbr`HAx{Te)9o+u_X1lt0eR$>?ElJYe*0l`A#x$E3XrTp@^d@b>}LT5_9IMPEov4jjcGSH{_k)le0cY z8^jfTg-BCQtDBF#G~AS5FHd#oo~`pO9>>p~CAP4YQdqi7}+^F-Mj(FCm%N$ z9Wn)TQfaQX@=YKFtYn59fE7zHKQ2k`GDp4#PelH}yLlN_Hfc$6sEEa!WSqwmE6B~s z`6CQZKo1~#DEKSm3;zHN%{8u-D_qZH^V>x#yiIbUM1(__esUtq61$boS1L*u@MBJ$ z7%0KV4G6Z{oY%gSjpFIeDau=0bho~){{S%x80^EJ+0vAz=A^8ol(midRkia;{{S`K z%<@l$P^#+C+?eE*Lz6P12T>eglFYdrW921@U!Q5fZOiwc!ku9)b!%jT+UD70FL!p( z!EQ~!xALP9ozjBN=_E!a1xpA*$U?5u;{N~veg$}n_DyQ*U4&^dX{IRQGs!3y4RsuI zHUnFKs~aT&?x1#?h}%s^wCrC9fvQDBU@8#kp^G)~@&Lef+nvEGBzq5`_eU7-6{Q1QW>^ z$2|4~8dcw@Ld?h`1Tfv4a(Fmz>4ERTuO`uaP;YEfYb`qVMP-l7)2-r?YslmZf>>g= zji&SbmMxDhh-+jwCH96R)GhA(t2m0ATU}C&ta_%3ju# zu@P>{q@nGUXQ;>v>A0R)fkl>3 zimrHu(?5@|qpS7pNd9e?WOQ`46ykT{4W`XB{ zt;CYrrllM{Tge1v#A|UN`E&LzjiAGzXu2+g6KqWnL(sHfNRBoSrRdf=e4Cawk}%$N zy8^1@Aq6(ZenkBK_^)%Nd_n!XQ^HoZFvq7^d{xu*-E0kA^_0}(@E(C>tKVv}{j%2H z-pOXwVu71aWszX=qE(dT8ZXg#CG(l*bY_-m8fL=$s?$F9A9WN0NCyg_ju#8U_}JKC zs^V$;DmCfFN?g#XQjFBD?4+F;I#x}v?K2BEx;rkA8t?7A8hbFDc>k#g*oZR8RO~)`OR022v&8*?wBo+j>_LQ-Ua|5 z@CHUt2Eb5oc8udE1CftHI3HYr^{vy4X~{e#T2Z5B=SHL{Cf6#wRo6RjRpY^n1D_P^>1+iy3@7zcqV@ec)r~S znYZS-@V2vVq#WT+=>`ucI3ZZ=2l-EqJ+{dco$U);K)?)s^4@u^Vz1ZL6t8rr*WPoOQBn^)k*yLaMjD&+fkJ)ZO%Yt6%W) zIt7zHA4zgimKnBVpPD~C6yUZw#_V=Hd)K<&&-RJ*aL46}8w4ysAq~1f@!?6x{o-N5 zusPlL2;4jzlHMK|&k@~>{J9GoT&Fv74g!Im`57mYeX{)?7O^uZ1(6>h#&QZPjidve z-B)MJfxD5#dXy^4l_}E0N~{#+1zDZOgL^RB8{ySS z+yjD(mLI=bV;LZ`oNz95?JDOTg4}7glLDhGa6{yfNaU4fQW>z$CmK~Ssnm?^MsBQ? 
zNyj7;~36zQl_OSTb-vHB_`6oitAhHee7;UKKw=&H0y%N zfOFSy$3HJZcAAq*00o2ZDI(*V7zi;ADZu zF_YS?nTS+T#@uem1hE8k$R3#|k_qfaPlFiNZB~s+T#imL_OfcDl4)~A-pO^hE&l)` z2uiI;si>&QTbavCJrU%IOLI@4?O)k z3UvOo;aW=(My%XkHXZRtCjC=WY=5lbaGb2YX?i&wHV80qc3%<>u-|3PUBV; zWGJyC+q;ZcW=& zC|)m@UR(1nol((FomD8p5rUKwQ`vQnsjqdbJzn~2p$FPlDE27JVD8*~$sFX42R*uh zl6oX;psPluS%3r=0CLNm9OXwD=vd_K2k%q7q?EV{0|C4Yt^qB`zy(G?7#Z#lbBGW} z$t=Z?9iKAumc~W~TdsP7Fn6jlV5V+HenCaoeUk;~78wbL?rsIOm*o$4*Zk{XY!*Rh^)GfF5~0 zusY|{^!;huPB2NqB!W7gnCBj#eLH$r?~Or9?o+Z_CmSp2{p;!Sf0^d1)pwM&i&Bi> zt;>68?Y-^Y^z~GjeBWQGKdm&bK2R5q*x&>Aus_`=w^Pp*Z_B~R$<7B(4<|Xt>VNw5 zn@8TqIP@fR>UkN@2acT$Q=;so-j3SIStq03`nzhD_OVo_7|N^WYTUPe^SiuvyGqx6 zET2WGLE;U<;YK-700e+`o(HMH^&df5cNX925daD0NF$8$GmfJjk3BLvaa(Qcj2_^0 z9-YY<_v(68NipZgkO{{e;Ea+oKA8u%O2$;>4|U%9JGQT?O*M73wOVygON$dVQZ%Q| z;Uwc}K{a(~+AS;Vdneg>Ui}804lQOW#s<G2$xN2dO0Ew?<+Z7eVeY#aOUTmjNdY|mosiFZ#3-pNngm#$;kjS5$oSMJfF|<;+?m72LNO1 zkJNRm5(W*^oOS?{>D2S>>q@D(i~=x8>fet9k(_=#b6H9flH^meNhbMIy3@ONd)?{j z)U`DRz1dDp?4;UFa<_QCnsldL?YfyvKKfDE5NIsUml2&WWoanGO~Pta7x23F4g zxX%NQzJvTmPtv6kjtC^<8Ty{zpZVsq_GviYo{vYgx8=K*x@+4{J&w3dKXrSpB%Yc( z^4m|(qJ3^sG34?)`W*f^&+2M*#xsII9^iKGjCB71BQe$zkStj6qD;d(+zj{qRWZG&vE{)sp^7ZPaHOQkVtKV5g+kRH|*Yn$AvZAQ@LEJvLJ^4My z9r}JeGYrF!+4MU|>IlIkk?)VkwOm-vSoIv>ZT7|q$j&`E{b|8ejBrRCg1OIe$5D)A zt;=wO4QZU#mO$v%Ue z0q>p<0Q=Oc0K<%qxbAb)oSb(ws}4gBLHEJvGt?hWwLCF#wDZnJIOnT&(ss7?SJQnx3h?R|QJ_>u3cO=>2*Afp;BkOK&N0)BRC8(<@?fJfy5|Ly zfTRwprH|B}K_j6ZHKT`6P<~5ckWcRftCDe_xZii5x(4pJB;zt0TlXX7)7K1t=kGf~ z1B|1M=Q$kg<2gLm4-HbBc_RqMFP7>Pl}Nid+56CwO>}ABU%ypS4%VDq_OndqKeTnB zmh;OfVllOG*Z6iuFfu{NKBJ{xi&fT!)@y;r(s2?N0~jEvgbsS-o;|y4J&oo-D{kYd zDqQ<_MC*>7yB=!9c6VHSkbp2ScK~tQDhME+xxm06;-4(kecElk6G}0@ysG!)_nnrP z!(Y#+wSrcMDY#$I+(cBU1qi-MG>S46Qq(dvO2VB7~2@!HUvAB zu&jU({+TV@H%!t+mW`tX0z$X08#|91fZ&mwfXqH)=TXITxE+@2z?=ZA6z3m%j<_9< zayiXu+Dm?p0oWI4Q-H((PA~z<00I0x{$uQW8D5j+vu!r%>e9S!r3gkUH|p)J?v|_^ zV7>dO$tM){-rpwl>#mkBAN7nIBd=lxbAo!4!Os}$!J@?(FMduG@~0g285uot0qc#u 
zMru=nGT+y^{OVmeD_$Jrs8%3lPdk9eB=pJpXQ-{QH-d_BjGU@Q%F>k5vc8=(R(JDE zds^=A&rAORTOAZ3jHIzR9S$+9Q%X_U7y{_NqchQusb$i>> z`rY69+S(}`1ucwVo(Uef>HZygrP(&gGsbg_k_b6HfFS2N{(Wg;lOK2yj-QWgjOQPx ztym)|lboHyZ3zYMXqkUG06=jV(pj`s%-~r1m(D z9!^MNcv4BnJpd&2>67h)n$QinoGBQ<&IsxUr#J*2Jt~!u3m(&qla(De4gmu#xZ?+~ z8PA~=e2_7f%M9(o0~~YIbv~WzSy4^%PMq9br_Id;D7R;OStQn}rtQ^q8g|t^f8m~= zKg;im>4%WZo`Wabk<{^?#Bs-ST>AwekA4s5+qv&smphD70y!XZe;-eLe!a#HM$xmJ zkk}1h3|Zw0_#w?y1A3W`kkHDqIT)_ ziGE>%S#~|w+0?2!nSY~gYEp$T0bJk)>&l+$aoqR)Y8wm1u)4O?rEv@vGedK8Bh0Wz z90=gc=1k$Ejy8x$BxVH!USXA9FygwoaPWj>C{bTmsT#6{SUE}bwzibISbARGZQ^pqF2fEg@eH%-))3iMf$6oOdh%|fX^xabSeL*!?v^$F! zZFMQ(k_)*DNji&t@kXC8M=_1*7PoA1f{_~lrbt+Gmz8K1IK%+zN%I6{?W z8g!^tr8vc==teQ7{hz>`;-uZ1ZQSmB%?j=-#=4%{?K64KCBB@+3*JhuVwAQW=klbG zyQq$Jj54p1%e4;gh8Iil&5oU>YyKy(zB-lum#N%Ep>xSnchEm+X*)jT(O@k5;sX(8q4**D-KkD*3|TWEKZG;~CFfHgZP(iXpevBiG@S z$#`r^TeZQ3-YDb&CJd%M%0$a4hit{6E+k1j#SD&wp@)Q^=~AyMmXs+?#uS{FDo&kO z2}xS*^C?@&H>;b3;~7CY+umNvF>9G}t4^xFvyYP6_Hos->6rC>LsVO9xHS742(8}i zM+}R3GszRmAPm zn;DzdtvpL-rf61r#-rqEk=)A_yb->^aj4%sYnY-%MV9tejyV{etd`Iw4AHcxCGBu^@@vXGigb|TS>I}+On1OSk_UhsHsK{rPOICDNZX$=^0ASt*v=` zsl6`Jxwm7Yc$(_s8Sb?WLMu>Uc8=EK-qt&y&Kq^>+p2w|YPLjdc7ZE=&mI+QYBn(G zhG8toGZV@_Xw^x~gn$Mz<&b~@ImkH6WOd#b@fE(cVI!mlD|lP%`?l;d6^15k0V}s6 zOak6mI6D-eS8pu%l#}XK_mEk$#v!E56-zIjQ_b zEyGD_nq1nP5Qlq+BAR#@#;?_7g&Yu_Fxz7!g_Plg4>R$$zu{eBVq04irE2imL1@<3 zR}FNWK!Fs zc~Qnt?&<&o#&y_i?V5c?_I2}!nq&hRDw%VHM9UF^stS{w0!B$aSWJ4s)2U9CIy0)I zl$}gWs&c|yskpg6Wfap%xkl-~iszS7mO8VII(28sDx9#FJw-j$*D7*v-O*mwwVtL^ zTk2Yj4Gs3Ca+VOuEx=PNuroLSx}=S>%w-C@n6Xp=6otSrPo;Q##RE?9hK)d&Jjk*# zg+(MZgb*^UP=KV8PC~8+ocN>S4ZfEWz|t~HA@gk&Tp*e)j0=Ed227Hnc5W^MI;$AM zuZC*AH}N*7GRY0DyKxQ*#FqM2rz+q+OCgHQ!)`2_%C_IQ01Cr#@%9`C(QrXDpRC+eeXH+$WmPnTqLs7j3SDo#yuO7vTwE3)Q-s|hLJUwY}S8f!!9 zo6j3~7sIxV{ga?u++4J5R`%}Tg@$Bs%O$u8B(Zt93o3_@ZHzVsAq-3*a-#9D*STw&H%E=|?gFH)Ss#VdY(r$9fSdGf5<~sy!s&J>482MG%eEe1p zwN*-3xKyhM-Kk17<2@X089^sIyCmE+wx`m4V-J_pg&aN_jXS%0OhjqAlU7UJgz8k~ 
zlkZ9@-8Q-U3F7aLqVr6QXmKU0-9fVM>F(0v;zgDwR>Wp9s3SgGL}ZbqN8ar27A1cK zxA?E(pA20?ccu7(cqWZD$YEt=llNvd+NMHKsW`xY^-Uygo#`ZgXW`Ebd`R(Z!%gtl zh4r04$Or~Vn#Lq#C?pas;Y+n>)tSPjq?z)W2k%T#zp80}v5)N);x8`Y;?p#EWMyIf zqE_6m?^qQLadB@Pq$&{GK~PJvEU^cdk~o|-DaxG~)1xM$;+080PFDuhxzn5GYgox@ z-rY=bd`nK8BZgALVJSf+I#oTKRYv)J>YVCr-r6hg(Q1y*OZ}yFsqZah)~8#jp?&ua zbdiwg2{`~e#2Ap747*zoxkz^>*YuB#ejo9r&BmE+ZRA@lLTT--ov!XDK+?v$_LfIMmrcUo98-T>@w9{&3z+e{ z%Oy%jGfD1$4?kkf8^B&5(mYRZ@q!&u?QRxLM?movg|(fIrwYY9X4=T7*yNAR5|aLG zvn+sxaHchqAz)*Vb>~v8IuolH%2gtN8Y7F*vroHcHOjiS&clOG^I|Yp@x-9 ze(t6gGUdO!c*#%3>koX7N^LJtFzPoS+I9;JcMx4gY_=xdQc#A{Yj3t(7AUcq(qx)rBrYSD%Q2HD z$DTR5(!4i)X{qU3)Y@%$q_Db{CY9ot`EXC=PKzXJ-#RSh!x@p@U8;8vqO5#Lb>a^Q zL*fZ%m2;*hrEsjQw+RH16qeTDLo-Gt7hB;;kgBth<^*PRk18${@b4@4BRYKZsZwa zYv|^;vXw=Y*85=)5VBib2-y~NEV3%HF!MZ_B4`{lgBaYBrk8@P=F+u@F26WLw;pr=Wn=Q?UpN(n$2LN3X%a6S@VAJDh%Y9Tz3NKRgoW+1916{D z;nLtpBzYIhnkE}DvPV){MItV~*T>o|m4>V0JBf7kwbAUgOZ&KHzK#t40A=ZtY62qa zl1&p!CfMT(6sOE3nS7*aYmr7&DY~kK8fx5<#8yz6oFvp+rB-~hNnY#OzE{=v`OI6A zyk_L#Wf^k56&pC-GHt6nuSH|?Q{i-wU;hBXK)x;N&=W1Y>mMGawUNwCk2>2& zg3?7Q@+z>mgtRU^w+aD70IS#OOi{knQdPzjwm>X0ykqZu&^-?)1ORhilOM7UvvIBd z&^J2u>}4*zMJL4jiM-DQ)wyuuNNI}b{YWU0I~d8baFwB=J&=HDZ4mGiq_RGszG=a-6|MT@GN zx}7?4joSBZ7~MV6*6!=SRFNW#0=xhYKm-iuB)2DyWfmKF++MMWBt5SZ++7q25r6!zg z)D`)awdK9jIic*+ac(k%V(%!)Hk)zPr?XG|)va=CWd=!OC;fGfrvd57pG~@je(I8W zldx8J3EslT?9FR^xI3y9l{#3PC{GDgS=!ud400~cvkx9Yak@%BVMBEg0+5jLA zPyo&=OeF}=tBh)LPNZ+FLw>$;rXpF_XbqlvCGJGjPH*yz%-mD&MVVv1q{{QHYGIqR0mRVvH--QnpO^t}REn7KY1 z@XMWqe;0>zrp8nZ9ORLXq@G77@Gr!&5o4v4WgchtNPKq=VAZ-iDJYaq#8d$^a%!PuUa?hTH zvKXESAdSP+bLb{yVX@J8J2!lwwhjOXA22JEfHRIs;Bx6A+@V-G40ymLN6Jq^yH}CN zUT|^PW~$U&?)1`|j1*L16;k}EMartXdh71ly;l0ue#y>Jnw9LGI4dQ7SE}pX+pAAb zl{j|`85m4tH{E=2SQX*B6VQ=?(B`Kx$0HVCNdPW!(-|M$IM3sdJ5-x?fI@N?swh%; z!BfZ_5spp8&pAtgQ9h zOY7~WZ+5P-<-9o><&U}UwzXPb*S?=|BC6$^cg$Ag>gc)(rR z94JwaLjrS-2^{Bxo_*$!F=*UnRX_{@Do#%r7&$lxIr(v%^{S+7lsMe#!>0YgessqK zaxy{S7~=zpSC*#0fCZx4&8=&{v9!n^&506gOSsYo}KgS*l||6FMN&%9>W}DdZ} 
zRcl2k-?i-So79PLMhH9+jt@?FQ@!l0mD+dn+Ur~FWlhORMcu}tR=3$}m9L2{ zRrO2ddl_;m8Q^sz?tpmbIqTCMPt)3qscVRUBpy4EF_z9q!p+ zmoF(>-8aSCj@!O|8YoW3w zYyjl_?Bns{oDP1t>G)x|UCoh#NhE?#Oo8do80q-c;&~dQ8fn>5ijtCPw)Jn-x4O~k zV(86AQd4$OQH+)Ol(zS_zV^Mh-r5y}VGmN+?ZM-d*zz-<$F(_B9&wE4xgMDZk&}^% zcL0{_j^9ps9eeuXk(d$>V0qiqoc74+kMZXD~p!zVfX@yBv-IQ%L= zK;v%gW2ngFdK1Ufx3x=*lUgMWy58w`y|>l;&bY=gX*a90S8nNP`Y+Y@AUlaX9*2@f zI)FINeTF$4^rLYDfuDS1Imb@mejU%&ry!A$)Q%5MGmtprr65d!yF73`$6xDI(zcg+ zd)HS>YirrQ`roVQCsE4jrj?Sr*QNApUzPj4w1a78&JU*?ekYJX=O3zap7E-5&+mb|;WyKk<^bnWJJ)2V-U{_1N@ zTHZHH%WW6vM2LAMha(xl>z)Y7JoAo8_N&oGfkyA%xf{9z!8q^78Ntcp9@#8a+=Ffj z+;B3ZfC1wpCmHP692|EN!xJMj5yDq7T9k62kf8L*_3zk&+;V=E4iwY1M54PTzS>__)$MEBYXn{O zc1bmR?%TGO`~K#L6>=Em0ppD0sm?h#?cen2OSrF7ocelp#~^hbG4IzEaS?IJInS;J zdF}2#y*R}{(FWuZk8jX)ImS5rYdKSQP7#WSD^g7)($d!X^}X${V|sOz9Jy7~vTY{P zdTFJ;Z}8BOK2ge@q-VMe!0PC(v#_1&BbDbJdLP5E$2A7s!#t1)9E@b+CkF$k zQVtIlV@XE)WTc#cq+^kgSw?aR?0$zBHP4BoQOc5q(^FjXZt11V_O#}0cE8!d)IE-B&~b3e>?WmtX+%?XC!b2&@r3?&*};H;=Wh?rhjWcg$;zhCB3q-wP==RO&h=yU0+I~XHzD~ zqy(f%B!L8ykRpyoU8vzOwJ{KtNX;iMYKk=#IdY{N+Hz7*PU&4m{;yhdr955|g%boqG#gD~$3pmv;XNtx3ahmZT z+0N8ie$9WgRhp*8wfGb8<|$(=t|F9Z>hq@XO1{}%LAxZL4s(D##a>0h0az9Ra8FL% zcpbSJ9F8z6lBGH_n~ya4J)`B7)UO2MqLk83S|;yqsi1?=rOeWl;+&e4tr;}ArD*d# zH%iR09BYm0zL*E)U~|tWC!xvbo`Z}Un{G%ybgq6=lhE_rW`MvZco-hI106}=_UEMv z%KVPF#&eO6uTSfqD}Q4M@~b{o6qIJK2_}+Oliyv`m9(~{m9AVgPMHUepf|5`&>w7y z#9~7I`~Xiwk=P8ce2;!TD{oNREhTo~M;ppW>O^YJ*vY{=Q1Ea)#xq`ft@x`*&~5i! zO0vo2d5LcXk+VtBwmT%D;ao6QAhWA7$Fwm!Jp4^KQm(5>!E#c%uJSR<=3GKRza z{{YT^?XBZ~4EWv?W#OG}(^tB&)XLmLZ6sh`Y8S?PVJtFBx>rU=irJC;mx<(wrngtR zPc!rU?=sAD3|<=#hyBh16|ahL=W>*6m8UqR7^`0Da!T9TUf-r~V1w5?M9`fE$e{aupf z=APn9o5(XC+hurQRbrC_jK-1`bdkI5OfAC0aSh(-<+Zk#%8KnIf#8*h+1tfG96llHUK;Sv#4RH7$HsmQPZL3Xd3B@Rcro=@#51k*Gq$0lYSGOCMrXIXw?t{YtKzrEZ9C&#CJJ>^*O~S;n2^ga$?@tYy<<#lZrI$i-_nsdW zT0g+%x_7B~Nlpnil%rlYX+=lfcGD4t$2elEW95gnXsVHgEhQVM+AYSLZCSVRwHsNz zG|$p)ZNyW<UC}(BDI!GP(jW>P@gB{_G6+D`(EWx~j>>4~hik_$kVzm? 
zky@}WO6@zNW!~fhN!gTEaz%NRdcu_wfXy+F$_#fEML1)$jm38|sd0h3VyQnU6xgp* z=Mr5{BTfn^^9fj1OorK+35+o#8&5?UPyi#@m)2>eS`wvRjN=G6LQ;>jhq}3xr&3<; zly4V)*GJ9A6;X3ijaNlR-qG2(xUNLwc}iEhzN*g4Mc>`@d%JB^&2JKxyJ!q=IJlN6 ztfB#q`*jWyA2Iecz%J>6qoV?z5Tg#eeW@%w9s#TM!JuqviiXWju;K3>xMG0QH}18p_h z$>OwTM75o5uA1z?a}*_JM)_3Qtm1rwBLK}LX(Gvk6ByhY`aCWwjaM2Kl|E%fLwQb{ zg`(Z37`IKNzRbCAXVF?KV=(TT=5d>()Tz2$*QHYN_pam3t($5*t4lpBeNEuqe&*K3 zGkrC+m83IVNFx%%32@So{OgYzG0ESdTQkYz&Jc(v?Wzl<3opD^P@QP7|dn zkc+zaRNY3K>&-Ns?9N*Fs75qf#8s^tipjoM$zPW4>QZ`NT^qZ#w?2*WuA^-&&Bmn? zOAU?fg_O}?7Dc$Xhs%~pTr$M3JTTksfmzjASwg8_G5i-wmOXP(N4r*dOo9G(+@W`? z1sEg|jxoCf0D!v+)0L(*fK2qUU6;cRV?|e7m zjkaP}*=ndK4L;R%X`_eC5TuD9P|_W$8G|+%zDo1wil*u%3YboH7|Nt&Dt^wJZY{=2 z%5^`&)RNPc?`?8?RXAoeqgte47}bQiXU!>Ak2G|Oijr;lTST6XS$pq>G{1tMY0>q2 zj{)iTnx>nke`dYajn^&v1Q8JHH6V47yH{qG>3= z>^7Db0$9l;hFN4uE>me{AO-;e+M{Rye3c!{u%ELNv=%tYbv|O8XMzq_1Z^a*a=U;d ze^1n;w~Ab`3aZ2r!E=lb8?FZb0K1G5pboXy^_q~Y7bHaGc-@^><8KQ+PBhJ*q zBRfWT`^5|7g5j7KPccbWu8bt-IXXD##+S5d(Q>5-LfU(-QcgEZR~cUEUGJ~NX84*| zjAZ@19OA8JoM9O#rG8h;nzBpqOR4o-cYYp*IB&GgHr^8v^LE=>S=&n|3zH#I8JsLh zl3bPx_uK_x>Ka#y?XGkPZ#JSZn&Kf8vc)9Xg@)o3I~*ACCh_vN83+2dWAa^>jda;$ zv(fCPyK9Lp*48%iqn>&1merY-V-Oa|XSr6476_57ih!9FOBzkx$+T@VP`sYXQE>OM z%`Lf}c;{tAdwAVZ;)u!_m8EA`0+&$kh9^p|R|$oir0L3XlDuaMbmJaZ%kavYN-5}@ zf|qS{XU=9csMUoPN;0K2E@e`3l$@fi?#dS5YdE*&t#o}EH;r`dLTRl5d3>g5<;+t> zMJI~f?M48;N=EH{U5kQ6MxASAFkUJ;FX zy`9bVmCWD4-RT|XBsW0#Iu%BX z?GG3N+C~z{4AHycBTqHdq~2HYOT}~l0K&4_;ko99e-~a_cwbht)$FaFHnY`Yze|Yh ze3=^J2_&<(W2i8=R|m;h9XF9ThgK2Mg;Ef@5h|f0Pg#IS{Gu3==;Y};zMV`JOf?39=ai(e!>G0{s z<=EM35k{>O>Je$|t30;%G0Y`sWCB!<8=QC3#8IAIR+Tz%tBHjv#nO}~UP?BMV$^C$ zQl#S3d-HE)%zWaE>s5s{N>w8{S$j65uA2Db>~=Fsl@GA!i~zIMn@Uo0fU}+IX!dN z8O40R;qTd_UeNwLXx|t9A8U4=KKNmEeRHneH1orv_?N>YQ_@rRof?>YO-R&A5}cPUNXMS(D6UyGdoU}PQ#^<|*Or(4m)fQ4OKO<|D0aeE0TDi1lwsmaG${#CvtKyCbg@ebI= zVv^tDAC6S6+$dM^KBXjV2N^<)p;|X6aBz6qHp4|zr-=RJqsvl@RO-pPiKLVEa$3qh z_Pou;$)ug^`!bpHVryM;rA7@!M%P^575cYpTd#esiKjN}HZC_0v@pP2=L-o3<~RTr 
zVUdBxal1M82P&Q#@bvr12FBI4@{En-BS?TZ&4idq80v7JmyGGoO_afgPV&P9;ZMtx zjxu;&djae*>aqF9!rmI0+%TI@w3}#CgfqY<+REWdlni5Fa0BoQ1GxPjSqMu9m_kl& z6Lg%PEk_3Z%O^JN2Az{yFIA!K)SoPGZC%n@c6z%#S6A2OdPctSuAkwfDR7N#;jub! z#LpbCt;=NzBuKa;aaK9<_*EqS_k1~#?mQ9T+o=kNyYRk|F;FlSMXEyzmE7YZ7-0{z z?A#j}A2WUf>gv*XSHib3x-{&T3n2kRV&3?vXZes4Rv@5*0)@ydhcXY|kAasXS@=!x z&r`~kyzqUM#9?!vBwydf7{fE*gp45=#?i)kXY=zsENW7Nr$?4Cs-o&nl`Z*Kxf-g` zY4dfncfPxy7mcY^RKZh>Vy^3RRF;cQS7hwkXL7{MnalgR^+FnjmpaPKd60b|1g zquk*40OJE3bR2Rk!>NRHZtByW8j_DJCfn98mg+LQQC7B=YfW_=Cm6P+2S0^f^E-Sh z&P`iZx@&7bt&en2p$6gx2w(;YD0!|94ZsE4xD87$cx(~K&lfHz>A0?K=UGwe=%xaU3i{gtCCe6v*P+gqxTZcScJquJ}U zQ@70&=N@Mk+#8HnGD%tcuFbaYt-g!zdO{#Z!*0$pK4L%_Ad$xz%JNr(3VQ*B!RM*x2f6A89Wz}u zGWj(xXDV@~>BZHmxua2O9sbL^()L&C@3yg<=8ck%Dp6Hk+%Kh)Z)Ezn*GI7f;GKs% zlmpN<0O^ifr(OrPGAb5zROK+a+A_Sa1FrHtF~M)IJV>C42@Q}&avL2_86zj#`5H@& zyH5SvFaZmeJaS0Nj+n_gzJ6n}SUwbR1 zef9M6Wch1J#oL>Cq@`=L?G0MBw7=tIMne@R9S<1nGx{ETb?G0 zLC!``L5z>CG0$F|27PKL`fRS9{`6lf zs-r4yoNSfTYnp8+-KBoX?PY&6uSm$#bAk>D2N)P_#e1J_-Rm02rOUcVakn4=j!xh) z>+774#{(76%${DB0U&^iPDtyF4!G!f9mj0(Tt$N{S5is2m^A#JNDa9L-cLZFU=BIX zIl=6&P7}q^jNvC2P7;%fle)6JnrSy4_V;zN?qwNAlB}mF%6#xjRM;*SI*?mO{OfN_q1;|Bx~ z0q5#{c>F4J87K)10LBkYpH6y?2t9|l73R(k5m%B|zqsAxtk$mDbb3EqE9<4=ly$2p z-9}e!S+yIzUa710wwm78hi2Xh$?kL0Kg+q~dxCkVV6I0TjOQ88ZVw#v&unz`snj4- zyE(uE0CI48=eOgYO$!>n6rOW{4(`fMbI=TcIM36Hn>MW!?bhvUw)VGOuX|r}RZ&ys zywYka$w%K&_g0PQ@25@ou?ndq9PoHOcpmr}!5?1LJYa4EZsX1uBOSobdH}inN7kGp zAabqueqNjqIuV@xIR_)|o<%tjq2JDMKm=pv+rYs&Amy>QB%E~Q2;SfDKd+np4PAKi zLN|Akzr3#El6S6e6A`&`yDhbrc7I!9G& z-%r!<*T}U3d6UH+Huvhg1C0Lw`u)K%fHTg}bDVNO6%yfs#PTCnSN4 zfCupr&N(K4D;$3b>x^;FY~$M|z0~YTZV6CI#OI;tps@oT$Ww!Z?ge<1q<~Lc9(e2T z-yGIbqN0;(?|WTctv5>DHodyvL$Zz{rDWWax_{t?bsjyMCgT11G8X$^KnxRmi~wSdf11 zNCQ5E1D-SAgG>bEe5W1qK>q+5l~q*aA5+&LV*v6nJLeedlbXrXb7@L4j2pjY(^qb< zZG7*dDl$<^oh!eA-&DQZt9L2yWv!I2|wtP&lY`=~Um$tI5GQMg(%e;E|9=2a&<; z&2eMk-0<|&Wj=bbigARKjI`Cwd$vnkH?G=R<;E$>o0J@!os^U1!?RMLGFS|~p zE?Bl$4xo-n81y`hboUkE{{R*L0B8RIgZ}^>UrLMO*M%;;TL*{r33W{fTKJ1t@XXp> z{JLGuBc_4S> 
zg##p2SlTw0OgA9qxn48RINC=TBk&_R=dx<0ILFw|tqDb??WtlaQLLJ4NL8aa)PsF_ zWUjPk@r)-VoaCJ+%?UTBa+qUiR?Wv1EvuI;3}xic1LvyNA46 zJ!?(2wRe^~I2!5(zM2#zQs!yWXMq+>ZcB9~@FI2vq!6#4imy(BbEy5SqL(yWq~)rO zoTn~sX7sylw(i^9^spE#PCiZ)DM`jI@~YIF72VsLgdLl2vvO_upH0nqZ2}gaXrUY_ zGs_ezNGzlMdnQOK!{yEjIXDryUTRMlcyh_zXV*07loOF9&dzz+$xsPkGB$3{87c*Q z72+QmUtdo8n_l_0QC+o_$XSfmVnk;ErpcPzy2c%3kTGv8t^tjhMbTO<_$xfuDDgN} zh$eis50XyiR0O``ovc#_D((r-pA6O2no1t(jg(}aqqVOkDKwq$q+Py8(ykUzrm9uP z!tEyGN(K4AuZthFe}euQm(%67 zyzwTf610zNq)iG#7RZ^J8E<9X2iiQqsfCm%Tt^vsWmdQOS^ofRpN9S<)?~KUj;-Ly z3dk)RQ?)vVnhHj;d3PF>l!*i_D+vPZNgQBCA}*3T?^6Sq;wU^s@U{JmvZp$6_R@Cc zw=NzIHCmTRC@Xbcy}Yh@PFlU3D^llL)T=2$)RSDgJ#MV;%B^Lto7$)CQSl<{!d?Z{ zBeazzywvU9+va#>kZ&a;iZ~}He3D5ZDCM>X;Uam{EBKG$9XChOeku5O#lAbUhT}=F z*R1vJ0JlI$zfRqf;#)hvu}Zg4#`aOkx<&J1iewXck-;bu8Db&lPq=?*uOFw6yjy7% zrN)z}Xu3QSEv4R;Wu6^MX(x7vQWHwVtC$G^P7WMlAVi5@;-8CgMJI>njuvPx1QN7U zL`qyE#T&AS-3XNnvDiv7lB_^EB#%y~4bB-AXI)AQ8Aba=II1<%v~6x!sdFuT)cxCC zrmk>eIFl1gGlQj46QLQ!weGa-YhF$AJr$&ylG97v@o(Di_MPz`#h(}I7B(8*puZ0^ zR=CsjO*%(Mn^}e{yO|)q)aO{^mgXgjMUE)$BUhY97?FO*91ypG{5A0%<<6+{Tr3!5;E%pwfUZd4a-{{X*GfD4Vy$LQV|)ck9$MW#m-vAhB)FWHr( zYh-B`%-m)hnsx?4uPTtc9%Q3=K$leKPV->TJvX`{{Tvy!Kv($6C*_( z)B;lo$z_cs^BI~mC_)s!%H(-q?`XufTJ??0SBT8!Ev*}5mc|)&kG;rOU>kK{+{y_`6be zhB+h)0FpR`uvA#K1|{Y@N)p67?#K*WmfeObb2RK~e8lJ;M){87G6~!Fm9le#(~wBQ zllPlzryJc)-EGKZ-SP!(z>$_h`$G}GZzpSQ&IabdJr(G~5r}f9QYw`=r#cX*)N5Tu zUMby4#p>5LZ8g_Qm7`KIjV9Zs{rOLoNqj2tz0$ptcE3*A`<8_i4I|HDR z91NYy8n{Iaa=Uhr z+FbI&v)O9RWoZkymm!`jm&!sS4y;2V@}>t2O2)400u9H|v|km; zsvv!5P>u_xlHq5MQPm8RIrC@xI$In0jl`haqIuCG?S|uesC7PNbVmG!G=h{UUYPi4r==S*q+wx+*w0xCz4f}w?%R1 z#{owYNea3c=2ckZSyafcA;Cp8&rc0rGUdWktg79~PNZPy$*V4FIWK1<+fhqpy|qWu ze{A95IKp#u<7ms;R&tZGNphtZcH3*-eXr*FJzrh2HpQ)yEwMHkmuUxh0Yel$0p0U% zCAR=i6~kXyPGVh3Akj;98w&~BBcp;O+7~WVt8F+Wl^_yBZ9hQhKMQp`SyD}7Pgr1; zH>%u&<|y2oXAFSC6@Zc~hjr&6NFmp6#C`$s8(ub@r&wFu*jch$$3LFQB=;sT&X!is z7n^jjlA>m|jiQNm$YfqseQG#L@xiy4R8?H1Z+308-z~11le*bB`@Tft@i=Tu={hy2 
zRc$8@YLTZEwf_Kk-m+Fn!d$xT(D{c%xw){r-p9+D7|YwuD!$`BSINVuW&`g6#dD0K z9AQVlYS+-XifumG%1IECNlqhcQpb2$zj*S>7BWWVmNoej zG2+WdN$z2{Nu~Qer0sD%xST{JZKYN@c9jo4Rh4^jqFHI$In(tUSZe z^_XNVo5B$d#lPA#^Ei@fy)h`*QogO;%weB@YT+hcOTewyWb1x+AgM(!+)i*pHC1;CCVG- zxiT-CZxNQ}6a|jiq_@iiljGf6QdHHhe$IZ`U?-F+4bX;+GYn*-FjfGz=Z)2a72C9B zdc9BL&aG|XyB#Lt{NZH;@yH}ei zx)G~Jal^VY_hA~9CrNufFBmr&Ny+c;azS*HK4Tk-#MR}Bg*mqGDwS$DN-9YyIP%4| z`tmmzyY8ghTz`qRgL6txV z>^IlU{{Ry_Sq=8M_Ez>urOTm8`g{3Ai;2N_bqEd3(wODApHEwbiV5U~DPlXe1a&O_ zx2AZXzzMJTa({@rt>%m22(^1FKM-Fc%O%LuXO&j%E#|kkySWmroHNWNjt{gVK<^&b zL!N))Kf&(;c*DisCe^=Y&l$t<2gBDs9FNC$cGsg;({HTqt|z&I{{T&~ZGGPA{Arff z)*g57BaNEj9@$b8JdyeIB`Utwl?YRf)#=W*9}&w3Xz5QCO*|@kZCl=xNn2KJmSwf8 zN);%&jScq+RI^qxinx#2;hR?R70n!`G~> zF^fsLI&P$ord+~;QEU`l+d88{v6hUiFs5xkWjs5m=pPmIZ9h}c+Q(1V?{w&I?=-09 zE*@ucj>xXC%LEr>`Ih2B&21=X+<7ReU&}|q{cbHA$C`5Mu%vfdbLtkYr(R7Y6G5xX zZf#(Zg%aQ?*g_}TApNoZpZr&?_+ai>mDr3U7?Rov96Q*Q6# z_HkURM=5H1vW+Z$8np~;R4dZvz6oPPgAmA&IWVG^fl)8f3(ks{v}*XX*Q2* zr(Efi&2I(#E|JT72b}Twlfcu+8YgHK?Oh%LwWmal(IAe!7Sie)sAHNhEzy@M%F5mF zc~Zl!M_jPM8Bz)D#AR8A1<{@|l_)92R8!WZb+(F8oZ}~F)#-a5nb)mCh9-D;)NZXg zCX<6rU8_dzwBJFI$*}JrIa`k>Q*dCEC>93LVZsh1CBc92N|#B$Kx%$@cd!; z!DQKQviwu=#kw{QFuLW_fHAlQRbstxbN4~vf2iABCDp8|NoelvoCAykCPUXNk;Vz_ z)0~X|05o5;wSm(A0BBE+ma%RF{uY0UE)bOklwP&H(JV4fS>|^BA{CVGAc~0c(^!jg zN;M-1s)`D8gHwgnIhQhbR(5e#S~IIDa+WI-Hze1xrlPFWlTA0R-PYFISGQBdH0LP| z#B3KJf~2l`j(q_KX!P~yb6&R-D|j2iimE)t)9oUg3`kbna>s(l0Yb)u9=|x=Nv|lk zA(Bj;(kMH==*I(-jAURBUf>^3LxM(c3Tcbxd15qy6r3HUa>YuVDwBpn8J8K(4r|%U zaTdbkqwK2Ggt>WjRNYG?o%L~PTT5Na`|WRDyk#dxmQq%g-K3)3($(JAOJ8+r@3(H> zQ=O*pW|*bGnj3jm5};*F8kN+s?LtT-g-&o7mL&?Y0e^WvW_gX@z)yrFR4XTftz*C^ zC6`szWN-^_E47?}2;c$0IM3jSKzum_tVDt!uDBtPvq)VKJ2Dy}zXu5pWD7k`Xu|Fapz|Y)ZC#uVyq0B)UHgkz zr*zX+Qc{cM)#!Y_aZ0QugnrhPRBBaI-9Bk6Mk(LjZtCvZ>8_6d07Ur#5kMu0!64-0 z1Rrc1o_)TcRoLQG+a^n3oB|Y_-~dJs%Z!}=04}sc8x}bSj=wf>&vhy@o;!j&XBBAM z4eG<@BN)!(`Y*0}epTjH!$J^L)fXx9rlri=j9)8pr+G;`J(Jb;(H=~y&W#>QiAM9a 
zl=<8EUD~>DTd#|4`V|~8Y=MLF;0`g=^O7_8llW&8s)Q4qmO0?^dSvYYdgGwzJ!Nt_UD@5j2cmOJ)~hdMhl)cn`y^I z81q8!qtf22rHE0pg*7^Su;#tgpx-LHzP5TjI_b5Vr?s(#CkL-MAalnI3E*@iJ-cK_ zwThjDbk>rwoi&aOWfe&;mb~uhSW*+7^+1`2>TU3?6`FfsA(@ z4oBxjY+X7|QFN+%-@EowZc3)f^U1|6FQiqIPRVsQg=zDon^BxoT*4t4hmDzboCVBYY+% z^-^$7acN%s+u3udy&d`D5$%dJZ%5egI?8 zWs4SZK)2HR%fyXpsV_F!BbrYZN;Y~(vPnIgt z)vX#%-O{?+=;y=o$yAGyl{Y0XeLr!#S>0`IzpIp@yP56Hyr?mas#kVTMdX%jk_T*b z#sxyk0Ko(hNWtUN0~rLKxgSq@ys>#~T;mLQQN{;c9OUqO4mjti0y(4H#gr1nbDUs~ z+_4?I4ED$w$%$g8h=jC^rxcW%jAG>*MlGqm67O|=QoYgF2tu0G+V+%EO+~9KUrpOq zy`8R}`%xpO1o{$Acs(#WWOT?CX+da60dT!DkVZak`5feqcH_A@;$a#u7zB_=+&6T< z_80_@LVbM&BLruaI6VgnK+Xw2C>R+hI3BpJH$pb%P;t^WYH*xe())bXms`8&K4liA zdB#$+<&$zxSn98JZSTIjbs_Rbz~46J!5wy-oceLc1e1Z#oKUfn#~=m6bIufkM+c5U z8NokyuQf6`5^Nx_#zSX0$;deX4Y=bN#&7{N;tMNd?v=v#10eEFJLG!w&wNz4Dt5oy zNw>?ZO)Kb|(d*{BUutPP)JgLzNnNRHc2+%+1V#WZ z2GSVwf=DL-mggfpf^t2wGN9y+q+p-nAms2jDIU_#@MuY3{DehB?BQbe0iKQ?(h=NzA2{dn}?a4HqRPyizwXBimj z*FJ!Ibr;>Zjl!w1(a8Mo-npz3A4CS!2mQY-lspkRpY1M z12_QVn)B3PN6mmq!1n(D$GY>=ke8hD5me*)3rAFeH(jBZwRv}rG z62Tmet-?0q2p}Gu;GFuOLG(VoPk2kJsU!?wXSp3dzu{S$w}^CY7RpQAOHs78 zh9*>#eZ(Gq=WU9KZqvssk~@(aC(0UAwV4%UcZg3-CJ+(C$;vw zOC<+UmI5CjrM96?HC zN#;ty*;URZj6|*#PSQ&Pyp+Eb^?3ekeH%!cA`0%B=TJkC6cfH85S$fayD+T%)S{1) zJXhh*5NZNRH2E%;Rzeg+(yQm@!m6vQv$k-GfRg)mkl_P-hW`K-z99TA@nhNFY8Ur* zw>Ik}mRf;VQEay|pf>V6Dg!^4K1q?HXv^CCkeS7<^zbvq(W@#LrR^!Dct$lNCfkke z$tMe|>g633le0Viqk_RyQLl!TRcP8(+iG76cI9b7cS-rAn$z`OPl9Og55S&mNgQ+C<2iEebS|?rqG|H1Q-%aulqR2+J$(mLg{L{X1Td z3eik1AS2|GA|EP3;Hk*mrFqEYYPDlhI{YzHOGnMk?DTHdx^C}# zA4`B)T}Z)F!Qo)#8}hzcOGh29q~5B_RduDdzk7G$9~D`G_E*Jb3VUN-w;GQK4YtQc)|F3QfsXDMxY z71FaLs_I4#2s{(J9ZAna@6Wwu>dYdynqa32r|$O703<0KWRAYogzDnmy`0?R%NX-l zjCtGlUow{?ZCd+A{{Wiw9s>hiB=A^$8h7PTe`WVi*4^HX7BBA;P`sB=)b0h{>yQn^ z?2J(rm`5BS;USJ4&J$qWlFcK@D?7J~c>e&!dPat>eA{#`08mIEvxy4i#~>@XBYoUH zV{!Mv8+)mY-c!OJwJvF>uXfAbc8Ygb+po{xJPD>>c;8+~?Jr)+#0j-MX5;%x z-or^6WO7^mvVBpm=1XZJd1kjrUD!;w4G@k+k&)Mx{i%Q8qhGU+z!)d^bKxHq>0cDS 
zGU}H4g_f7$?+afIOF;0vI(@&Bd8_!F#MV-@0&lbyu+y#VJjrIcNpD~wVT?ujr~d#2 zdeyvH@Pojg1il{lOT^mdkz?bZ_((iO;zrYT2ewOzbSvNZN29e11R_aeNE&(Kd2bpu zl0~}vJIc6U=f=xZmcr=VK;>=-3TAg_oT%QvG|tT|hZ9Om$jeva#Xj*@KNwtI1T18ja%##+s=XfpYaDo25#pj4T8SxLqE#e6h${D=a{93m9j*T+k!1q@T60MBZVk@BwnUx|)#LDrH zNcfN8{{R%+K!ZrOw6oNqmUs=Oy4SV}g5kd!RR7Vp+_>{UEQ z3C6T)N>7z4aG^;$a!Ls*Ud_jmyYEu(M%#K{^L$KeLlKFbU%Ah3;x$SO-M!;^>&<&L zYpq_Io<(S8O{R2pWgjRxC5XVu$l5c>7|&sx*OdH4bi9gNbvr_C1WM#b3xnUtrR4T%h^o%9X%%+mG zr*`7(uJ=i+t20_vqd37uw`QG5sdGb4Te(hF-ur0p(&v!)6HK$x^((m&B#wLQgp%NG zQutq*Fz1;zu`JsOJIbW280~TO6~+0FM_Hr8sEK1JQ+k5P5akKMT<~{gPH@1fApFxl zj&v=T!L`S3W6Cy48e%=JfBdvn#n}=q`WMD)vifJ``N~L`9|LPBsW|G^zOB9 zWpcM#P1q7G!pOF+<^Fedbhng9L2oiPWpKZIH_E^lQ(rynR(Dzy&2M%7k#D(`XHa$= z76cr#1Z5Z_3i5gq=4|Dk?Op!ltlJDM~VeZZBr;-R*6!e=|S9QN&fA zDw3Tls_rUt)wbjBD=WSB)oRwZJhNMo=V0N_lt3gG00MGb_)bPKjDeAnyBH^whfsC9 z5&4WF1Oix`V|EVW4m%F}M{cJ*{-|F1#7`47QN|4%_S?h)w`dc z)sQ+E?YezRKU9W7f3a5L(-8ju&n0qs$@Df_CAsL+2UC1QDKcI0t&y zU9lGkBDuPWjy_0tAd!K}##lP8);J_?8<+vNJfEDP)a8MSs~o=X;~?ab#_Z&Q=y?N- z;}x>n%$pSinRolnt+xXM?w$Mr-xws5!RcL;u~j1)9PTP~DlO`w6Ch=-pbD2s{8alxwG+o-M*C3X}Y7@Ez->m+S{xTEb8j)$C=tt&2hNjJN8 zcXqzEZN0aDJ3Aj)crMFJyMilAjW*v^dE_KPEbOcnIOJIudP^CImCKo!AzTfOhatTa z!%L{#X_t1s74U`Rc8e5^sKsuYh5e9<;54kaI<)>=R>?e}Bah6QDK`MIBE|=o<~SA$`^(8R+qZ<>4YDQAmRe}w4ih3pjbu4>ucLk#_=X?0 z>JUQjZ=_ordz-&9Xkd!s7~e81@8_^cVRITsB&&lX?h39qOYGsW)m9;U3e&9TX(%`F z+Sikes;EUP?4p~sF)B^%PMoP}iJPZgB%Iu#B`H}Y9!RwHzR+^AwbDIG7|L|;zSf-?MNWS7r)r8a zZCye$_oDQAtrqO}Z9+XN(p@5bMoDMW^;@_tByk(v8+E!MiDob`YlVRs$SV>8(Qa@e z-qG|+Vd49mMAK6qTF69bu&UcMZImp{FP3A-Y(^R)N`+Nq0E+R=Q^!(iJ{MW#mN2b% zXKc3H)+V`IgCs#Cd7o+9D=N507&4+F?v*y@_>1CRnvH*Fr+AvjOiczoCihU2PklP) z`$=^<))_^N;LmfZHklF@xq`|_+TG4p8KsoE^4(3&GN(pV1aF6yMOG}UvM-=KY4JJ<{3EJ1iQ-w}*90)lrb;GzOZS~MTWB6Mg6Y=v zAduG(LMztopchUanuhkAx8S%4YZcpx5wk@rpr0Dwo6YF;gWu{E}!ZK8P{Qr2sR zFigw=%u$auB55QH-y*5W0hjxSt=2WjSM3&=f@1cWVH~l@TuzK(k`_|gVB0sLDIyTS z46AMV-^Jb_mJ4ZYlq<(@Fp{|anXF>C@&c(vjc1i&+4i#%6ATf`UtyCi<&;ORKjD|ePZ#_+)~4|Xgtd8e 
z9cs!eNus+E31gWw?MB~GST1euM36}v%L0iv#9d=Dzsw`T^?nlZ8?wu%-aKxM5GY^? zWdW3e8bGWv!y{n89dVFftoU}!s^8j1A|ve)%n&Oqv0O*D1p)#J?tk6~2WuSTXwRRA zrBy{zlZGNQN@|o6jNES-I&zG8>GHeV-iorje9p~!5%(b((UMbwPH9>xEf%+SZuikH z=g>YplFRmm@ST^%Zw={Y*Ww?C=6y%RvFLhD$ZcjT3u$#7a(lQI8(Sh^zi964R@N9U zET&20f;EaL#Vv)pbCyGKP z+<_yHw(!4;M~5}LhOv!CovM_MXqC>;uOd85HVumKqZk#|Fnpk?BAC!3Z%+?6<1q89 zR+q7-h>Y;`ZmIG|nm2^BPHRuI%NwT~bUJ02K~UtShk}!XRTWQuGMtjUlwzs4yWf>_ zG`g<;0HFRE*X}gi*d(=c6jt#S%wuaZ28~uAFxXNDAx~@rxB@=?_$%=#^_eVmOTgc4 zn$fP+Vu_;jAx4R%mMCSocViTCz|zL0gh<9>5E2jN4wK>~w_AIO9hNByxqZMQZbJEH zEWaVf8S!92aZ%i z?5>Ktze;s5mGHHriC)q!l8hwrA&ftiRi1skKINuT{B9 zQA?IFy1ti+NOM zoA!pakCAWjKT+6rkW5ddXtKrz4*+grFkN{0NI6yhtTZcc0_zi6X}%+lNt$TlwKrB1 zNoeah*bC1!q*93?Br%prBY9X6D>D+v-xvPS{{ZkvuZJHQekJ&);*W)XJ$SR>?z8b1 zS$$to(RJSl+4!5o7MGEIlG+rC>MQB>(z9u(A$E;kSDrIGu}A)l(tdS33-*;MPMusd zYg2r)`n`NaTZEHJ5yDchT`pT)I$GaW?0eWOBfw5v^K$mwkb{J87`UaaNlC|+_j;~v ztz-F-(vUoNCy>kXw&Fm+R>96aRQmETdW!VDHaO<+eZGEdT-<=9L790Wf%?y z++Yr*{=Z)T0Kp8tFX?NjOX1(zPvIYmmEgIW!rxHvw~2f|CE1P{<+itXm_m@;%;=Gp z`+`z-E2NMz{Hf#5_$SxxWAJ~&caUq}3H47ETG&soPP)Ito68RtSX^nAl7DGKs6_>> z#86%5AMns^$xYW7MhmNGT)a(8iyR3VS0gsD1>?5W02 zTUonXzY{uCs#BxwY1fp}o#{=*MuZT^XM1hGbw%B6KEZ9|yjWR;#LbP<)1IYpCi z?U(Eyvgh`5@au_0ix-OgZLxtu3=&@0tP7q%+HK@gtAtnHxK|9LY|c2`78eadt_Dt} zN|fB!vvT%rx?cRblXlmpme*}x%jVIkR}DI~9GqcUI7ZQqkC#@`w3VA}CYQ@o?C4kJ z<-;}!Jb}RHfKGS=IO~k@Q=^gh5SRY|fRV#s;NYBKjF3lCdJNPkzb*j)07?0=)9^id z4i}OKITdOVyp5_C0r1;;l0f5W1Yl?GmFhs~Nx~HC$vE;;jak{bPANuF(b^B?#9((lZ{>}%} zlke;m0E}`;$FBn?k4|zRm2xt1k&G_l zPEIk~C!Bx!t9t7S;ZakKCn^5`50XCCN?z*G)Q>DJebla-zK3-QLMqOzdFK@r)AyD6 zY?buByDbuW`9|cJ)1Ix!AYhOPI30ex{vE2EskL*Qk%NUC^d|rU3FiO+bK8?v$t<}) zDIGZW$?AE>LU1wZnx`S!GlCHH<0N2Yblj&sy}9DN#fPT~kd*4XLy}e8=PS`yP4h`N zWbbt)e!B{k9A`RllILp5OG!fQB;2$~rk6{0XYFywcICGg;EugdI2h%#?T~Uf#!$%^ zJI3I^FLBFro!+_cfC=X~&S(t+ZW72xBjy`+j1N)^4nY|MpQla_#~%^A4e-}XhhOo| zyq+J?C%a8P8|JmRy)Snnmq^SOsPZT=hGR2@h!rKAB1-Z8+Z9HfrAf+D=WZ$xr|l;j zS+xZPInK#xXtmbXxoVF!R~J#vbsApIDY9U*ZjO 
zQ!NT4T2F-U9^OZ|jwgv0=HliRT$zzeNFr4wNhi!uK49dTvSCB}$(qIZ4U8HzyeBt8DF#Ray|8TuY7`j3Vdl>+(3HlwP#qKWO_qEgtuJ zKh;U1k}yl50FA?Pk;8!Mc>v+p45zxBjt?3$m1?;^H?#IWmnq}FCQ;gV)?b8$4jMZ-GG(y~Z^Zn#LUBxvi= zd^0J!4>)qsCH2jdImjaB$Mr|e?v?kok5IY5wS{sI$Z|3VTy(~B z+zy@c0pMacBRg}+!NKZIdXBiq<&j#~V{9pq-*9>!4sncf201zIPaM`kZ;%!tKqQcO z>CbbIq3c`Yqe4>Tl(}S-RHG>S)hB4^cZE2&6TVHmrlH0MfmDEzGns(7| zr!PmUPj_bg`llB640na99Jf+I$;#sc z)#?aWX#gZ~o<8qhgR%L$Wbu*GkwFqOkP5eTDgaZ-CphPTPB`R|>0EMm_v<-7dGbX? zMxE1J+VAgHwXN7wnv#oc$)(F~RFk(W{9W|3-*iHrYh^~^h8QG}2GPjp)SjNhjB`@P z(%=kyhi)^DgN_D4$>8Li3}geqq1>ZwP&vUrDaj=AMhMC2^*m;p>QMab+gpr`0P9pa5xF(CYhBo2AWAPf*W z7zCcU$53i_mjGpQP6jY}B%X(+anNKQJ@Z()u2D*E5bY?&EwrqXidItJUATYbCJj(C^wBcd+Bi5zlrK?H(wF_LlUIUs@n zKQA(`-XB^TMgStNMtM7c80t<&4}5edjP%CkEQ`rFBcD!$4w=CB_dP+trz(241mxGf z?x|WUOIfz>Yu{UFw=NTkjkI#o?K`b`Z@N}n>ejv8*)HBPamne&Jaswkk^MV!ovCX! zx?D{pt-!$A7;fhvV{-(_Cjb@$kV)H;F?><4rje&Kk;xC*r(*@IO**5zzFSGTR0ffe z00r4murq+fHr^a+7W$o@+nyR#21VY-V-+Cbw=0F;hE?HD-6L*Rxn|O|q}$fbq}x}r zZ%zJJOHSvdm*FFeZdD|lA#PO|JI_eFb0(7Xx_z3_oZpN7B5JoclG|z56KOVHRB;&M zk~48QW%DNcOr|$ol5YWHjZ*Pgp z&f~b9=D4pP+{NS4f3)tF-g`NWEN)#g!9ufb0gGlf0wMzP>nd@XEDm`SKng*-Zn)}xK)LfD5|Oh5OOej=c&Nt z?EsU&$4Rv0w~{}!Myd%c4gdoq2aE>CB%Fc=wnrQWrQ$nnS{rMfI(X-{xMCubHr$db zGb0r!>=%)Y5;rR=E>(jbqfNSKZBaU=2_SvqNF6fDeNQ>|^sg>a#6oe4lxXumeZk6a zrs=Dz>3w5stvat|RHryPl9Ow7CGNR7a@nNXx23kxcQ@`PjbdhCqZ|zFY=e=24;=b( zG0z>oRQUVgtvkfJYU*ol$*9@IDy`H5EYUMG+a!x*irbhIAj*jxB>5LH`OowQrF_I; zNCO}M3C?;9l1DAu9=sl;^E!pNzte7FaIPec)MRv0kDPE%Iq8BhNa#&><<+ZUv9#qH zt}%*DP6|3*#{F!YPFGI$+S(>@W9h=B2gx?vQr1fHOGeiAZp|%xud2EETm7G|M+3!Y z1>(XA$`&SgoH*K5vJ4aMMpaM0yoNP+V`fGdHsmsw zQdPXSWh@wAaJbySe1GwW!dI4FA=mXw76}Y9S?enlWZL;L&u0L5qy69y7`S$o5F9Ll z*x^FTzk4sLVkyrJUk>VfT1uz2lpN~wG@mpaTx{)otElu^BjhSi5rl5NImS*hT(N1> zv~L^T^G{nR?^!F^*>1m2ydUGeD_Bpp*y`BzrPq5D%4MTq1K>I{ z_ZpV0x3+RY2s@=+th|zW&lm*t8P5ld5P1ji=fb}iwSNWbc9#n~3t4AnoTe8I5RF~t zV<=|Aq9I`$7Re3@regga_+#83 z`rS3kN}4B#0_s**5IFNK8DrI>m`IbDT(-qo?f0y;{JrWnQ`p?e1;j6JC}J5Kjij7{ 
z2~oX!hQK^w(Hs+ANq@#^?v_F(bc);Bsoh;=U=T4}xw(=_cr#5!)J2K{nFCC%N! z>9-TfaUPo$zVt+8jL6&Bm-k6G^26~HMz_83HPm`jMJx~`5lGOqQbM;8#-e!9pe7(9 zJ5@@OtS}YIs3Nem=a^t^1vvd$e6%MHN}SNLyN#`Mxiojx+R}R8<+Hg{mNCkmSk$K| z-8CgHS7@f2PnlWi%O!o9df3AFRq>wIdkX3EP?}*|!-jb!zu_w?p(#M*XDx597ZZ>G1fYM%6S8KU~&tpwiK= ztxtwDiS-+znjBY-*N;4rTP&8g?Dwk>xzrXJqLrn1p&qaC3eQ%xHqWLLYa9$>oni9V z$%vye7`7uqW0AbSD8xLI8?y|DiJS7K3v_#SiLoqbq&hilHZa2&;2iQ8V5u1xBagMd z5qw^g#C{Q94eM9J_gwJ$&u=srKuJB;iWwHlM3@AU;bB1QGlBqPBO=?Ic~w6_$@6$- zFpgb~jvkVli}sJ2qEftl>CRKBEt9l)qMA)#W_-?Ngv4R0LomTrlcmg|h{ILAYFw|C zb5l*F8)?kvb#Av#`}6OTJ(Ef1$;_A`a~*jWie zp|&>+n@LlUT?ikS;_|8Bq~ps^pWfz_=rvCFj*)Om+oka(eG)#~4a8WLXv(^;OGeaW zwbDH!<+^%3)sLvPeLCw-Epcl(lG;aOvNkLfk+_yZ2vN{v5>7@=M;tz*8tN+|7-2ltuPEWjw_o53~+I1$k#u#Hii4pPYmM9nH;aO{(eI zm`QDCjhvp5u*f@(R|k+mI5+@goDy(r%cb>-6Nncx9TUl8> z6=JK>#KAgMp$Syhr8OOr<-XEwXsvrK6V)wFB6y54Iy87?Vn#sSjQ2eAjOQE@I6kl! z$Rp-nF`kFN9-}?6)OvGUdZoKIWJlcScM>y}9Pmo1BODRV-%% zwQGAVyB)-^Y!Bi%`?z2-F@+e$anqXbG%pcJZ17I(vlty*Bk$PQAe572h}#?Pk{MqF zl_21cGoD67MN6U_bILOK59CAqpx2IOWE1n_C z*-GvzDl>QYc2=@aO)nPSiRcnZHEZAPC()~DpKW^W_#aq!OUBdPYW7q4F+8@8k|;o9 z5kVQtLK#a)Op$JiMs4pZJBc4@RUbxpTjCa_r0NrEAlpe|(4RLrZwtHK>90JY%;crW*D6s?9M7fw>om1Vty5arT_f);cjAtnuiU|ZV+@e$Qg2(mGFxah z4F*lJ7lC4u;JRI^JE@6oAdW?u7VOAbVvY>!9}z{Q+B$1?Epc$-No;jdc(L2sUkE&? 
zCz13_*q!pWwyGK)XuD-O3CEPo@jCaDnU?K0HkKUNaLtF(XPFnWy=`2)P&Uo*}ZPt-Ya*uwm0T_-K(^&(zkEBZEe)_ zo7?MP{%p*Dv40ua%lpnhX0Fn;o zW@aFgdW--%oa_Gp6e(V z83U)(@r%Q{wVhewd7~IcF4W}~m%C=5){?%Q(P^z8Yb9$vRrOZ3t7p|{?DtLeS{)^g z%#lqS#_Sk2cLLwRgVY{#jz&FwMSE3^>Rsuh4hY&eD|3bl4twB{(~x>*zJIluvQ!n| zDGk8;su!r^p7`&cy}I2uSGcm;s_f`gwHs@&;WZ5#x@(r+dRg1L@BU^Nj%}SaZ?!}fW0okx0M9@$zbYyDWq`>W zpOEvuC566lt&l+lKqj7*cGEO#Au7~So(`Sm(doK>zK{lnU$hUIAa?u&Bt$c-hXl0!Qhhmp09N$HL)W5Os zgMJ$5jpLt!5_ppT06_5SUPt2%BU!ndSYNT*-Xe{6MbyotZsyglmeTglW=3ML##Lrz zOMZFcaE}p70Z~+iYS8Dy%9@1fq?8nWrD)A6a-Oa$mB#jRlzQ(=4Jy>L|eO$b3FhL!^og=Rw`4kR#0K63jQ!sB+*2P^WD7QX%c zkmgSh_|nt+Y(TbGHt<=cu)7H*oHv&;Sn0QysSI+PWNE|PTP)Hh>t>EgL}*DLn_myL zBLeK18WworRosR)A~BRQqw~E)j(&Ak1&BPE0R18Tm9))9SMmO@745=?(zG_Wk>-Kz zXN+7;G`7KJ-6T%NFwrzDq{tELO#P01+6n^meWXyqGURN8L+W2N1+ zPUnd!Vjeow>QYov_o>M_RF}e&V4IW@#JLRYgW2WondV}jb#KHvfNyVA&o8~)Nk(8{hHql#eA=`TS*&7BR@YR zKc$}wyb{OZ--IQ-FK4dZ=ytk}vajaI&3~#ix{Qd$+C7ULR~GQcT`08nQ0@Xr;9YLRT46gXnrcu{Hth(+HHnl)iepMwL8OXo@m9vk0M1fNii}P*SGQK zj1o3|Pr{01^5nbwSB3THE*2vk(!}hVJcIjHf?3Th=7!9m3i~P=R1$`D^Cj@t zz+Z!&6Vsc+{s{0+o8XNpuIIj8E5p;=+i6kWT|;jb#MahVGTkI5Rke(=M#~v>5;&2- zHN{-%7FvzmH^yuIGf;tGTU|#(zmrM7hsn76LMfUBWswWCibSzH5bQSy#?#Za=ZRzU zE}`)q-lZzSRV=j#ZKag7Tb?4iw$XNEl!F|ed~XqHJk!K%9||~1E>da74+rl$HmOPu zUhb_?bA+$naDqvs-i9j)Nlq$MAzPIw+M<->7%pA8pR-n9+>7abEbi*R24IFan@`ml z$VMVNxDaH4gjHw*-~+Ih+Hwqwf%~#N!&C4!y`<#NXr(g5$!j8AKo>bbJZl`88%E;M zfQnVHROY>%J|HY`q#DMT;yZblXfj&q(Q1~5j1{om5kgTn`K;C1M_{z@i z7KL=H@<_s)gKZp#oRyLUdxuuR+EC589Zu|w3gVt0pApMCaCCI`b5gSAT(?QblI>d7 zw6AWh7~--FHA+gQIv&DK^R+%$^ERT>e9hT!qP6r_TC?XVEvA4-LNI)gau^(y0D;rG zf=^!O6;Y#^)m(*Ac*fTqNb8b%WRfw*C!E)?UdiE&O?IfB$592AWVy7O*${?w!H}z4 zxn%O>U*87C@(`kLJeBW;Zz5SGh#Q+=T&$_{r~?2JRFUWg3C;#FoDOVGT~?GGNJgY0 z+LKeK&gT@9vQur^S5}f~?DS6O$W+QPG%HFma;Z%_%2AYTn&rCX_mh`)oxW!jv99CP zlsxgr2L*CmFCgcUk_LXRkSJ1AC}Ke8s2_VfMgo(^Z0#dFoC}gbV+1k<8@_yiH+|*D z8RvH=IO)jblTJvu1rAj~+%tkQS-SuT=L2c&j(M(X@r_I~=A%~$1smDM$;l<6jqJIe zs`5`yMsd}XjZRfQYg*d!$|-WB)zaG6c72`gV>&<>eBB8-!N(m)>N@`bEd333RAt(6 
z&~z9+`Oh5ZfXolqAx*g$5K4w82cQHHI5`I=@Z%#V-m`9G*o2HXpw3vHdXtf!M1? z<)`eVx@7XH3h#!`&B@1H_BjChW79a!eAC813aq?gZkAfqjXk7n(u-*=V4fy&n?7-H z8-P_=LV<=I!#YXr?YnaA0|cDsJOj^Tj)NTZBo5|-BuC{W9tLtoI_Hf1r?0QzFl+YN zWpt~{+P5gTc_^tQ`E+SMXz1VfQqimHsox6qRanZ6UuM<0WhlY7Zd9*1Cgk*5b@J5s zqWAVn@D%AHeUj2b=_(U0m2DXUKu$MA9h>KE*(B{?T!|T(TjPI=-|$cGh}!Tz8FV!n#RvTF{bLmWyjN zy9GSQ8_zWADzjIAzox3ed_2xHXLq2BjTUgGxlCTv)+e&b*^gGejxF-xn%{t z{-a?N$782iM-)jOnQQiljI%nzf;PB$<&joNQb&?zKV|zD!j{rl>EGGxQQ5VfPg5=f zXF8R_%u~KZVH6Y8oSfqre1oYR0i0mtr>Gq>j-QuvSF~ao*9V_BxfnbQW7LfF1EzSu zr&e`q;^-_zO4x-_-lVN>b}lJ@d^)URu^28kR&T)>o&M*%d#(Gu>3P}ubx#RWe_bTU~oS`;fY^8xV{i;=%8pha;YtKZYtP7@$xHEJy^fz!>UyJ&rTm9W&Z? z3*S5>9o#2s)`Cp*w6oosn*OJ7H9u5aya7HQNqw@L59DJmL-2(i) zX%pKF^`zTVw{1T@D6=R&q0BV zp1*}mgq(?R$9dWtz7!0AM^3#5PsXdeA1okJ92}gC3;=mJ=R0x99_0GIGb0Ve{n6I} zn>zr&&U)Yy0U0Fo>4NqXUe4-1>$^_wQf@6LcdFfOew(4j-0CuF>#nwc3tHOl-kSEf z@r015Ayj~JPu}VV2|Ns(oaFJ;3RjUq9EJl2fEasY<^&Og&JSFkqM~Fh8JWpd4T3&W z7LDLY-s>b81zy0U9yLvR2Rz>W#z9svM>fHE_|IN+Y# z0h@oQTUf+yZfA}-j^H;Cc?DRYL%(xis0y9^m5pZV+RdJgYc0joGe0YD1Y25riP#X4 zMy$Xp?#Yr@{nRL>q;ybCc&CakZuMyAyLWho#}@I4AIphA-G#}FvvM~{@-{|tnay)f zoTE**bosett$E!v{nr^Z)9-Iy@6^FyDp$0m+$gPY%A0RX#WwEKyH{&Rw#d%$7mP=W zyhAElLFMTBGfxGeV1CnaD#}*k@>~@}GO|2zs>Z-Tk%gI~A)A|r#By8e4*`{$CUf$h zHiB`0cXh`Yz)}z4US;spRhv;hD45G7#HUn>DI}h0XK3)3a>%G9l14!zPq>9Kh1vm( z78R+gquXD_bMtv@-@GgLR0jLNumb=NMt_$$(p<}yP+MN@-S|l0KS* zRR}}bXsPm~(rWhV{{Y2r>dNWkEoSl^ejD2hog+=IILFK%R zT6c-n*-LqHe9CtHcj5`Y)p2btwwo5EBE+j3PjRx|ClZ-sX%$-z5rCCkhw`0r6^pVr zm}>Dgh0W{g&llU~w?0*ramwUgNibZwkQrJyBl9FKa}iZnl13ZEex6@hQIeCVPPCL% zVx1UOlzCiiPuPcW>Q{ji&@i&Ra4wn)VAzvJy5dNZZr+P5>O^+#a1r zVP0>fEY_L@g}bt&L27}b1Z>@dCO}dS36P(dD*|vpB;!in`e6YDh~$OAI3SED6e;ucAvXts)2$;i0;O54;+7_afL(zq5&n35ymus~{{W)3DP@yTAjksb2O$y(Zo|;&sPN(y`tYM z_><*rJrZi#{MXG_f~iWP($Y;Odp&z^YhOn6x<4(vb765|J@h8s;*J}ep$vo%-k>KE zA>2U(WmSVPA$KX-<2)CH{vm6g2i0P^u+%OgiIr{5v6=qO8kyqEs}{*fc;hk!+lbjt z$yZ=Q7uTN_d^seRhE%g6HF;NbaK;=w#eCOd04Hk&Y#ig3!Rmez@lJ_%2cIYgI8sDi 
z%uyuTTmV9n8zi>Ffu1*Drz`hTO1=e6VP#sRp~&S@OW7#FMYp2SyIFI_R?^Msf0&_K zSZea6%|e_MlCb&w;Zg`;HiLae=4#*cyj01Uo5_!m&{pN=$3eREB?x4V;0zqFDx)-*fosIx4( zec@UBsP6{EyMQj)gBjEbw>-ayehB!1b>MFh3E+)9tA>ZqnVr?4lH9Y!Hr0`0e3Rvt zc-eu*?aPLT^z-2F?G$xZv#``IBa`hd940R=AkJA~1R)9>w&hSHMFp6Uz=xS+{Qm$S zlESN&BMU?Kl&>n2c8hfzNh(S(T=u@&Ui)uvtHS0Eils*rIZB&qP^lTH)Q!0<%P7X_ z^u5#Fr+c5Ae;j{c4+Z=mo5ROQ_?hF4O2@>_tK8f8a%;QDV6cl#bdE77j_M_m0GfD| zgDP%$;#YK*MUUpY{{V)XFNb1ywEGo88)>-mm1LK0LM*JRz=cIS6iI=|1(YaX=uhI8 z!aWn?J+70jY1+eD>RL6vp=n@RR+7@{(&}igC%2l}O~g>m_JEUojHRQ?M=Wx>z2Czh z62E1y5onrjp{d+UXDz*y@OiSuId(D49FedI2t|-7A-HK&ft{)l&mUe&u$Y`Qsbg_c zS(JZvm07r5u0)|q6)jHpPepfjwAmavzt$&Kc%FLIFw#k`YOS@i=5dcKrmXjzo%=Sl z{B-d?jWSAh!a{&l4S;8y^uaw24s+Px4CBi6EiPD9n*Ve3i}wK(%a&dnr~Npe}PsYiSFIIuX(8;n&oB;#nxa*rdIHMMF^ z@6O7{$(m#V{Mf=i=R!C=_#F-oIXMLN9D3DlKUUSeAL2-LT`|qQ+y&M*V0OtAtfp9{ zC2%5FiS;gxYc9L85ZR^nbFTh_Ez8ZM1OEx$1TXj4oA5{SIBm6Qxd_JbfxPIae>Xsa8%=)mcI_N-=3Ar!4RM z%_Mns@<>WDbZgdxo9B%=)Rjt5Zq2H9_maKstLMcIJy~DZ-!3_sJfLYr!ITS zX)a!FXi~qjj{LmbyxM!N=<~4dz7lhU=O1kbqNb}U#?8m#i+t-&l_^_RmF%zio8oT= z>fRW-mfrH+n%{G^z1pLgULZkHAsJZZRbh}Aq>4nSR5M2xC-#eoFwn~?W0lz?iH)Zt z?lPVT865^X9E$xB@n45*bsJ0FI^x}BOPF@#qB%k(5(67ZjOf6j3dqRAdlJSrc;Dg= z8+cCh!&AQ4q8aoM~-E62%%mlP%x}l1eD4yYx9iPC&Xao&pbMn33BpO zjA!NcTx6uWU8QHGlHcgCc<9$^4oWp6YspGJ^R#a$rOPY3{5DIWhil@i*oN89m^+@V zdIR@~=dN%vdy)nz?XAHpq`?^&HVS71u0c7$!6VZ>GIQFwr-`?a%05xYUrxmKIUPCa zpGsj*Os`(Yp54#a>t8=C78OeBlBFiEE<}@QrE8^Yt^WY0r*x`2MM~}|y;|3%jq3IF z(CtpIr_7jna0n_x3=z=dKXikW-?t+)u}NfsysMZuG28&29l-I>2L*BmBN-Vgc;dK; z6pTJFyapsP@Nx+Zyyu`8&urjwDVH%g1<5=Tq0bl~mCkzPcIO>&k|{?4Nj9m@agRvJ z**Bur*3m81^wUsvIhxsRvbMTxx7T~6Z!^$$62uoqkAw2LY_M)Q!2>*W8R$UZQ)9Sf zb0K}I2Lm}9a(m>H&lunnoDTKEz09Fh#WN`cHQU+bRkTP6x#yQ+U z$s`a7$JFMd61y?n>_Vw5a@-J3dSi?d0Oa6x&Pf&)t?%t+cGdTnGSbPXCaryT>3uqi zQL|3`*X^a3oBseMuU3yjlJ+CalAvzm%~8kHvB)2XIn7cxwgN)tK;?6i4p@MC0N4i{ zj-w!u0na=uPV9Jcqn_=PfI!c`Tw{TryaQ4!pem5t&jXIVIXFC?GtN0aowk-1Ql{eO z`BPVn7bo)RXC~URwu{#K2_(6lwQEZ~-*=|onzOQ>U5ytOkQIJ&B%4P;^5GDk_~gGJ 
zebpf67HvlM?2~aUvKAvhG2jd+0dv3r066L22D8Ita2Rgq8;CxV-<)6(o~M(~ z9dp|~4{YNVoG|oh*{DUz*5+#R=B#>8CZ@JoCY57Fk;{@1QC*z>0C*}U=VDG2A}L}>P#KwX)p#?(+K-Db9@f#!P{KjE zx-of?#Mq7254?&emL@KYOous(c}sO?%H!7d)Z)cvSbQ|q z88++6DX3jVE85)CZqd=*J3meB;65GrU*c_WG^=bh+hOG5_fX!d=_{Sj6|`X{lqf=~ zr^}F(k-`|`%%1V#{{VvC5P?eTQs3*7Z+0;=9X`{_Wr&!b7#?pr6C%h6mO(Q{;1}fW z^;=74Sl(Gwt`q_ba#a-IWD$*_Pz;=7fyOb=>2}Owd18;u+zYEnqXe)RJm9w=5yJ+~ za#Y~`78fd_*Y6su8g4RET;AG$54#kj&3>1Ygg173KbXujTun*JH0t{&3v#M*QiQIf zb! z$qNA|&T^;a{{R+GE}7#001A^e(R-~oRA7lSh!jO}By6)rW{p^fbn?d82Ih7sNEr4H z2-&Wr9(%Kh&~T-f=Wc|N^I)!2oTgavFrvC}ITb8*X;c2rqlk=Vq^DmG989Pt;F^T0 zDL6Eee(P3COK6rUgi0>qq$_0#KJ%IeD3PkPezXbKWoeRb`o*dR^Ukn?4D^v4y zuNK_F;V9ntszs;W2{jq5Quyij(#H{t&6W=`;?W-nBVm71k66>Cw3|%v4d$B+&bHI4 zm1x@BNWN+?Z#ya?3-b90tagVPqcteI$6AxAc)KlIjFQpYdp~i} zwasdO7I+ik_lmSBG?C-Ie@(Tt*G{>m-00elmo3x%w8m_18Nr&)+DmeYF;BEiBP@s* z$NPP3E$v)e+ut_EntcH_6KRwVQm; zm1`L*MMtYrwf570(N{!ub0VXu2aX9Vw0iTNxFGb$BDqW5CK(I7V+u(lrg22^)GG_w=lry-q76cVn~wO6DO0hZFJ~*dDLy5QI<>)4&YFZPXPVw zFaVxM*Bp^W=9(Lq7PogPZE?e_S2H9H^Jfyp6p0L{k_Vcy1yhc!Yt3{^xo%^P_c3yR zcz~P&00)9`jzI_J#~hX%`ZbD=ETow$v|uqLf>&w4$4nA34mx%~LfHG8l)0VfAM@mLoy+(ngiDgum+tUC21aJcm39;Ui%n>q3uDz#|2DM~S1tw)kqUhMYG1{&H$pDjqj&p&@`u5|WTFShT zKtWTM1cnDHy)sWeyM4}2YVNgd2*l4MA%tL(HfM7#O97p|a0w&jIOhY3I+d z9C1??!7GgC9N+>^*YY0R(}+`!Gxf(_e=+sVe#ucs-)Sc9`6112TUyD~ddl7G-Tr9W zNhfH{Id0NU&*9m-C#Cvre&sV3NmmL$z+8@Y;1QAuBn;yi=Nao%XSX1+0BkIAgWCrj z6P}%NJ-rClZh!^A1D>aneb2X1>-4Ck3AE&bFnWvu#~9A|fCfnINJ1>V;)|+*^(`^oNWKfR7AgDax2Io2FxW;kQgWolK zK#dvWQGi6582}yF2aqswc_*PKw_2}t#^47~H~@3?9-sc9)YZKdGS3e{45N&6$-(WA zNY5jud{#JntW^$LllD$mbkkB=+V{HO%KLsX?4+FSI68Bt{oP$fFLkD+_bV?wZDYCA z_hADbb}z4TdlG*t!UPQGAdKLSx%@kK_5M}0s<>xdec-5|9{o;0_6NVeQCOG-vRi@4 z>D%T#y|OxSUZyIdjwYi@Dl*a>)=AkUl2_(WRcj=*TA5&?;SYJMCfvD|9CYj3)1H{lDx(%=RRe%G0?OFkgMpqg*E!&W-y;<(NQXZ#`E#F`9sxav zJ@7uDcdFr>eC1c?8^4Q?qoC`8KqO>f=c(vCinM7;ag9YdN&C`@S57VItew)b)!y3q zYt<(n)=t*tdbRHE_F8HFVi`88hb+hDQGmGPjF2!#^2gBPF}DD$gc43h2ns+02L~gm 
z!NJaY4rq{(jj9-J8ObC8v@aP7GC@<&;O+IrS6~SFcsM*0&VHQxnxz^}GlXf$q^$_Y zRJB%aEv{zod+Vxt>!|G|6q}DNmn*ie@v`%KYySWQw-6>q$yOUrI3%8f__2<->Bz|F zX+)b_VA`sr@nND4ZDZPIply;oSfui z2LyHLj&Z@_x#>`9b4faN9C?%Fw<}g@T3ROjlGXZ+P0BJ$M-;b7Hm+}@bCZ&LXBg>>`U-nIkHKJYMlyLBZDS$Pgj0hYSa0x ze(jLD7x|B|InP6k^&JPl)7G;!O-j#7x0deiLmUvJNO6{t%BwhvHC1fOCeo>tAQD-? zRdY_fytcHrxxBl$g4WW~7n%DOGEGc%@+s44!KlwbDc#OB2QRS!8QV zR*@uV;*F$OiI;LInW4abikq4>VxgKyExsZ{t!a}Yv5Mjtq=HD9SzX*)s>bL^5i&~h zAXZ*?{Hz0ko){e{{UN)RPflixmTJJ z+m%bSffME@3Z;Z&1&Wm+fggAbV06ZWF%+XXFK0SQMpRt++kWrDqdsSS`YXRHFQ!tR zD^sOSq>@P`?)2Ss?RR~5?svZd^-G36Iu1iY{{RUSc_434!Gw;Cx%mh%fHG8^WGBhF zkG$M07jVfNCkzPapz@?~+d0Y4uq0x>bNDviR+XsS!OP$3PcbSE3tNw~%d~(qmH~+v z3<|bimalb(Qb%ttLb1ve;BX5bg9B+CoZy^+>DIhj+^Z9LU2?dqMn2BTTXyc&m)74S z(ZhT2bm~Pyb5YlsH5V3|TH8mqny$KCoNlqE>Lz5f({9DCVw9PgUGPZ&03%VBMOG=w z5KB9=IE)V_)!Rz&wy|RtrFSjuX;tg$1rWqhiABg49uxu?%# z_KW%LBSM;vqiA5f{oK;Zdc35pOgG29ox`w@Dzir^^>7~5!lWZstvYj3aa3sY^L#(W z#7YrqKGBnRT{=TnzV6+U-u8OmMdy88wCZ}inuJl=Mx?Tp06At}02@I0a!BJFK<%8K zv_+|*!9`b$05URq5$gHtj^5y70=dma^KN4pEUcxlGEM_z@Oe2sfxsj2O*9WHC!ru@ z{l^7L2KO}%~ zLEwRoxzDBs4QRzNiG0}sAOZjxko$bR>RQQ>X1DoNXIPb2j;774D_0cYltl z9+gQsVV#^~yOZTl++3~uyv@B+R`lA#FEsTsmf8yv*v}+^kU{wdM125EjB0-mntIzr z+v#mAmiE&9Uy<|QinJxxt*3xn6BZw8s6aP1cN}eCbAUz;ItDm6AD_P!{1GOVs9nQy zJh)|H_DIMpEVyS5%0Vk2jdLqBLuee2lMFv*-^11sLj4eojp5HOF81)(!nGScF3&pxC~XDLnMgdT&jjZa7w8l zGO4e7_8PMut9BBPiC4oj5{KD1l>h=OB$2bD2y{ zZ@=c7%>oI(c`PiW8OdFzpPMCF0|G}-PZ;NrYwxdyKOgKodu66rYv$VRu9C?wWw~3H zw<+bwK)6|a)!H$&!=g(P0_6GolH(8Jr@%O`E!)ID4+0^5_Zo+Wki3xjb4+Df^mp?l zbyAlbk{9z!;53XQad^K+m2nkJJ!s|m&0IVkoMVfeQ=wiDl}0t>;-w8O@18Ik|84!#v!;9xRxV12aceE zM`O4FgXW$q@LKA2bI%R2X*`zNfTG|mZGfvFZdrno6@hGH8;BM3ZT^m;SyJK|Be;#3 z3{xY-!Q*C98Yxj&eCv;y3kB>en3BqN60C?ers0CPQmeQE0|3Nik_Z{b2?M{kS0}+% zgsD(;W~)KXN-*}c=g(&sDb$jTpsu+k8DFiQ%jIK=uS%;_o262mRHoaKQc>miZKeGl zwzi$WKXtDFOKU8ECK#5;BY+7bEI{XJ<%wLO$maxCo@$;HmKY@)O9jgUH(++k5EPaq zAG|YyGCh9r*R)9P=AF!0VPAX!B;_Te zSBh@RP04S0G_7Z2Ti8EE6Mw((>T7u9s@dvc#(msL~c+ufz3%3(*mjLS8Y5XUXW 
zv=E4s$9!$0t(!}f1P1wlRN7SU`@jKpOUkJa5@X$GS{R+Kp|l%r8irlQj2YB9e}H$B-H4USD@|tJDF%&sG@fXJXl@-$ zR)243+BCM7Tt|Ew#acO@8%6Q28Te}3Ua)%`sDm=B(pp?BXUvXwjg}@xz;cZu07uLe z2@Gl+xNkW4Kk>6!_-$~yE$aUOZTJgOk>b?+IeQVix3*iRQ1aO$lCeT$Smt=u3d1~c zT%zoSlD^AZPXqYBK$}zWba#5Co%*oRl$?WmDM7Rj~D?IJGw5{{Y$L8>FK4k27|5vf2E_Nn$drQ=Lpbu+frq@i3co z?A&y$oBiD5)pN=+wbv`q{F=S8xU{>uw7OS@=H6)`nl@4tIx!hd!8ap-9Zmtm4eG9} zqLU)wnVXT&91H?@&PUT2&rhX&9r35YS6U+tb)cFSMv4pcnlCJ$F~l%Rs;F?vS$yJ3 zfR0OpvvPdlBZpQDr5o4S9OKv@r{D#CbDCi4VKEL4oZ!@&yXI-fM4jjFHtnW~KUC57 zIBZQET^LrOQZ*qfUQljHJsV#2pWpJhi{<%l8wBz}IRgVD1QCPR@%mK(bPMnWEOp8WC;(9(o` zgprV^<~UK;0CSw4y$1t>-lujf$6+N0B3SOIugJOX;HT_!< z&lp~V-=W5P;1t_wxZO0;j>`7sz3*%5s<-OdGMBT8v|D;}DDU0hUHe>^Ctgib*@pckXiFvH6Y%&>CH`$mSW-JixLIFF5TL(LU!y!Ba!7IrFJ?lowV9lV_*y^ z6w0h5k}Lqb7QPYHtSnmU_gQ^IP=+>RJH_QB+f)~V3s{gU5L#A_R#kAMZ`p-K`aCWl z5knOz;3+&rX9+3Roe4@b=J|DuV+xRt_T`nD?5>X|>yW76t5(C+X{l17PIHx5O6y9p zrxc+2a?1LxR@U;Je?*q=#@-COx_ywP{I+*6%0kH}+gjc$cw$*1aN({QS(G$(TIk{0!E1)=GdeX3d$4=1rjI$aDYFL_>T7H{t~Z;5F*<&*Luav zAGAd7(aEKyrTi}z%ZPC-=2uwv0(Qn(1GW(RTzYV{({42z%a|=~tsoaSmp3vb7MAdW zpv^3@HUK;YMc*u^5yc#m5QT8hGtDz-VkZiI&YPFAsXA0?&P}WP(p;%p+9sRTZ=uzg z;#`#qa)N}EAgRI;b5d4}c@wjgTfVJ3y@pR0+hSI_`$Jm5mr}~1io}t~A`(jf04@}- zl!wgRxLv5F2?YG(w!4;Rl1rJc*8cJ1XeGN}wn=j=QM^P(u=%AD zPcvd5l17^^;uppd;t8L_`W5`q_dD`0`HrRVa^uT;jX8z0 zgxaxBx11_PM^1Q74QB#q1%4ez_p!({k8T%pN|juL7vQ=L1}m7~jO zN=gY!P28N5+S)U&Tv3g}PMtSNVr?XxJ{zUVttopqXZSBe|$LM9gy#|^tnXc36ov52IH6Tgx5zl6Ww zvHt)Niz;}_;GcuFP)G(}4(j(BKoQ^&Ld_^+A=*MQkLE2AVYW-99}4(~;m3uw=4qp{ z7MDn@47SWI%&M!hAs~$jZKauUML9aS8nij2-mXb0H@>f5R}GzI z__@MECA_$p)kO7R?C6w>|$S!tS#)^`_h z-0Ge=)FQQUY&SG@eay6h=g*g-06|arfZPK_J-^o{{V;lN2Fg^IfCxm z@);w!iJ4siBuf}7$+$c~f~~n!g>Vv7id6kF@PEV!wOcDdWo47dfQBAmZNWCN&&bF@ zVbo-T2w{`I(8#IM%rPqu^*BLtsPa+8=1Q`ala!+@W-k#%>vWr;Ry?a%G|0}gd{zNc zt4YG8CZGE}J}Fw(ZldLtHR_w!;dehs^)HV;5%CqFzVME&`p%s!k|JGCVSS~{9VB4r zZ10vSGM_66BJ-dj&dDEXW#?_YUlyx6HsuWDFwe|MCnZ-dLh#x7vB(3Hg+D3j_ZN4L z8~1`X$!Up`E>vwi-~va=2P6nCmkAcoho#alXlva9A4|G`ONdY!ySp`N~Ji;e5#yW 
zoi%qC@l7V3n_Jt-_4<>;w`Jf-RSNCKat9;jJvqTWe)09`n(Sh{xRQ0jXw!`6BcTJn zMtQ&;y84RF&@`{JLW)LX8%YEZNnn|5Z37s{2ZN7L0X@=NS|I=`vvpCF;eZ(gU>>>7 zbJMmmeq$Spcv`Jflf*^EB%6&@1o?GqNyYR_qCZN@a4piLtvE}OIj27;yYt7TtkS*j z>0&>#EYWY3m>elrJZBtd<_A3c;|CcTtF|vMJBcWO11 zob3dvobpNMq3AgSk8VrE{yfnIBip@`C7GyO5znDI57+;ejEMmb|%2 ztxtMXIUv>VO({vFlWs~X=_c0w74FY^b+Gt44OFd8@mET7QM*Ybl$-mT*6-wK_@~8M zR*PQ_+dcQ$g{Nqwj4)&|#9(l0I+-ulW^lG0>%Oz3Cx5*VR zca`$#IX`+URIZ~W-n~!i>39J{XJ{E51Hc@Slj)J1^vLg0tAxFvq z>@$w%JPx_)PkOhY%emqu7&}S)LjVON9<7oN0SrLz#b@0evA87Q4Ci+k0E3+I+dlQB z;qYEavD7*5!zegD#~JnNy*d-kMi7oRg*m}SQIdS=#xZTUCZiXA?^c%9+k7f>LMii2 z%FS}AD5dc9yVs+8tNq<}uvc=Ber>C}ZZV$O#xce}pf!fFlaaRr9B$lm_m`aD=N~sc zGBP?E+SN%}3pQBba8B>x7$XFdG1Pl@?}+1iGjkpRIl}|gpW-0)$83+TYrhtw97RdR zDM?9LPu_29IJXp@pPj913OuR`O-q_dCmv+gz02Cl{{Sye_bbTWTLKhivy;HwZ~+(t zBLoIGAQDgD*DE4qzBcCzj1t^n;4wJI1JLB>98?mPcKMFcl2m7)2N*mKK{(GD$F)Qx zRp5fn?8DHXfByhkuP(bzR;dnmFL}x_YFae;ZzXQ(%FoK?vE^_|F?_vjmGAESt@6`h z{$?@`Nf`q?fyU8+^uY%s7#*-GJ;Mi#9Ool}jDC3Q#~+n6$-6n*#&L!Ro)06h>Hh%N zV-i$gk8zyvdww}QW36WzNmGM$zB$JJslU%per)}C#amBYCHQB4Z7K`rh zZS?LwphY&}W?B;~emK?1jn$pYz8~F^)c%sJ>7JMhVAF*&K79oB04}bsK;} z%yH~<&pZQ>j9_Ocw=_CVvyDi`Z9_>ZJM%#_)RddNmbzG#FXI4s3=ha1Za%%M?wY8GA*Rs9(u()~Rs=|ycoZ4;4 zxTOWlu90rmT01>n`=?*xcaN_;Pk5GEeZY&u8fC*Bk^H~fcd;F>J<9@KSYTu(6=!5= zON3bChZ0!m-F3W};3k?Xu z6zWu>ry264Pu}KlmD$4l$-VVnuXcU3D7-B?bg1%1oMNM7pDUNToaH3ko4;1B?XQ>F zwmvVlxQsz0iRQ3nMUhUzaG+tyDy@^iV!!N$AlHHXbDrJ&H>w{m_v>qMExeKJ2hB*N zF@~6)5Hf>>4i_PSVjDbHgMJ}eYNpQLQn_{hqIco*rnS0LMT$ zzm_|QmPdH*5w}L{@{=(Rgdql2an5jZ7ice^ZoKoBya%2T|^S(|iZxn%)&e1PwiAG8hUmR-!AZs0ft zN$5u-EsT409eKeV*~B4J7fn-&m%O>_C^)&KqkF9(*X*r2=$X8A^0!y$wZB`wzk$m5 zh+1nGHc;xeH#h49bIavg%`|Nq#pN^1{{Uy5B?`D1IWeFGnMqpbbZgxkQr0e`(h>;c z(_xV$5MRZ7{&~3w1dkiZv(1Y#ENsk5F7y$?%B|1*Ptr6G5*tW-N#aZ24CztY>a$$j z>DrR%7Yhx#+FVU5UI8OQfEb}UTPve;_&9ry;qn9)Nj88=Dr8`i3mlyEU`fU>F~%|9m?5`Y$fFywS-xG}hspqM zGCubl9G*JjxGFl8KX~XV$421lT1hTlV%4J7*0uN9=xqr}u2|@|dneKE_IoArA=59f zCb~po$1bOZ80D0{;z=KOlffqkJON&x3JIXv${n`k5;4eJV?A;^4ZT4*$;jsHbfmQk 
z;3;A;zyM>82MTfkARbR%g1b8iz(M>an1PkSAYc*6$?NZ2Dx{T3N}OAYOG!K0$|}v? z&dq4Q$ljdg6s(+-+k~#KB%PMMUYD|8ec8{xiD?_O!Wh=!j$MKvbAq9AKo}>Ia&U2y zb6wt+Vur>&$+}cb9Dq&&sT|;pbt3?t#DGOs`!sCgLZDzA0(m2;$SO&|KBNxb)+3!G zM_d3hKq{aNV01Vg#ycLPJe-v$8kebN>RBezle5ulDK@oQTHO_%&25E?r#@L(Mc>}l zZ)+(1otI5j?c}m=npxe5bpv(+lEt?YNX|yn?URgWr&__5BFxaY3`ed&Jm)y$+nn*= zn$^5zX~d{^3?7ZfK{&twu*UA-bJL90Wz4EbkPW0@V00sb-0)8zlb>7?a4SEB$C;$` z(^jo$wRD#K_2`wfA_>%}<2s(?n~P3-@^*SP)jbonwz|>Y-#L6@_;DAAY+F#Ywi0UA z32<$Sv5(G1A1$fd5g2y|age$YBXEl(QJ4Jf@t=jFhT{I-`q5T+L4}fN0zdE6MvW6P z26Ylj1X3X}`H1Vxiz*M~&8R+iCh0byl%puZ zQnR;2tkv&ppC_Jh+QnY23A%Mxy?C#BX*j!E?7b}CGi%WN5b@80XVa%L+r&~fEOJ$q zLj#oxNM3{ZNcm4vtAIS_U$#6&f=U|L?Z^Z&;{*f1Ao1zO276=lN5>xzuI(qg)3uF0 zM4A=^H1^F5GVk-mGTMntJcG$0gy0ll>HtzCMEQ5cJ^;VcZv^l{A5We=yV5eOvc~P^ zA;J|a<;aYvGqX7xl11C@00fy`D-O*jQFjM6yIBaet`v5b8k;tvp?wclAq+NtDUCaO~Ln%@SD!`~JM=HZ` zFLTf>8WAyDifrdm$Q^=({4&C z(u#dF)it%QuXFW3;>YlxuxR1irJjlAb2Wq89g8KQ`jBP%FpaIRe% z+D7qjwnHkiN}@TyD$2@LcVh%(PdZJ4bgELsc;)Aqmk>6T%2Wj=Z&YJ@}CgsY8jYYG3pp?Y`#eWhf$Ci9IoCmoRD+r zQzhlAyFoB;y&VqqAxPtn7;(^?WOLrLUi4jJJA?tb1=tJ&&QCov&tO43_BHO}Zm&*6Mv%0%~ z7JNYP)UvdOIG6|ARDrnd0O4>j*#wb=87mc?cL>XP{m$M z7AV0%3UXP=2N(qO{LePw3e_u4RQaN+y*-kPQg%(bM(J7UqP)7=_rKEMIF5xWQ=tlO za!FYv;^LZ$a+{T|^=n0?_D<*LtkzRol3iLne|b(mUPmNzl1>hI%x^-SB0fYapt?(+V@vaX)84K^FLASr0@^N zUjkm-Y2wP_+Tt&<>K0dlC7(pRk~UKn^kx>ALT7jKCGt$rHsoY1?+y3mKg7=iYQGPB zS$W~zR?d558z<5(CfnuedUTMZB=+(~Hb}|2yv0U2LmbP9*B-M18rj{EWa>X2Pw7fB^807M|M-|7GOsJ2U1eQpRwuB7C&!;&Mv$BdmS;m^UVs+#@vHV|RIb zH+ZWynn&fDhDT9@gN~ygUcSKe!R=8@F)>C=Aprf-dB_>$@&p@fnEcqdqkv5hVuk~u~R0nCw%G;FJpbqPTe62Q0w=g{Myn1I)qg75YlrE!EDr!w~`>iP}rnGjp{)TY} zB~dDO<@_Vl>*z3;BgX?B&bWYXTx`p}`d zi0zSp000sOanJ$z^PF|T#Ed1_r)l6|WFAHd9Ag{-l1ETRdJ-oPfH_`4Jb+aGI5^{x zz#ii~)KQrpMpnU9&fYk{;A5NsMhGW410bHYcTZH6qpQBxc3N4zI_lPsVw#G(c1qgm zt@P7Jw_laXVj*J;(Xhz@xW)&d$pnn@PaK>QNC2~7GOotP{Nxk!jHx7$2L}Tqo(TYo zw19>RLHWD#K*y;7f!Dbi?V9ZT8{n;5#@evc^tixCK$i0ClvTF33i2_G%Nr!Kghp>D zNpi97jaiX`<7prPjz}3i 
z5tD@($mDUIh_v%=04mc=!Gfe}&in#NL{a7+y2l#{`5UMNj#pXuiQqpAjO!jN(k)!X zvNfz%ab90CvDy+FXx=n=S=D0^yzv6^Di#tXil3lju0I(vRJ%bbykdBM>(!nPMe&S+*DVVw$}S!WudnnjEd{z&d{=M3}a_d zNd-tEGBPm7gOT4E$vP~M+1eYK|TWtg)Z46_nWSmwJ69}iwEYMxtf1aY}=*gOElgy?w7jPwMY z3jB}zUan6_imh6b_G-2! zQ1+=SHz-QcNj;+XTWi-k80UF6%DcCH(g`PypL=mUXCHtk1##5cUS33qv6d^q1Otzh z07uHl@;$n6E2YzXF+IVFOiaMz?-n`Qa7bK{gUKCmFmfxaw=6PRy`HkX(>ND-}}cVCmlzdmP$&KZZ1_CtIf-1 z)F*hjy}etu%;GKe4NQd!@|eI2lw9<|1ob?Q=Y!4;Fe|dqb-hX!JHy}x#^8AzU|^oZ zjDht4jMrml;AK#u21(Tj!q5_Jc{is`~b0_m16?~?y)!+3yfh3E&({h zpZ2j{wgV@})vD(SN1h2v)g-xOrOJ{@Yh7)lO?vG9dcJR!(n?ZQnrcciusY zTf24aoz81dxw@Y1W@76buS~juy95j#FmiF%zqU2CO%b(A8C%L`3dk6RW+W@C1t0^3 z-Gj~sbJUvew9g0ptLG|&4U8$;MgS)pcC%m+?~#nRHP}a_X*xO|v|YrIO~b6s6lx^f z8x=xd1fXC_9JkI6M-}=N9vrWm;Nd~X4^C8+V=rji9wubbLA;K7_U4trMXiZDclOFTny)7re zhj!-9YZQ@yj$s|R`F`p#i4sk?&jm*St`N4<{3oN6L9xEfT7I3WTuM22veIwv;E;d< zpEAuJqB~%jhov{6E(`KrL>-iVZB>MzhT%s)#k*Av9WO#E!ve7NAt?ezFThs3Ep|RBTw$iNU5=Cn@q9Vm>I$@G4>r>@DvXUANu`bA2S0SIug5%uoynUc) z*2YnB56d3%%_1%pRGhOHz#UFOVS&a#$lDwJw<${vio!f6wB>3LjAc(wTeUxHH79M8 zdTV0}>+``Lpui44hzq zLBPr2bUyjU)cjpxpg}Fn&V)q82b{|sd-Ea%X`Pi+D@Nd)uF$Wzs7E8^zaD&l(`>EU zD|_hL2L#FF2;`LnWH>7S0BFf4Zev1dfY%o-%TNPK|N-a`KJkyKiZ<0LG=WA(lyDe|3 z)6~T0wVQFkVVzYa%Np0DsH>=?rTfUX;ag~+N%pM!Ev658PO0C87IY#Y!%SgsizMXYnp5pT=_?%0OrA9Dw=Y-`_Y7nIrytkcoMZxNa;$cYWN>)KM&s15>66ssfITYWsW@|cM>iMCFMiH? 
zt#sLMrM3LOMA9+3lw_`zo$me}T3TysZ*{H5Jg+F=kO9WgoE~yAdUofL>^bEOfX2fd zfx{9(UcJf2G0*}4>w;-9vm+VCat;q{A8+T+BCD}EZi55YJ-GJ38!1A()o8{oPnI^5=1WU@-*olT??gl!P+Znc z-*pz_@Y_z#*4NI&l1Y+D2fkYXXFT!&IOp)sr9d2QW(9fm_0Bo`^T$8ZtkX8qio+xy zlq%<*y+?7##(R%*k9VuuS;kD#AUVb|3Bl{rlg-6dg`r@XI(eM-!+4Mfg z^v^#`*OuyDKhi8wq@H4mxWg=MCnbX5f%P1*MgAho7-nA^Puhz}PY`Q5MYfY3qi3aF zNdjEkUC9=qaImydyO=KGirz3;*p1jK3~tS|h@|rId6piVr%I(rxh_v>I*^YvWbLFJ z-89{vmR!!-J#Kc8O{d$hL;}r6W21(=s zNCTdyq55Q;bjPW$7|}jE{5sYi8)2wwmfDQ*DUQlrB54VKC_IGA3xE+DIb#>fPm*^` zp=Hzfuklwx_*vlt;$2Si*(Sbsw9stjLnOM6qR_LSF579D; z5b~&wUAy0F;c&B&j^B`G+~IO>y4+Qw>%wH-x`;%_z88#lrS)W}36w=Pa<_WmlYO%F>4Y2H+m%c)6m1-x)e0?3mjmXXNvMP$&Jiq ziNW;EMZ`$<@iLu-(O6&x+Hi~jXUI4OaLxjPGmcBA2~?#CR`!(XsKu(%uKAvkvi5S6 zl2_8vQNK;isl$?`N%KziB&EvdK2^(YRC(nT_SP|j^3coGbt5$L7!(p5gkBpeOJHQK z3lKm#$2c9cT~~%-l30>ih*B_i6PSenSM0WZRO=DP)ICfCGKpiqcDpw$pjz&A=WaJK;Xn}#=!v`42@5nt*)7HFM z(VQPDyL#O#X?r_eH+!pg=ypba>YJM9N$B-l+F9SvcW2jLh??T#a)P{)G6q9-J%)OA z{5zahYG~wBvTR{*0)1O9{WUTt6 zn|)K;)Gl6V-NZzNB;zG>$vopc5Dqd4&IfE(UDWbs6(xq|!+*3gp@t7&4_pqs``eO1 zAR|1S<0FxemyB_Y`X0QR&5%g)BLG~hoN##nkD(`yxmD~Kk_AkYQEAeRHKJ0IZN6)2 zrEPT8J2hvep8HKIbs;)(dfCaZd8?&;@6%>#d7>E4I0O;Y0CElneNGAO-mA*!>5_g^ zf=d8F2aFu&ImpLupGsxIv!VGiRSN~f6T1hn#t7-u=f4%n$*35&G?|Ys6>_8y06Y!M zagLwMj8rT$f>v=!ySqJA?C;2x?XN_>OkUpaRIMA?w40ZdujSg^wCTqDMEHHC`1;u` zWM#6tM~p$KK!~$IvC0@L7KzqaWGWV9kz33`jNr0K@K3~l0%{%>yPHYXtjtVhZ?arE z#PHulj@WBwjJmL7Q2T>yWMz^wAV%5xTOHcTf~0Q1>5g%a_K7&hVhB0SVtAj!T3?Fn z1)iyIZ*YnT4dq*#n9-SCk{O;EB!@Dhy6prjmtcg+Ab+pO_>Ngqr$W1(OdQp!O3Q%YdFa8*^8BGNl?Y8+MQ@67k6iA*=n~}-@eHFbH4DcwBqev5w=+V zRUi$owRaRMiIpy-fXd}^z!SAd@IMiF9gWPWV#@9zNjPvmVYK|Gb^-Ma0LM&c^nc

RB9mFeDKo;Ig*hRDw4K;f6*B-MHmh)hAXMh*WiB7{iiWjxS5zF^pVg-O2B3-6VV^ z8u^A%i+D<^q@&8Y=N@FICCjS1QcJVCU9Po071q2tE{;U1B-joQ1Q6db$qEM_PBETH z2Nra#R9lo;`3L0APH}<2a!CWe3FLvtOnp1A_(6)pJuS=E$o8!p#;k2u z&OT|2Xk3IPz*Q%H-{Qc*$O=i^4g(7BtR&OCNp9O4$(@u*lqyQ&8n#TAxjsgcH0Wf|QYQw1y);Dcl?l zqn)Rq1QNCE<@vlSL)lk!;XzB@s;cr-kHdFs-9^0~jqdf_^Xp|WoV}(Fr?QhxxXS7r z=_P4dq};xKn(OVwZ#1bI7E>e&rwnjT++%`qjs|_W!N@q01cLtn%u>p`fdn&PoQx5S zbUcng&O4lgS4#0T8m){n-W78!OXdKnl$UDxgDLWoG6_9SP3E0C6`CKZ=#_?Y2n~axs;M-Pe-B-`LGmPZx z(}g73*+J@(`H zdgB#B^GCDQAe9z13@}IBB!CGRz#|8oh9qwKSI=i$KSwg9FRRLgVOggYK4(%D7cJBk zl%UpBh2m2l!kbQ_M> zJGo!G&qI(w8RQ-qec(<1019}cS@3qZVXx?S6Wd?u^T#@Qa+rm}07SNv5>;L+n8`!E zSrS1Mtgf;7ZT1(9z8QtxY(jnr)}O-mNtCdfEN< z6EmTiVP#6Ahc_yWqtDIGotM73W8Uqn*`;@V{j1|Qh3&p9_-@PM$ATkmUsbWPoqSKD z2;zfUi%PMMBQx&v80wcg6pHZ58fBVWVJir{^_;`;{l%hOX%^S3ZP}w`Wl!{X zok=PP!3niS?ZBRMn)(Cb&yBSo2Wn=*v_YXFisOuJ5wwi7NB$jZzgY7m1$NPs!%<`;ksTpL9oB|PB z&CGDo#Ni%mPD)iFB@9g-RTiYulBqWjcanDAF@jREX}v|4s@O*GeDSYA)N;egE@r8| zThpAQCX$W)-*OGBFMC^fqFI(uQBDT|fX^e?kL8Ye=e%tb9Pktj;Ozi*^z}VKTw)WFab=%8Z>!*>AA`-bw5y&8ZJdOuk9{9&z zDkMRk5Q1_}&`BKgoZ|;1=ke=W0t_9kk+=-A9H{7Ucp3M}p0i{P3wY*n16Gao>T-%vlXSnjC6KBkgCMq3MU<^!- zo@9%-n3I|C6_>*ggq{$!n^x2&wbW*`7uv1lu`T_@{oTZg8H&=)BaTSnkL4-0Q34po zb1YE#I9@RQsUXoKlFPxGB5HnRt;1XEn)#C2I3=4g+RJCB+fQ)qHME{&%$HIfn<8Zj z+Z2AUkYPA`hOqA!onk4^H-w`j&U32J!)2If z3>0N)PNnJ7t*mcSRejXbS8=q}?tb%q^Br%T~@$cluF9si=;>49}#$`UGZM84x{1?PUBO$x3^n+NoHn9tz*Fw zEv3Jh@xZURpb?g2g=2)d3dQkc4=ji02eS~^JP$ym4?+OXA6oKrUlMa!RGkdAKT)96 zqt8-Nlc@&REamL!)2O9uS*o0=-`#zBFx(G;P8D-3MSOK-8~YbE3_Mn!ySj{PRD@oa zsZrmxm5+AuAID#belgtopNsG9>{tc<#?cIVMv)R2j3PCP);R77D03{2a^Gl>2_7^B zkojZ0MbXS)ovN!Q(9&TQv6&9$VmH27iDF&Zix(v4T3SnhV+4l_ka-y(5JovXXQ=PT zzU_vUc`SewJ4wJh4^6~#lafFnXCRZoCpG!*CmWbzBUM(bR=TSqKeVc8!rQ0BZOPpRwNq~J3Gav4>L!iqYP;-_a+SHx>`gJ8K zO7?b#BT<6r+e>{?Ik6b1sYlt$H7dpIrxc{0E9Pm*S-z`VJAC-~t>d@U?69E|5*7d` z`Po?`B(Vf2DBzXln*f3V1Jv}KZe8fCiaz$-o^Uqq=RJwr$E|XjMu&T<>gfb_vr5}q zKm<<4DACXpBJC1JJjnqoFjA7rV<+ydeF@=@hkBjH72^O5INd773IWbcr5B9-)h^qC zP^{Po>2l1Y6<%vM(N4l 
zm#WiE6Yc0?S?z}n8k5)(32gfu@y)4UlH4Vw%B~ma=#woeQ->p5sVZ)(<-3e08C~7`$`-R)^geCl zV_LMSVepQ8tvF3J?5^6np1;nGes{%ne0D=evavI0Q%}VA%8t&i; zI1A=VP#i8W3!;#42pf(8C`lR5i;bs2*1bH#6*?8#PLruoQmHk{{n`?&<0&P#G*#Z3 zJ2UBUn5xM`p96-x5^7eQZk4_@Nj0O}p4Zo7^*>bj$Dmoq_IoRo0zdU@YLQyPRy=LM zfW!dEQi@O#5-4XB_2aBoII&9rJ)mtXr*nP=7U}o=Fjd84QFmV%Y=@+u3uuh}yvMoM2+S z>XowGEZil7qgBEQw^|T|Wb}T~)2B{pt8SaROMTh3Nn_kBV+C$oIJr}l zy4f|#){mL)eUaiXjZo_Mw_1UX*6G?tGOfW_&|NT+c} zDyM~-bt%RwUi_-mtIXScwt`ZNj8@vF`n?v0--vuks@`47M6$4*gpx3JA&zl|0PSED zAi)G}8&AqWIl6y^@AX@yoz_WFfIEuh0(O&@0Dwp%_={tlbDHk_C7_$jw71k%lt+fz zrFO`!k|ZVFmP{y_eo(8HDYTSa@A_-Qz7MsE-3laT6;NYMqkqa&5bd;Z$0T`Saxe;< zDXx4rNsYx+r-{aWV!j%0_c5DyYDz6i3Q4(Ly;NH0lU;Jf=hQOy8dz9GSLIyksY;S< zMP4-;<&@ryq~g6BOz|HKcuq@Q&ooeyi@G$*$f`n}h0tUY9PcS36=n#`jJX4QQq%#4 z&ej?1t+kCMjqtV?Ce~?Uc7_L(#pjEe5u;=gO9Xoh`E8afw0j1rpy}~hOEg%rmO#h^ zEXaJ*k{N?;495-k2z&zJ#?gno{8ES9>Zjij($B)OF@?H#8ERr8uiV{)$y zZ?YzmGi_`^D0es0R>4Co!c?gkN);6}(v>&uprE76$r!n)$D>kqeQb{kHKmTN?P}Cm zdCBsq^2YM!<#tz_ZY}S;wp$kOJY#XDNogIj%OgqW%`C4fN}#(a1epsvNh2(oD!3R7 z#N{^oXMnyUEYX{5tJR5QfVf8zv3XZ#1ItNRuEQtp$UicIHmGX&uIE&m-s(waB0^cz z?%-__IZ>Pp?c92fOCC;ZQ^US3OKob`?JntPqV1&ePSz!?F4Er{r-nBRbk^+Ylm!)w zWGe&YQqQZ)4&#TdJU%jMC^<$BE^C!3LYs`Dr=^ryjnAQ3MjE9UFJ)3Il}1%z7|Kce zw`womyU`@Bo%TPv)qL@iJAe;82_JzSef#ms32;ck91O7>^f<+e;a&T-yXQ_&tJkL%;Ol;X}6-C^tPIEC3~)$M*T`u=A$W1sPbAp zEZei?U2kOj{-%BN6^V%C_aqFSaez)wLU_pQ^fk5N`54a#$iPBRBPT1jI46^X*WW#> zp424^3$Y~jz~pBmoB};RQ|Zp8i87CEr zpUm^eU0tet%y1Vx$O|aCxs)}^nWo=%z`#x&x zwzu`Vy6Zk6SinfzEI2GrOyqjwrU>Mbp131EVXA74aO`B1@K}tu1Ym*Oj0|uFM|^Qk zznGRNWFTaotO&+Weleai&pGLVjx$i5lrbzwRlww)a86HtIr@>?t$bc95mKa+=ZjXd zbk`$xx5cgBqx-IVm@GUY%%f!5Nx3An_im%5?C$#OqirLX@m1*95=n|iS$xMB8xxYm z79endu>^oe1b|I^J>l(BP`mMWh%T?DOAS8a^5P4Lrh@7mNY^Ax1NiH~x(9`|{WkkivA5Kx z*Q33XEkX-v6hPLGZ3?bM#zb!u$bpt3eCcpL(9@-JU~-Ci&SOrESUJur>Yp@YDJ9D8 zr0LbWTG?{wle?5lL|(32!8U8QM7F3ZJ@;DAnfkeS7wz_^0AeiVtVteGMnmybq(xYj7>} zxDjnUN2)!;N}7(LcWh7EZLAR$}we)BQhZ@TcQNqe@u>AvZ^$&iBrQ=QBI?v;Z~qC+CGk1dV}AgKW1M$$2~;DQGupvR<7;p<7DSmXjp%7zR*aB!!z{cy{V1 
z)1rHMH^(9Zrz&xfGC3y$jz>8pkV>+XOU1e?YZ^V=5$DY@yF?Rq{rgD;HjV1)PDbCE zPb>2U2bS>d%4pJ?E15#87FdJ00}GPMo7n*&R2&Y2Zyc_DB+U%4>_=HQuSMF}-H>yG zfC%TI9=J8bh;^uAV^V}G#&4EWgrf&eN-j5i%WW5vUo)mMRjIhbZAv_`=1p@Zl6=cX zwe)Mb;J*iaeLc+g{tne|8~Z~=pX@PN$#F9@ZdDpcmfm>`u^F-^fVe4f1hGpSmz*KU z`1cFjX-#u)EXcA5nt7ujtXu9vhv-y<1U|>y8=+hrj#J||hBXfiYc@X;yg3YZ_qu(= zQ0r0IiA379rrCg-QMHc?ez3yxs!U7C88R!c@7*sT*8U`DUlV>Dq;?)>+aA`@r-{{r zMiNv;c}pKPVtE7-$G9Y(hgFqDpTH3k31#rzFQ^9B-7nr&t}UX4xwe!Y%cu6wN*FBNJo^e($W=Cx92Rt zkWo%PP%(k)I<~VH*}|hKaTwZ50CyE29$RiOJL4qa8uD)kB=BgkTd5_|AW$eE$@1eQAZQMy#(l2(kbcgc4v=+)YK zbb9QL<4@COpUYy0V=y@Yw%`EhLE1qB-@j}z^$jOZQyPVgCeXW!0nTtRtW}Ojrtj-n zdPbil7U>+3#vK)N!klAhBXI(+%wIMe1$YN|$Q=G$ z*H3owNZ^J9w_pGtFU!VoFh@WByc`Wy!LAWZ1y)qYEHW4la0-FW3GN3YkU*{BTb4~! zha&AX{hPhDc2|y_uY2127}SGX^0wXW724Xhw{Dy3+=40OmEU1(I&qN9y*TTPW7K34 zdUI2x^TMAn9I)I5;y}ud4(+S!jB<0HwKks{d1|sm0a1eB4o)yIPFUkT#~gv{PPv9a ztRvh{P7MFCXCz?fBd0=YB99Dc z?YKU0I*&&=95Li=Ab>NUKu(ECRvas+W?bc%ou&CbfdFR%lY#CAc_OMtO{qz8uXVR% z^@`H(T{}LDTOAcrn~mbR6JG`5rmU5;cUrA=-%T_*rqSLl%L{$L!Fha+2nRni6OqX! 
zi8wi9k|?c>-Mr#PF6v1HVem3T0nc2ITyz{)V5{W}%Ej1@3BlwZoe$TZ{=F)*$m=0c zM9SPS3%rE~1RQS0dK{7u(~8+vtr$*(WeGw`E@`JPdGhToQ@>qZ*QJFjcvS7pB`ii!UwNm?duxkKN+oi#2a01N1Tl@vEX5psD()lSkcC?r2P6V>kTZ+{ zxQ}dL5;?5fg|*mNC?w}_RbDq_9AE*+$9|dVU72Pot_rj#7gCi~??zFVB{@e+#-vt~ zy`GNl-5b>6#^toJv5Kuv+C|#da*ec&wNC5jqWPbZ+V6(7jR#Pg$56AL+F>NPj#MFx zO7bGO#&E8wxk34Yf(nL5Rq~G(cyVvhl^_lKhf-BeRN=@Y1xOt3$m2VUbJz5r;|7Ok zDQmdxqTC~3?jjuLeYy4j8)-269&+GDykf z3>;*S*YYfUbBrX^lZ{6nS-~f2lU%9&pLRRDb357JqFx~)Q_=j{`AaCT`l zxl@hai+3rzq}{c#@=m4UE6CZAA}Z0oLlRUF2H<%GfH?=AF^*3LvZd4KYgXR4k^zu$ zv}{rVE`5eTPbYwVap}{7y3rEdr3(tGwov?` zenK{YNb8V5!2}R-$j*4H@ZBo4=P5cemAO)*&gUz*w;9uIr6~7Jr)ybtNSQ+ttc*PkgG~mXBkDS zM)R6YCbUqBPthIHRPibtl+@uUu6f2%oMR`;@j2_~beCIMBiq%ib&Gw##IljK4&@FA z1A-SM@_z1kZ0FLkW=p*)bYdPzS+RvILBZq@c+Vgc{?{1dyt7QzuAz)c9Ofs8fO#h+ zK;_gCkVpZ(U``Lr4hZ%AFI=?Nt(9MIoUxGcj63chD@dPszdWM=9Z$+|!zb9o<0D_# zLR4zdt1CWQe9E0!HygS9YB5UBox436vpj5lDd3ej$x5{ePnuDLTY_tq>nU?2mbXo4 z?Q1)-)x=h}s~8Hhi-ZiM1z^np8-s38z?B#TjEv-PFitA+8+}21xsX2f`-}k$ z4;VQIAcAwoFmOf>9eA7^G1TKn7L_^>jG*CCFqKKo^1ga=o|dw;xl_}9bmpUttxl{b zLNZZLn$S^{-p>A9|q5E^MZ`-f!a0$V=EvfNgpWO3h-3@{(uq| zyz@{xo~#a&b1a5fATMv`HW{LbENKq%zzyK;Q@ASaCvGxF=zoa(Gic8{C8fM*13M>{ z2+zz;&B53)Wd|hSo!Ke~uZMh3r=`D*b-Q~#sx-UAwL}cN9Sya;(v;b>d2BAQFfhSe z2!w1;$@r5XrIorUT@q{a*~SG^G+|3e}?yQ*2|~dE1cq?KINHy_tTCv` zV4;Z4Pf|`X^J9`o10d(19V_E;6k|`_lBMsYsl~o;cS|=FcAD*b>(f)}s#I};l6>-N z@Kqybn&rQmTB|44?)qz;2A(e7IAaC8#z?{@7cx8SQMlk`i31#W$4d4u3*6fLHdfTE z<7>|n+(jk4rh7;V>GszvvRhbYX$!7YGM+5z0>_t}GxZNmKB{vOg?JTX#lUyJyTjf{A4i0!-rz4ZcE6M6G zXq`D_m}u05XiBFqwUnvToaarqqnsK|zNtaV$)6p1G*v@TZY*@~mZ%vH$}Z z$;rkET$6*5?~ID_>fv!%xk9X`CmPnNHzwTFnpSdhNyTsOCajj%GkCf&oE6llM+K<#U?uw2uM2j;`>ks}sv87$XE_ zPIv(Elk;TYWLH)bDXmpFN}9x1mn>s1dUV`Xl1k~nbz14@^}Dv%V>3Er4B4+EO-t^7N0ZL=z;xkcKm*kcMa z$j4r|Bbxgxw}UYgt4YqRXu|8AD9TWpcDx_D+*)0qDz>*<{KhwlY1K-dImSzoYs%u) zxpj(MvQ~Qg*5$HVb@9C`!Insf^1{)$VSq+PLW7(#t93a9Fvbr&Wb%I+VT(oZOi;!O z-D^75m8V33f)}>6m-|yuVyJw=iEn2c9EXgC1N~r)%%*r%&E>Q<6%` 
zHiV-qC(~G~-|amaclLc6uCL?$V^g^|vuU;(Q(Ks)dF8cdy&@P{QU3tZq>>31DGRE! zi^u{bQ0@2Ln+DUos6)mG!1;bwz!?V(+qmyvApXr(R`6*0&Z8h7Xt}tyuvaeU%!&fn z5;#En0P|vvK~)IHb_|abG=F1SUfV1YM!~m%wX?K*!I&=6N#FnvL5$#xiuAY&ouPMD zrY*Ml3L%bnb z_K~}eGBQGyZl0ur&j4qGM$^X&Wh;4-vh5tI%ku!F5}`@YJpd=3Gn3Qrv3Qu_)ME;i zD76;j?Rg~znoY$?waq7cB)i=n6fpRT({Z5%bd;wS?G0_LZ|_S@RkVIv9>b{EB-Ta( zgkx^vIBX_cAo6jYrw4Wb=jF(6E7ZIVCaWw$7h$lwDSIa9T_1t2$G z(?aoOuB~k`xM3FNWr40Gg(H{haag!Oq2tRmRCukm7d#yP#A1S~JasnJ4NZLs4 zk&dIDJ*&aQ#p05oHT8v2Mp36tNzsFsDwA-Gl#;VnwAU@}`XdIh7+R5Vo2Q75+^TL7 z_+HE6zV7Yy-0@9g;ichBLj;#Ez*ytLvo|FCk`(NCEh2-CI*Q~qABOsb7JhHp3rc}m zp>T~N@Dz>TJZ$T@;Xzd_HnXQ%`pU`fXOUx_)njtN$tD7VLE5auVRsS$Vn76_W6o=T zQt+kqlOWYJyPH*!1gi2um?VM5^qHoN$r8sbXMA2{k+hMJ$fZh}^Oi-G(Wv0-udJvi z8dsZF_gt4WrlWqTTbWx{?PJiPnr1MCH&zj;1ux!7Hr4MNb4Bdl>z3(k^+@;)d@*_? zJ7JL(56%oujHDhx7*U^Yr=AWf%6NY5l&%~hI02Idn;kG*9FD{u=g^A#PUFHle3CZU zk~rkcPZ>{{n6tSE*k%Ni$#6+#8=1LbhcAKQia8cIl*J%nu*l5BfJkG7G6q&BX?7|b zIL<4V6TuXgZtX*;bOcGc~97gLGjPP|fzgK9CHD!+4h>lTz+ zmG5PC-kS72K8wQol3uF)mT4mxIT6Zt0&qGFq%iOIcsM7&q3IqMcPZw>GCHs#GZ952 zGO|6fYU;gV2%3Kx=wjErX0*d@QFLs!^1r?2*K*3Om)Sfn$FSP91%m)3Wu_f`0gIJeVCGEG7x1lE^BN$`A2aFMn^OMJVyQM`W z>hY;$1wbv9$OAle&PeUY1B?M){{ST#bR$QeG-n7eXHI_7^KC)%d{2`0OW)Qzq>dWC z-9bvF>Nv_Va+A7gxa-ZNV`Y78y4u9@>-v54ro!r2(qvux6uDwh`E0>hu`C3QyKxJR z!9Z*+c&Ch`l3Ro4%Krdn^3@<&b~8kayLzJ)I700!;InfhpOuLf(Rixb9UkiYS&m7` zv_m}cz^^p&tYK176h|SYeq`DjBp73r2s7t;{<~wXL2Ib#)7)I!$z^SCF`74t1>9T2 z<~Usd*@GV0w=+wU2_9cRJD6he3jr$6pOS=?CmM9!YH7*ExYVlRqUAl;G~U`Jq3Pfu z6;=|V3YAloQpb~vTfPx*zzwC*rQAPmkYrF4jJ1g<{^0?jAJ>;0OPfB zLKz^FI4r>A9(!cuk-#4=MtQ*eYtkK zf&o7LI3Aqw_38Q5HoT28O9DvjcYh&{F`iFP!mG=F5F9XKa!A}1VD;pTj-4^cJ!;)( zNji?SV>Iy)_PB#N-2Ass?)yPr>)hx+TA@yYC|^dw2YP<5Iql0 z$BseA2N~y@zoB7_3grB%p_Ghc8RTI{U{5SMaa@+IsUQoquNdPXnxBheP3nZXIRxa9 z*BJ|w(;$Fy26z?En&$K|PNyu?FE4uVY2V%2@ON74<=1BY93~d8vV!MP+m`xC%S4r= zl&x)SwO4yRO8TN0n6p1zoP&eZj(VP)az;mPnc+2AUu((%5UC3Dxe>sO<&+67A8b*5 z(n*p;RSL|+Dmfn|e%c?lC6DaM;C&xcO&Iux#(qB3wG9`++I7Xl*=d$(VQD1#rnjiw 
z0A-bJ6s#6dStQQaMaljAQ9^$Te0lqMd~o=YeQ-6;5?|Tb%pzND8%fk9veT{Mg%`?+ zTuW|B~T`1FyPi;;Sl%Fp?vz7UEjX3hP7xrA`3$3rQ?BQ^g zszP-j;HMe#yU#|`QI?IVLrYnxwbdtM_ZQ-?#$N;YHttw89eUF4?ER$$wv#j$^G7rX z4IKL$V&+Fsf;N!5yl?=L&bvNT_-*1p_*h>Nbq^A0QR+6DHKpa|n7Vw|VHZ-pl0~<5 z)a@g6g756Hl$z3AmqIfjjL7H;SLB9`@gv3_CD!!q2S-br?Pp)q?R5KVbV={7E*|df z4b<$ry!k{C$vxzXxBCoXM(sb%PJxj*T#d5E5JKFUm+P1pb^gV=O z7*VSj)tutor_UD~E>@e1V&J#BrwI!j?LjjV+bj z-CMDXZ1Bq&4+|M0l0C9UTsdguTmp=wZd3KRI;yousa2^`Dog7;^Q%&$jncGgMaD|o zCYx<5x2?ihe`gMCGEtIC+ULhI<<-)W?C+H9TN7NiW|r#x6|+9lYGl=6h!N3b86vkk=&509E6#X zH4*l=z(0ys{{R^LD=wd{Pv&d7bg`SrN~ZQ#jbgN$da0eH0vTV;l&n*;xRGt|jF8Ri zVj!to6HA^LUv*M^u2P(R#Va{C70BC8^4jgI-54m!vQ;G2HrFhn%}vXla<7g$IIVQ| zZ>`j)SM_W~1ka^5dS zowaXU8NvNsq~q+}{pP0Ro4jet)N4;wLm4>CK%?JqMXlA#aNC5d$MI>`Z zPJZN(yDQ}2p=BY16+Z-czr%V@fILYiy`hn5qH32*r{CzwF}Jd?mh$FF+WlSHR4}jE zP)8H0F_wQX&3R>M=la6V@uYyA=OldSSe*R1&M*K6p52Zs|Ct(|pUDgu@b^WXRjVVxfwaPvHxW7KIvq+MFxYsaB%qaZ>hia7jrg1!UHXt?v8T zEpbzo>9{GzuPyGIw=cTec5Q2KUngyTmeS*r($>mLb;32Bw28@l3!7-><2Cu1JqT5>O zu*-2gh9wXzD;=g7291W@s<>bZ0PiLdB%OY5_>=LELw!F~OFd%3?b0a3alp?pNt)#c zlRARC14dm=3ET-0xpNyCp++^;N|uvxY7V3mlv7P{7;BNS1gWZ!&pcNtGmjCRdgw z%#f8*N)kyeA&}r#RvLvo!OsoPR+FU`g9(?`|YOF!0oUx)&EvM!^1ADHb_0dhy)S0E`F?eKJJ0lK#~Q{{UP2$r;toU(033-T-{dleLBk9PIp^zxd0nN2HQ35Hq4y`%jF)plS$f`n|Co6MuzFEma(Sp(1@$BDMg3WPpj1@V?UiIb8cO<>l zcB7^BTQ#Qo=ziMI@sidTP1jLJ44@T~StE)j4Ve}E){v+T8wW#`-iM5gS2KV6L+H9r zscC0n6%Ochs(ki)q^P%KOz- z)B+vP2P$~!v||BrT#IX$uKs+tcVQ0b)EnEU3%_XrRiz<#Kx1WG#6y_&nH<$s%J_N} z=|5{V3C7MpRE#2*yKP-Z%&echuG+oQb~-8Jac-qIQgKk`=9*Eo?;A=nib^fr{{VVd z@1@V)iGDO_mKuGmp>AK;gkw6Skq|U6nSxHtE>GmEu(sg@uy0O%) z-APdKGc0rNE4U*@(iUPq;RweE10b>%{3g@9bA3LQAD?|8xQX16z3`}tzEh|zAsHJ_ z$ar8_ZDMn)qI`ANG#6+s^%mNb%jGn&hKL+2JjBL140(vQG8pb=8&#E8pB<58^%XUT z$J2Es@Io~FrztD97|KywS}M`%+qX2k>a2dl{{U@?N+Y~^47gwur~?cLY#iqpBOLVURxUM% zh-Z2-1tCET$s>+KWb`=i?0B!tJrnkd)Z@5VrJ6XKBP|@QJZ`yQ2rsp=7XXGp21&!? 
zpHFYv3Pq0D9$9AdIbxC^h86h$-?_k3hb2#RM+fG>K5V#8A5O0=T1`ezk*9fEm9Kg- zjHZ?QNv_w_>8GAhd)19Oty-KMRCd#wSaUViJM`7<_cgDG65>M@!m14Lauj2NF}c78 zB&p=~Jf14_H&)(ck}HM;oB}}~crfk99P_s%4l`d8{{X`G(BiZcUEe^nIyTtSP$h^X zvXDV5^I>vI?+O`HJMt@MMEKEXsYb%m@ovzZfJj_&tG9_b<&_u%o!J9$Ae?bI1zK{C zvwW_}Mly@0swK*=2};p>>bGtAF~d2)^Triwu6gSpX8GkSC4Y9!B(!}#?N6-kuF$I% zOd{?hXdI{=TP_ATB;cQ6#~4#yL=ZX>LucjSkWUz82ab7MXE;2Lwc-{(7PmU-_rW8b zfN0s=i0&a`s2VvA!Mn`djD@!Yv=j>@Z8u`RE3s(Ni^f|4oGY$Q4hd1_HvsXHdJ&V> zxWBTLDY@5Dgl9G(t6cYb$#M$=Xc@?nU_CizMEa5kv`4oK$&6Oc*A zrUibB+-r7vy`=H3#a)|na7K2VZrVr(kO(8tV-@k2#VZuHzSJfW#Num#upPUSBAD7Z z7*r&c0PqPTxj#q3Zx*qW!`F3VPNWoR%8Z(EYh}u%%Ct*cHnm!xA;l@zp-y;tUZphe z%OvG#a_JcLcHY|AYrWWD+J9p(FeJ=L8Ty z&M+8qI`fMC9-JqI<&3DxqfS4(Q);5+QEko-e>T(Cvbp)hsMYp&bz?O4akN}HT6)Q@ zXLOos&%Nztb6U5D;kmbI5UWc9vj~h~mBQeZ;2rE#cNk-U6lZO4cOD;EQdBSqNy{l! z^3<^1&?o?9iT)5d0|LFFnRj4eTO5qBAdF`Rka)#8ZYQ6IH4{T9)>eEeNYM6{>NIle@EVyR*HSczkt8)2f{6O{AYS8j*~nW|B{r zNnNGA`d<1UHQo4TTSk)*IaV#ZX8as6IV-e*lb&;doNy~UPHVj~@#KnW5;BaQTCm#6 zs%hQ;hSL~k~oujB;+9Z zjs^h=F}sXo)xKhSSC>;Nt44I*8P#;6lje-0PH}$ilpBvMqh{N^@6zg5J1FLItd$H! 
zDvBy8w^nhL{u?<%m0tQa%X@9zoPM3I{g&+#`bQRXvMPxQ@}ZITA!4B9Z(JXdU2z#= zs@N^sTv`}FxV4dfKv->&SR8^uS5iX)FhLAefzC+l$z5oTaTD4+5koO`FiOO(#tcH8 z$jl0Vc_(BoOxv`&~2_)Z5qCvZaT1~Ti?P?(~63nHk4qOx|E|B#kX~J zC$02bJvxDOmz#mZN*p7OG8hB7e(~fM8Q^0)*!2bS zFt`l6TbbEqf>9KafLbR6ZtKC@yks9h;GRQZVV+@!!DdvUM-f3+jX8eqoi)o=68Cg> zi*4EbR^@tgXb|SZHHdXRl~^Q{p{jDxB-E2qi?)hME4O!|J~{Chf&5?K`+JytQKQ4D zXxinK^`Y1NIW5ew*&B9<$2`{3CTt{GT`tlIozOs@U!Myy+{fm=6wz!Yxw^c$yS5j0 z%8#a6T(oGPZAnp~o@6bSF&sQFN(^l(D3%$b^P{iP9a32Cv>UfDOL07L7BG;EhB1K5Mt*v_@P?P-m@cm4&~E%Ut;cZ6H55d*62%R$5s$Ms*(v7Vs$CmbA3h#R+V~)j2 zg+6>GIn$*jtmPO=F77&0id>Lx{{RmC)sHHJ;fkpjYYoJg56Z|)sD!gN@*xZ(Vbw#l zOmGX6oHFPyf?KJNg4Fxevq zk~7yl0#ABF;*CDY$P310Ckwprp-CD0>yUZ^q#j5I10%g)vi!c3ROw;x^yAGXA8AUJ zNJ*&KJKauKdQ0Bs()!)MKBZc)#$do|k2ODpi(l})jx!Z3bb zMgYkNU;rL~V0FMgwP9fSgFBeW1bpDCoDAnD7|%Ju$T;k4n7Z-an{15C$lMc{er4Q9 z7%HXQ43GyL4CI^>T$Zil#hxaX7g*UtC{VvM9Ai0eo1yBw^N>K=PW7|Au9gmrD{@zq zmEfG5n&#zP+|-n8({1Xk_3LY5vikL<3361WH!iW1qW&G^qW8YmSGJqnz2fac7!AXM zq(Kw!UZiXeWCU+qZAD_hFg#;Bg?@B+t5};>)%9uDV0}winoD(5QMS}vZM4n=fEEb@ zh4O~Y%lok;VVmh+6Znq)_*_D>Kks9i7^!9d0A8gJ%&ZxK+{KjTUvUfwW(VgDH^R5i zsoUI0{{U!;)@a?BhF$882>Ds$cHkBna(5`*CQjq|M-}7VKCKEdr7m1Flw}$&O*G{e z+`XpOTD@9n>i0iG;rzc3g`qmN>P9s=ZB8ot@~gJwwD+FQ&dTT3J^tVcxCJC_RtIq#2f;4gvvyH|-<<3FtHZbDqMx^2+$k zH6Lq)bHm1QRGN(GQmHCwLhaR2jGF0dH1+pWcRVaj3K7LlrBx}yP3sn=3CXphicj82 z^j7U>%z6#)iYASAZ&y+Q#YZGIU8xmg5_zmThdNh%4;Ap>yW zmCiX_@tgLVJ4&mSV&n{if`=VJC$A?R^T$KLt#jehvIUprB;*aClh>mzK|J<7atIv< zrzefB>DKq_#V%LPO}}8YYH6o!8`e>Za@S2vT`U}DdU1+v#ibcqYe?NFr*&?s-*%>2 zc(Ur;goash)j(!YGC}_Ok%BnE2cAjb4zEf0qpRuCCK)c&lbCKi<%b8UZH%tO51CHj zL0lZE#d8<_8~w<50S_2l#p>veoLF%Vh~dT$iS=6r5DKz11e( zlTJF`%{AY@)*cf0p<$?q;kma+C7BRxmT3OZA&_EKF3#muLlDZzl7t_asaE|Q_;up# z6T})?X*#U-GS1e?6GL-7tk$;>eUT$f29T(lIDw8VD009<8zssJoK^tfP`J&4{h;R%C@q zC<>t`Y=y{njSKjHi2fLOr^3q8ye+I*d9NB>S+4D>sLmQ#n1np#R+c3WU5d_nl|OuH0+i8H#XOy@vZfp>a>V}vL^EpJJ|u{ zfGVK2Gk|cpJRYYt9gm$H@r6blDR6;_KQ>4ua7utOM*xlpD_-BL{4nvplCvAlDK6lT za|txti0_KVGE9>xjweM-0CpLemQl19P~LnrtGK3=7$u5CUCk7+JaQn*v11IXj)A^x 
z}m@2Y#-jqv~`-R>-6gkA!%x0tW{#AXX1E0A0WS@#7$I*^PQHrDaIdrr5z z^CgPSEd19pO%yPs2rfAix0f1-*Krs-S8m*FB?fEWbxRo%IW9=p$O7yu%NAlrc?>-c zRe%Z!AXh!5>F?qQFD%3h9JbP;k1%ob5X~SvT!{Lk6^xSZF~betKaa!vOf@_`D)@)( zWTfD!%2eUYDBs%C)k-l+?pJ#1wpWZMWkU^46ZfW+?JCe!Efl$J8l0_O-7S8pr>j0t z*KJnTITV7R{lmd*gWM?rM{KYGvBpj_HTn zQ(jptmUG%%Tp0nlTwMs(G>o};5uhX%VYC)fqn3~Zvpy4h;9W;exR%Gsl$2-0;tQ0C zfM5w(9u?%03Iap2g4+hx3-ei~Pik|l=|>ZWp%^Dp4bzM&E>`2oHO!OMr^{qt>M@Q*gRhjGScSD=RdW{H?40$`?@b!OVc?1B2IqbAV5AllXVeSamr6 z0Ozki{eE-$ah~1lf;yNzOBMbc5s(NO&r%6IkDxi^3a0v$jl!|^%MwOZ9=|9DIriiN zF;iO`RYoh>J$@PTwyGrKX+19+KP#&rS4#xs(sFGmrrMRz2r33Cf%djNp55*Cx2B^%)LVamGg-i3Fa<>HLYT z*)AE-@ONNjlatd2uRQnf#(LM6iN;IaUh3y-nmRRo5=z}&p8Bg_P0qN`rtM8GZ4!Rd zMAg$)Z>{%N-^$JH>sreg!tP*M)D|VOar6TKU}q%c41hXUE2wKSENDwH+(%vDk%l1W zIqUxb*TzP9{c`?Tkw_(ixEVW+4hT8JayU4^7|$n?E1lMFh=L3pWXF=YC*~VZLxan8 z=b^4ADLRST)=T1T-$lP=Yx*vi)b7L4rzJW`r6|RFitA5zZm!DDeSEe%iFJ&`3qIeK zMhU>cLQZnSZ$AA1YzpIbEldKzTsH7ddX*W$83trN!g_Te&QWtM4eCY*thf)K^# zHwSLiaM_IhGo40DaV^~Q7~_FjNgRSos`n!d&PX3BsLQqx24WQcq<_IywRmp+DSRlf zk&U`|d*QaPs9ZdU5f{~U)Ra16Sy|s?tfLF&NW~*fga{wd{{Ucr_$PL&9!XVXI@OhE*{Z zr|i^Uvz#aIHw``1n|d{SJ9kXzoT=fX8$q{GxkjF;)tj}_ZL8~~rOc9A-3j{>{>+;0 zli|2NHT+AleH&QTJh?s|_-gLff9*>x8hdAsUl40h+^hpnxxX?jnrsk;olF7CZH?K! 
z^3wc0doHzYJ4kVDGfM&nAf1T&N`k{1Kn>ZLDcV%9BiXe}c^S=-4=B@^$FeZ;g$js~ zs2C09h=frf!i9`Q3hfB0zCGes)O^@3Y@xe`Sb^HHM{~bxZY3TxLiq!Njtd4Wv<6Ze z^~|p)rwlb1RE;;vllHTtB}+vXxttVTNhPzC?RzA-QO8ogA+C2u5vbbIF^W-(akO3K zDLZJKoSphNv;6|_M~YVaN4K?~YF)u?3^EMmB-;`u@JoltI2(#6kPXZgi4Eqz5d15z z>T}%O+C=wI+$iEX9o-6n<%El4WkYp@XZRBlvfH;>bj3eCXqFq1lX^qe4;7 zGhAe*Nh%dv%V0<>ps4!0LAbe%BmUEwT?iT8z_Z{ke(}Q+K?D`f56yrE`B)VJ8c;lVA7_x<7ZSTi;{z2gcqLpH$XvbgfP1 zghb*gtz9-;5hQCfL=DQ8K_nR=%#!HsvqJ~)e9&~^LWI>I~ot|eI6 z-b<@Vgo_=#@kb@P+_Wh%jx?0KTNFYaB3R~6*Z%+%T6{j&?XDA4veB+jndVNC$88|R9QBD?Ml^7o0Z{0E~*28H%nyrH!3ktLBO5=cjp9wU>;+2!qT zESQN^41}n!W{x^|tTMvG4~eYSkc6X3q#J5K0%{Ug)8dS*v{Gjb>C~lORbf%VPAPKU znu}`A?M-s?Ut7IfdOnA1Wvq>FNVbbrj@_@LvrBnGAwe|D50djda!oeZm>)I1eXLkB zD@a+7iujkqvuXG7OKw&>U-hLo%ksNiNr-S4hW%PeSr>Ly zmnj;kB$FWzwLlz(bAsm^O9fi?2&99=dT;L;7I~rh004iiP0Bz3@wwEEqi+O-((ciou~xIu zX>R^Dx7QkZH!ed#8hqd}+ZvKfyJdi2i}MhT%|GlGEWz>$=lUf5&(6 z-Fz?Aq0?^U0`E<>iCQa(7C$kgj$4Iwj#$AV(}i_qC1Y$b*wye~{3Im7J%E*#9k@j& zdzBa{{{XyWB9cHmfmY6U9PSPEMXl_br-5}V24z`4v-L9$(tNWuwx1|~kn*z`9!QfL zT@LARqXY){FT^(oOuM!J0G3Z2DQ`N0a(1+=6`hDbD3SPO`Gzn7+)taBVk3ZVGI6I$ zapqJMojJOban{gLk1}rVH%TomxyMPyy*nscQj=-jR7y{ks7hDSrq%4U(Y4j@YFA{u z47qc;j@M(Ba8#(sDo7+8^zF|XucU4Dgl#s}Wt8!0Z5~;V@Y}~82_*0UzyzEC26|)U zJG=Isw8E_Ao_H{s&P1|A03#$4#|qg5@)fdhE8g#Q6t}Ptf}vFvbI9Bf4XRa%Ibx%5 z3~)nq#wldAAgk8IR;dnuby9VmNhv|{wxpumQ{5-E+LrgS#VJCK7`eh#9R3)&H0@;6 zZroB`(!SQ}XLy@c21#SLn8<<14ZDQ_NqwPTgTH{>ZUEDS~bB#3VuV|Bu6OGjU)RgS4 z8*gnMt7{^?&Da<+VFAF%=s7sxxg>L*qaT)OO))Y2r+RYA>zrhAF@gs}fyYp3nuL)s zR2U(cs3!zx=EvOuy5IxAC)22}8*D=vE3*ZGBP0+$T>9kn04VK_D~?r^+~Zn|})U3_;3Q?Yp?iy&osa!6npDCZ<{NCzNe?`JiR9)=#RC_0s4?V&ka zTAtFQlZ#3&lpT`MYT~tQ=c`>M8no%gE^>@&IJe6groHFOyLaZ*+pW4jcGp;rXv|kK zHs#8w0h=FE&Q#%xb<`>0o2?p) zmanv_R&k4QyyMKdl#sOF zuQk;B6!G(w!6T?Ew2}VK?r;bh7_V6H{{Y2bDU(jrBn$S5AcEHBX$)|NcPPP}Z(`VB z+cqfqK#S&g*~AYDt>j9PM!~Q-RR@!Ta7j{6JPaI;qXU*df-cYoG0V9bT{p()a;I|0 zGm=2SA<;_i86MuOaQV&(POF_ejx0taF2qOKP7hbA zi%M#GILazBjg!*pXzk@=*lzq!E4qZ0Cc_QO<}p=5a7wbsz>s#0ps{5+0I1x$o)^|` 
zthGUPZ*?uSa-Q_Ahmt)>7)5UrD;K_S zTjhWRc9FsY>;NAsF$J=v`rSjrTCSULY^JxC8R5ey0OaE!F$#qI+2?`ALE1wq`--L` z6B>B8gLri)`?RWiO0jYGZ+Oblp)2X|>3vNk*lgC%qXftrm;r zx%B*gFtOG2s9{Y;TWxw0c3i2AAreNVmV0)Uq-An}N0R9>d7GEb*qcwG=srI%*lG5j zBDs<8B#L#nY8C3yoPC{kswZiuQBRRxDYqu3VyqDHjzbW5JDGT{JI7Z8RgUIIJDe)N%YZ>p z*Z_i9;~_}SPBO&f2h99G;){(YcrC6THnEkK+=qy{K|RD@zTX%!1&p&agsYIO<(*<) zdmUp@M3J60j(8auGlRH-OXO|J9BlxN$fbce+(QMrRO!PNPmxn;zjKmKl|<8uf9FdF?Iu$*7;{Y11s7n#M4G1e4W*~99X*t6XF~X#3vLC$8?zS?PW3e1<9* zicq~jt5chjY1EvhHoe>5>{;op(!P&I?F%7nHg+~nagD>DQIEr?JqN9M!Zu=zx0+WZ zfI(1mhQMGt=NR`MxVJN_fXd-u-M7^iqSjQ1cnL(gyzQQH*~-D2Rz z!y&TS$8rk}*&_AAE6Ovo_gjxEPgfUhc~j`NGcUCJW>u5QQg{RmWml(8 z>jX zN)QM>z^M!dKi>4NKg4=UTP?<=XFDk@BnV27PC~XMMN*(X9DJ@pVua^uK7DL0Yr?e` z2vdbvxzzS=_jK1PN@-1}^0u1Z=WSdle^t%&l~IQ zD1EZl+Dox5BfESZ4@&F312qXiYb#;5FUfbU(Ah<0(t17IFu2q4R z+%%qK192<6d*pv`Dbcyru5>>DX_}1U19={v42TTQ!d1MwSsr-=USw{T#N(BDN1hpe618Pbu3I_74&^&ZB>AT(!*wQ; z)zpnjv?Yk8h4%BN;{`e{GnM&cd0I*;5nJ;s-)C))6}UGt#I}}pcGFw1Lc(DLkvkwJ zHu7a~q=CEw$jh|J8wQR>SMuAh4{QDxn)Af|2x~KJ;_COrJ{V_$(hIwZrxzDGos$OK znTFqO&7HI<6ig(&j}PXC>%2#%ct&kR#h`eH#`<-gG^nGv5jC{JJw6%l zc0^+Yaug9Fos0|l zyLKb5r&ALweqC1;PYAuO2gy#XU)5^HG3861oFBh6Z8@Z*ou1zl!oksL)y2mRDzLpu zSd11b_V;w0xuq3vvncX5tkZj&ZShA&niDcSmiI9YmwJ8dZ57R}+(&Ho?02fnsVOR# z5f3im0G1^&yl(uB_r*JzS(Op)5GfIEcL|^2Tn8Kj!688aa2tRixcD$sPpdw&b~VQ> zG{~`wojX%3=^XLg3Dzs9U7`mA&UEN>NEQcL=Y2!E4IjWqoX)B>pAwCZJ0smk~zTa#Tqg z?L6afINEXm89yiraxzTT_PV|3R{iv#=OjLGMsv%^qklkR6PD@~`p$0+DLlQZ*ZjQfwK>1m3JP^;;qa5DqZet;DX664t;ppTo{q^o ztL=SHo9#S3dvabhc3D>&)xjig1gK?GyPP%#IpmHHI#YO3?IXmfLxs)>aI6?~0e~fr zc_%Bu?_W(?cw)f~uy$1(BKclk_uQpaJdGTLjpZ9HhWT(@F74U+u7-%kB-@oZRyaAy zAZ2Rt_BYs2Dok$p@f`j*2PQM!k;s}B~{62?F*)5otEx z=+Lb5sGdoFZ!oOruBu8B2#+{lmpLT4atob}VhB7(;sd4GnWVAP{&a;-@-z zX;r5hv8C;1qNi5;wv1e79$PDG7)f1SYR>wY?_)u8PIsN9K38eRFO?-H7p?ELt!A`W zL_QtxG%_Nw%7qHqAUPSwH~?|nXK@3JuUhpDKfrcQu@=gbe)iqO9B^<*Q<6dJ)1N_F z+6J8PLmRLlsK!oCPTYciVaUepw*ZnzU`Vdc$zLHs0kSZ69izE8LJs%@zy7L@8r{L;Rj@=Ir<)cE{PKCMdgglbfbc~#PqejW5iC*wR4!CynH!9P4LImFIB2|(9Ijd0E}$_T 
zHx^U?FUrA7e8lBQ$nVQCv5H0n6O)2D$>*N^PSAQOI6Tv?tzl^5F)A)q%(!xQ5)i9o z{nAeIRYIsIBR#vLO9?x}6+#$wFLH(7JsOcsEmRteC)Vv*X=9b*YE30QB~RX0sU({B zRpnTPy+G*u&;X7%XRR%ItVGCip6M%Muj=2Efbr=V(JiZW+`I}=auo4P_sxiqV z1;^u(PkuPBOc10D6ftKc{N(jMq>kX{0B4_myK!3XjK~x&?5NHd6OKFXexNZ?rvQAsq*h5L$Q%Md^W?{rYPVt0I?;f->ao*I#@8bwJNq;}dE$G2!^QMU*% zKwp(hmfMkn-8{TZF!gEsEKTW43orI}MuYwCk>!m^Mm)_+PkvHMy_-9*wQE&N>lG=( z+AFC}q$L)gD_7H*ckHa2XN=EpE8zLfY{LjzDI|%A`CB`bs~iwna8#)qfg}Pu%>%`r zAn?rAmr!3%CA?)rhP=9P$XGLB9$zdtz$i+cfO$cTWHfEL@Qv)qW%6<(2MgvXVoB_! zQOyH9_eufxU zJL|dDK>1K(O{4{hd4D@Bqzsk$!j0VTE*IzxFXI)B$c_HgxBvqp=2bm7kr;*;$PIog;i%2ZY84??OOk1F^75%W z-b;Po>DKs*;GTu5$t1d7wRvJ3De@s(Ynyy7-~uJXzm_+4K!;?6moE}K`BmS+ZxHx@ zP+dnw*6pCsyIXE<+rt_AD}ePI&K*O>xr9F_=qLp_)c& zolaJ38Qbgy}G-)DVn z{AD~9@YILNp)kD7fAE1eL;QZ%5UNM8~ zkU8ndABpQEw>VJBc_W^jXOJ_v@%RJAIn8sBUoVuTf_EH}NE~1(z#J|Ien%ZDrN!wX zg1I9+<+pGM$3_`E4w$bZy()^0+?;I{$kV%;cD<8o&2O(R*44I;vy3L2=5b3%-87db zpA9tBqw6gd-P*po?{-<4jx&-$X2>4fap}MS@O^40yKR{zdyc0d05QSOzfwWLz|UME zb-70*f~TGbTmVleuTksTppxZsRLD30V{qs}>JMX|uNCw4ukQ0kT5Ty>@>Y%Zw%5I` z+P$5udNiohjis#P6p~5bSIcJacJz0({Fjw&e&94f@G~m%zJxjJ*OCWbpUaZfB$7yB zK!vi%4tIQ^bKe|t21g5y4@$eP-H79XOMstfjJM6#aAAY$4tY=;B#iUG%G56cMF~&~ zd58*SOCBF1XeS_$0m16a$<1<2yV^2wcDimU#q6zZ?yu3?z3pSG3ra5$Dv@(mic0!L zrOSUl`gv+-#&Dou^c%3e9mgXCa5{A(9+k~_g8~z@kXT4g4l=y|04($)^)=JSlM0i8 zkf8KYg&jSA0giLjSDX0PSr!o$Pb|vBMy%uoK+Hsp<9e6cq?G;Q{Oi;T&Xyj&BE32h zig#%#sML4WCv>iqUETD5hQ6Dci@dDkb<4f=THi~h=h>s>?O*m%@K47tk9u#!KZ;Fl zs`!7yULDaijVD1%SDj?I(=LtsPj@u(BnVB&*vyu!=%B`rBt}cfzd(FrCZTDf+i5-% z)2(2zg5DcTD@Z4{u!hdYG;tIIf7#-7i54afh%*|28kSgKZ<1rzB+{f?u+mDhx{@J~ zZ7N&L$sTF~hm|LD7U5fP&OJ`=#l9lebO`TsSSE%w62k@6x17h%gcV>5Fe}dry|2))MT*gQc<&YB^c9EcDtt-a!p;kMMXI^(du*6lw$eiE_Rwx=a;!j zHlMnsD=ppmQj@yU#-E0KE#g=;32rqTdnEf}sdk$BDU?MNht6c$@)IaY9d@&_ySPFN zWLJNCqIezPFDqKEts1+%q{Ty{MhH7o?7|XvWmV&zMPrbSysds@YX1Np{CV*E#jw7! 
zbA5AXZ+Ngs#ut+2Yf`F`h+>?rw8l7OX7bS)Jh-Jqh1y9Vhur)>@q*9Bwvg*OZR6iK znf&M+q*n&wIb32@Xa>qb&mTl-FB zYjkZkwYO+h;Z-?Pi>i}Qbzq{`Hkve?ntl0_OXa7hc>CfF--8ke<|!FmtFz>iLv1BUXr@<3b(i4}jlZ(~ zp{L#I9whL5w=ijT;15>mdN?<^X=?cF28 zd~xyP#h0a_{rpkKbWOd}-CVq~Myks0=Gtf#eASWuS$9e1h~;G)HaDH=Um3N%KJZ;# zJo>a?R!L{qjlw*|+V&A66RBB5&3);=8gs%u{&9VY5)h?oM7ra5FzGSQV*_omv&7(&e10a?Pt=Nl7`v zN%peaM19ra--uos@qL0$w^sUf=aFwE#ii7+JHYJwqkAV7?NL3 z@VCV)T_Sj_ZtsbXKix#^C6t|vkp&Dx7C8hUQtW0s_idg(u+#* z)wdMtH1$iRns?a!Qq~&J#5OLB1cG?RU3imY43dllggG1|s<>`!xkrWh?` zxS4;_*9yB?)QNZPjglzSYO@tYA=t)qzB+h4JXviPyA_-;-mSX9C)n+Yp5h4DOvJDR zg8T;`F_@!EA@j@4H_p?`@i=@%YBKh;s>Rxrpxi99QJte_9%Stq@}<(LoW9 zI%)~d>I;%qgHrZ$lwGynJKfqY$Hrb2zcN})X$rQ-XC7NAF2-^Lt{0XJO9cQncNM|F z74a{`FESl#Q&|*)4ySao2FB$kXJrC0^C(1M6-WdPfZ*g0tgI)v(e+6qnIu(HEPVNq zGOM=GvP8gV*-v4+n^-RFw)Z7X|F;)0BDTSx*r;-ah?D7Nt@~^VEFF;aN}i){+@BBzJJktF^G(NOuE~oUfU|z}i8`Y#RFE zY!d1SByzcE)xiLFP{={S?sLX44;=K46~ijjF)^n`btSygl;YJtcb}Q^-;t}kUi#>* zs^1vhE=3Ed!tC#AYoS=05Ok4>yE(ZEYvS8 zY}0&(F)HM!<2#sSbHJL88 zOOYg@+5+UM2Oute4o}Q5cmt9M-~nFlBNs*~W%w*2l(}OR@abYILAzbQipzhA^Y^ux z6>DDl8C1p5_hAlda;DmIcWYZFr55>`?$UQx(_K7sQSf!_;EQ#X5>5#W2*~I7 zaz0~`$2jTIo1@8n;rqnX^*ecmyqMn06G#D+s|h3CIUg%-bj#y&GWlWHdu^_dsYGLC zxDst(Ga{%YjDfvMusgXVDhz?&sq}U5wWg_Hv0uiGGe)~)g_JyC0?W0$w;M|_ZKND9 z`Alp}SETq(Rp(n1LlrD0E0!~bIxiW4)5Jni4m)P=sGsCc6HtA!E;Ux&l(T62B9PK28IQh89Dhlz8?LM7r;frO{ zN-`fUzyHJj~?(! 
zJf3gSPQDK(!qcmW!6{dor%w>!2Rlh9b5-S=yj|P6O*{9w=2M+qMR};ktx9z{TxIPd zqo*rT_g^e$rrg`>rnmVwL-0$@wA)!vaC4Q42#c}%xCAPb!6C>1Had_A2U_pK7xwcc zQfX3|C18=oaS0025;g+M8*Ia{E?4IXzbbH4{oSN^6G`y)>uU>DV(g72#LABCwM2Y3 zk_v#h$0sfevHOc3Wcp3C*2^E1jOarnd1}s#gt%pF0t&G$!39ea04z;;IGp~4D)IIh zRqW*jI8~hL)0CU#-}&CO9ney zMRk#rjC|f>pZk7#YLkPTjAuGAokdN1zV+Q%OWD~+n%C`Qdi1iYbSF+2ii)CC)Tzoe zX9@DIR@0QFQvAHG@#l+rb-Bh~co#?TwWy9rJhr!m@Y60I$&#+v^&fvO@$-YRj~bEI1*zE*l6}1Yq#{vT`;MhP`?G3*xJ+Npa)Zwaq6Z8eXlY z$rQHWF4b)^?hCunkuv06Tih}U3tMb_S`z&hmEe6pM1ppO7FU&wzHEc$3?d7Fpp{ew zON9eFi!nJOxt=EvPX_5xeAOo6;ZmBap*efL`&DBJLhUQrNjGMs%2{M*Ch2lV4=JfM zsrx4is z)jL^EX51wR zx?Ho2r6jp|RIMi^%C~|}+wEiHt0y{+6OJb7ry0r-=qRxX#kqZq+O zd%o;>tkPRQJL>lZM-Lahl&vXh)M0BUWv1@ix4oZpk!HMENfNUs@hQn1dI5po9f83k zCz_5qF4|T?0FRTF-Jf0#;e-DG0XRH!!LF_Jp_j=kw>iNXCvXe~6yT0}h8g5!bw;&{ zI8F!L1eeduaBz7$PBWec20afn@O;wdjR;DeHMDBie3n+fWRhz~b++Wvs~TL>rx;6~ zR(JPw(`OfC_0zYerxp_6%_=XLq4$+1cG7c^$p8_Pj^o!j&0t;K-A6M(1FVQh3>1a= zS(k7mE(rrFM{YsJ3wkuSHX&mIEwX^yi7dhXY=#e#z~gH9dsPP9f=e;3c5BE_m{Q%F zX<)}{{LD9J1a1Q(rf@=%FgX>)n#L-o71_$HZ*PiCOWm|+&Wn<8Y1>=vt2$}Awc{lk zYSBt=6%=ILZk^Qj@1uHkvsQjw>weppL2B|1L4yCC+d*+ zBgdZmDSs)9gHJqUtEH>3W8deXft}>!BU!wzbtX%d5L7>}7aZ zJoZ-?GA{oB!Z}fFyjc`M6_;7kjIjk@m_CR`R7%SECP`X%UmB<8J_H-Yb^R zThw&RE6qnri7jo~>dg{LR35ae%~1u@L3F!_qG5{c)k$_3SIOjja2lKC~z+hv6 z%kcPoZW9Y-B^s45(xn^JPB-^l8;Ks1LOW-dLc*9R?YnxGR zcWo=)X*$KcUS6Z6>K;^xw-)lpByA)zhw|=VcX*|VpL9>USY_S0jW;u^D9KR4&m(r@ z&H-GIxxi!2NCiP7LK$V+s*e#Wq>zolz|L`k4?lYxE;G|2yB!R+F^xP#s#SGnqlc|H zN1BwSWy+N#oMjgD(QT{hP984{UY$x6l9fBVsoR!zNpsok_LN=wUiz;Q(e-U(!Fr@# z7V)$&M`1j%+vr{@4|0oi(n#^8wbh$K#e*`*GF(CnppaWLOC(W6C$ue@F+(&mZ&?;M zL$rX&A_qrSZdqMeI*>86s{%m;)bUu+FLY`D0O2Ht2h#1Wr-pg%)U*~>(?=|GOh{q1 zR7mp`2NOs(kbK4s^RI^fEnN6&YrhFBQuv-2WY2|fQsnKL!rV2yntthBFLekk%3H>6 zQdO1~c1fH}WNExyMqx)G#Qjq;#71~puv4U|RDRl1)My-4_~o%+^(_ zQ=*$v_n}Qj6s0A~ZlFkHx8ibono6v$*| zMTjYOW!t*EL&l#FwF}$$v@Z+FZ}n@9f_u34O&x$-O6B~gB;2$JJiBv(UGnOx$vW~E zv;08uQ+)tX=`}FS*d7pb1af72Pjzn=|M=4Dk4ULC0P608w;oiXSYUN=OhES 
zp#%ox5C${B85|sOzYW!;p=M@58Yl=3++|9hxi}aEanJj`MLG3*eE$H)%v%n2fhq!e zlaQq288`$UxWGJDxq`!CGU{%fXuM5DQ{Kc%GN;Jn%kI-^QjAsp(z@?!Sn=VHoaItp z%8aEZ*Si@;H(a{7xjS{%-g+DMmvae<|q7f|4<2T{W~CnpCP^yF69tlKQZ;ndwos7bdeCpk4W%gYXCl9Sh_i>a`fUb8(9JT59e*$*a9D9d|yHd)YVd5f*MSk5HtXl1?xH>@W!Gb5=gtD}~<^ z0&qssFr$JA&O3F+P67MJiul_1<3_s?Xl>?{ZC{+pBe0N;qsvAJ4l##3j+-`+E1bLd z@8cCA2CX9@C(D76h=G7&3QDr00H{{zlaDG;aE;{CS5HeTw%xT~beu@p5j!avGAw1$ zNEij(RfK%yer8gBOfwm zX2go5<)l9-AQDOs3S3*4O0<)XCe~7tr6*^~;**qAVJrUtdtQyJ=sNrlnqy%fYlp$b z!70;F#MDhS%K54&sK(Crw=;fPAFB4A5b;&*n%(IdG?&mjG;D38w#pnMGO{(qMR<}t z6NTL(<0Zi_i@`oEw<`LtgtgSu#DS)kEj-++M036OS)0&-SMJCW@EkiUAZ@>!`Zw*X z@i#*cBzl2X8y&5d=H*gTwIgP2h*eG%QcWm1%F8%j+lcAlxmEw_Cw zbJW7{b$mS(>fvDPD8?{!u?faaDBqSV zcf@)h{1j_cNfGsp8$`KdAZdKRCO5`ZzQtq?S3BpzPuBu%8}|i zHM9a_0;R>CtfBHm?I?JzrIi3w5ucG6)D6fa3ovgcd_|kdw?iL76yZD4rwW)wHk*y& z{j?oPK8opGrmVL*r;=cyNWiZdU#%yrv%@ zJ7g?2{H00~qfbKpr+yoFf_q3;T!zQZEFw2jD~;bXTK~?SbYgSSbNAHl9+%lyXcIL?dWDe>^Rgkv-0NeSrsne+I(u|t4q^MH%m2J!M zI#i;nsP}hU`k2$o@~X0GDz+XFQInMCDpObG);Do!J^7;Ew!5{D9JBqMMw%XL4O&9Y z^B5l9IFZOIFzyiYc8%C!gYt$WZrbbMU4KZvU4~tXO0))7Xe1xR$f({)e8!Rj!*&yr z3uJvK2Z*%URT?|nSrNaxb#BLy2?PmC4nKshc^nh8Q!X{FJhHP%3n)Bb#N(1iakz|i z>+8-58RM^><8c*f!nGPMF?V$x7_Vt=US^bIX3|eap5GF=+VZTs>d{zwvV!)B`$t(- z=@#cFB`+mp;@Vf{*5{8}Nvc~#_HZ8D#`~C+Mkg2yK4o#o9e6kZhR4genZ8UA6^=5& zc8*EO$j%7QJu(I}k^$^5FLZ`cAeI6o<7zxjyCCqYg^n;uJG0MRS39ZeIz5Uzy~IfD zm_Tv~Vx+F(5tQxf#IZOBsRe72rXg31)(ZpPgMl0Ort6u6+9 literal 0 HcmV?d00001 diff --git a/data/app.icf b/data/app.icf new file mode 100644 index 0000000..249d1ca --- /dev/null +++ b/data/app.icf @@ -0,0 +1,25 @@ +# This file is for configuration settings for your +# application. +# +# The syntax is similar to windows .ini files ie +# +# [GroupName] +# Setting = Value +# +# Which can be read by your application using +# e.g s3eConfigGetString("GroupName", "Setting", string) +# +# All settings must be documented in .config.txt files. 
+# New settings specific to this application should be +# documented in app.config.txt +# +# Some conditional operations are also permitted, see the +# S3E documentation for details. + +[S3E] +MemSize=50485760 +SysStackSize=4000000 # almost 4mb stack size + +DispFixRot=0 +[GX] +DataCacheSize=32768 # 32k of data cache diff --git a/data/development.icf b/data/development.icf new file mode 100644 index 0000000..c5d6418 --- /dev/null +++ b/data/development.icf @@ -0,0 +1,100 @@ +# Settings ICF file automatically generated by S3E development environment + +AccelEnabled = Type=bool, Default="true", Value = "true" +AudioAAC = Type=bool, Default="true", Value = "true" +AudioAACPlus = Type=bool, Default="true", Value = "true" +AudioMIDI = Type=bool, Default="true", Value = "true" +AudioMP3 = Type=bool, Default="true", Value = "true" +AudioPCM = Type=bool, Default="true", Value = "true" +AudioQCP = Type=bool, Default="true", Value = "true" +AudioVolumeDefault = Type=int, Min=0.000000, Max=256.000000, Default="256", Value = "256" +BacklightTimeout = Type=int, Min=0.000000, Max=120000.000000, Default="10000", Value = "10000" +CompassEnabled = Type=bool, Default="true", Value = "true" +ContactsFromAddrBook = Type=bool, Default="false", Value = "false" +DeviceAdvanceSoftkeyPosition = Type=string, Allowed="Bottom Left" "Bottom Right" "Top Right" "Top Left", Default="Bottom Left", Value = "Bottom Left" +DeviceArch = Type=string, Allowed="" "ARM4T" "ARM4" "ARM5T" "ARM5TE" "ARM5TEJ" "ARM6" "ARM6K" "ARM6T2" "ARM6Z" "X86" "PPC" "AMD64" "ARM7", Default="", Value = "" +DeviceBackSoftkeyPosition = Type=string, Allowed="Bottom Left" "Bottom Right" "Top Right" "Top Left", Default="Bottom Right", Value = "Bottom Right" +DeviceBatteryLevel = Type=int, Min=0.000000, Max=100.000000, Default="50", Value = "50" +DeviceClass = Type=string, Allowed="UNKNOWN" "SYMBIAN_GENERIC" "SYMBIAN_SERIES60" "SYMBIAN_SERIES60_EMULATOR" "SYMBIAN_UIQ" "SYMBIAN_UIQ_EMULATOR" "BREW_GENERIC" "BREW_QCIF_3D" 
"BREW_QCIF_25G" "BREW_SQCIF_256" "BREW_QVGA_3G" "WINDOWS_GENERIC" "WINMOBILE_GENERIC" "WINMOBILE_SP" "WINMOBILE_PPC" "LINUX_GENERIC" "LINUX_DESKTOP" "LINUX_EMBED" "WIPI_GENERIC" "NDS_GENERIC" "ARM_SEMIH_GENERIC" "NULCUES_GENERIC" "NGI_GENERIC", Default="WINDOWS_GENERIC", Value = "WINDOWS_GENERIC" +DeviceFPU = Type=string, Allowed="None" "VFP Present", Default="VFP Present", Value = "VFP Present" +DeviceFreeRAM = Type=int, Min=0.000000, Max=2097151.000000, Default="1048576", Value = "1048576" +DeviceIDInt = Type=int, Default="0", Value = "0" +DeviceIDString = Type=string, Default="", Value = "" +DeviceIMSI = Type=string, Default="SIMULATOR_IMSI", Value = "SIMULATOR_IMSI" +DeviceLSKIsBack = Type=bool, Default="false", Value = "false" +DeviceLanguage = Type=string, Allowed="UNKNOWN" "ENGLISH" "FRENCH" "GERMAN" "SPANISH" "ITALIAN" "PORTUGUESE" "DUTCH" "TURKISH" "CROATIAN" "CZECH" "DANISH" "FINNISH" "HUNGARIAN" "NORWEGIAN" "POLISH" "RUSSIAN" "SERBIAN" "SLOVAK" "SLOVENIAN" "SWEDISH" "UKRAINIAN" "GREEK" "JAPANESE" "SIMPL_CHINESE" "TRAD_CHINESE" "KOREAN" "ICELANDIC" "FLEMISH" "THAI" "AFRIKAANS" "ALBANIAN" "AMHARIC" "ARABIC" "ARMENIAN" "AZERBAIJANI" "TAGALOG" "BELARUSSIAN" "BENGALI" "BULGARIAN" "BURMESE" "CATALAN" "ESTONIAN" "FARSI" "GAELIC" "GEORGIAN" "GUJARATI" "HEBREW" "HINDI" "INDONESIAN" "IRISH" "KANNADA" "KAZAKH" "KHMER" "LAO" "LATVIAN" "LITHUANIAN" "MACEDONIAN" "MALAY" "MALAYALAM" "MARATHI" "MOLDOVIAN" "MONGOLIAN" "PUNJABI" "ROMANIAN" "SINHALESE" "SOMALI" "SWAHILI" "TAJIK" "TAMIL" "TELUGU" "TIBETAN" "TIGRINYA" "TURKMEN" "URDU" "UZBEK" "VIETNAMESE" "WELSH" "ZULU" "", Default="", Value = "" +DeviceMainsPower = Type=bool, Default="false", Value = "false" +DeviceName = Type=string, Default="My Computer", Value = "My Computer" +DeviceOS = Type=string, Allowed="NONE" "SYMBIAN" "BREW" "WINDOWS" "WINMOBILE" "LINUX" "WIPI" "NDS" "ARM_SEMIH" "NUCLEUS" "NGI" "WINCE" "SHARPEMP" "OSX" "IPHONE" "UIQ" "PS3" "X360" "BADA" "ANDROID" "WEBOS", Default="NONE", Value = "NONE" 
+DeviceOSVersion = Type=string, Default="", Value = "" +DeviceOSVersionNumber = Type=int, Default="0", Value = "0" +DevicePhoneNumber = Type=string, Default="0044123456789", Value = "0044123456789" +DeviceTimezone = Type=string, Default="SYSTEM", Value = "SYSTEM" +DeviceTotalRAM = Type=int, Min=0.000000, Max=2097151.000000, Default="1048576", Value = "1048576" +DeviceUniqueID = Type=string, Default="SIMULATOR_ID", Value = "SIMULATOR_ID" +DeviceUniqueIDInt = Type=int, Default="01234567890", Value = "01234567890" +FileTotalStorageSize = Type=int, Min=0.000000, Max=2147483648.000000, Default="67108864", Value = "67108864" +FileUseSeparateRomRam = Type=bool, Default="true", Value = "true" +FileUseTotalStorageSize = Type=bool, Default="false", Value = "false" +GLAPI = Type=string, Allowed="None" "GLES 1.0 Common-Lite Profile from Imagination POWERVR(TM)" "GLES 1.1 Common-Lite Profile from Imagination POWERVR(TM)" "GLES 1.0 Common Profile from Imagination POWERVR(TM)" "GLES 1.1 Common Profile from Imagination POWERVR(TM)" "GLES 2.0 from Imagination POWERVR(TM)" "Obey [S3E] SysGlesVersion .icf setting" "GLES 1.1 Common Profile from Qualcomm Snapdragon(TM)" "GLES 2.0 from Qualcomm Snapdragon(TM)", Default="Obey [S3E] SysGlesVersion .icf setting", Value = "Obey [S3E] SysGlesVersion .icf setting" +GLDontUseHiddenWindow = Type=bool, Default="false", Value = "false" +GLTerminateOnSuspend = Type=bool, Default="false", Value = "false" +GLUsePVRVFrame = Type=bool, Default="false", Value = "false" +KeyboardHasAlpha = Type=bool, Default="true", Value = "true" +KeyboardHasDirection = Type=bool, Default="true", Value = "true" +KeyboardHasKeypad = Type=bool, Default="true", Value = "true" +KeyboardNumpadRotation = Type=string, Allowed="Rot0" "Rot90" "Rot180" "Rot270", Default="Rot0", Value = "Rot0" +LicenseExpiryDate = Type=int, Min=0.000000, Max=999999995904.000000, Default="0", Value = "0" +LicenseMinutesRemaining = Type=int, Min=0.000000, Max=10000000.000000, Default="0", Value = 
"0" +LicenseStatus = Type=string, Allowed="EXPIRED" "DEMO" "USECOUNT" "EXPIRYDATE" "EXPIRYMINSUSE" "PURCHASE" "SUBSCRIPTION" "UPGRADE" "NONCOMMERCIAL", Default="NONCOMMERCIAL", Value = "NONCOMMERCIAL" +LicenseUsesRemaining = Type=int, Min=0.000000, Max=10000000.000000, Default="0", Value = "0" +LocationAltitude = Type=float, Min=-2000.000000, Max=100000.000000, Default="60.0", Value = "60.0" +LocationAvailable = Type=bool, Default="true", Value = "true" +LocationHorizontalAccuracy = Type=float, Min=0.000000, Max=100000.000000, Default="20.0", Value = "20.0" +LocationLatitude = Type=float, Min=-90.000000, Max=90.000000, Default="51.511791", Value = "51.511791" +LocationLongitude = Type=float, Min=-180.000000, Max=180.000000, Default="-0.191084", Value = "-0.191084" +LocationVerticalAccuracy = Type=float, Min=0.000000, Max=100000.000000, Default="100.0", Value = "100.0" +MemoryPoison = Type=bool, Default="true", Value = "true" +MemoryPoisonAlloc = Type=int, Min=0.000000, Max=255.000000, Default="170", Value = "170" +MemoryPoisonFree = Type=int, Min=0.000000, Max=255.000000, Default="221", Value = "221" +MemoryPoisonInit = Type=int, Min=0.000000, Max=255.000000, Default="204", Value = "204" +PointerAvailable = Type=bool, Default="true", Value = "true" +PointerMultiSimulationMode = Type=bool, Default="false", Value = "false" +PointerMultiTouchAvailable = Type=bool, Default="false", Value = "false" +PointerStylusType = Type=string, Allowed="INVALID" "STYLUS" "FINGER", Default="INVALID", Value = "INVALID" +PointerType = Type=string, Allowed="INVALID" "MOUSE" "STYLUS", Default="MOUSE", Value = "MOUSE" +SMSEnabled = Type=bool, Default="true", Value = "true" +SMSReceiveEnabled = Type=bool, Default="true", Value = "true" +SocketDNSDelay = Type=int, Min=0.000000, Max=30000.000000, Default="0", Value = "0" +SocketHTTPProxy = Type=string, Default="", Value = "" +SocketHostName = Type=string, Default="", Value = "" +SocketNetworkAvailable = Type=bool, Default="true", Value = 
"true" +SocketNetworkLoss = Type=bool, Default="false", Value = "false" +SocketNetworkType = Type=string, Allowed="NONE" "UNKNOWN" "LAN" "WLAN" "GPRS" "UMTS" "EVDO" "CDMA2000" "HSDPA", Default="LAN", Value = "LAN" +SocketRecvLimit = Type=int, Min=0.000000, Max=1000000.000000, Default="0", Value = "0" +SocketSendLimit = Type=int, Min=0.000000, Max=1000000.000000, Default="0", Value = "0" +SoundEnabled = Type=bool, Default="true", Value = "true" +SoundRecordEnabled = Type=bool, Default="true", Value = "true" +SoundSampleRate = Type=int, Allowed="8192" "11025" "16000" "22050" "44100", Default="22050", Value = "22050" +SoundStereo = Type=bool, Default="true", Value = "true" +SoundVolumeDefault = Type=int, Min=0.000000, Max=256.000000, Default="256", Value = "256" +SurfaceDisableWhenGLIsActive = Type=bool, Default="false", Value = "false" +SurfaceDoubleBuffer = Type=bool, Default="false", Value = "false" +SurfaceHeight = Type=int, Min=128.000000, Max=4096.000000, Default="480", Value = "480" +SurfacePitch = Type=int, Min=0.000000, Max=8192.000000, Default="0", Value = "0" +SurfacePixelType = Type=string, Allowed="RGB444" "RGB555" "RGB565" "RGB666" "RGB888" "BGR444" "BGR555" "BGR565" "BGR666" "BGR888", Default="RGB565", Value = "RGB565" +SurfacePredefinedResolution = Type=string, Allowed="176x200" "176x208" "240x320 (QVGA Portrait)" "240x400" "320x240 (QVGA Landscape)" "320x400" "320x480 (iPhone Portrait)" "400x240" "480x320 (iPhone Landscape)" "360x640 (qHD Portrait)" "640x360 (qHD Landscape)" "480x640 (VGA Portrait)" "480x800 (WVGA Portrait)" "640x480 (VGA Landscape)" "800x400" "800x480 (WVGA Landscape)" "640x960 (iPhone 4 Portrait)" "960x640 (iPhone 4 Landscape)" "1024x600 (Playbook Landscape)" "600x1024 (Playbook Portrait)" "768x1024 (iPad Portrait)" "1024x768 (iPad Landscape)", Default="320x480 (iPhone Portrait)", Value = "320x480 (iPhone Portrait)" +SurfaceRotation = Type=string, Allowed="Rot0" "Rot90" "Rot180" "Rot270", Default="Rot0", Value = "Rot0" 
+SurfaceUnalign = Type=bool, Default="true", Value = "true" +SurfaceUseMultiBuffers = Type=bool, Default="true", Value = "true" +SurfaceWidth = Type=int, Min=128.000000, Max=4096.000000, Default="320", Value = "320" +SymbianSoundLatency = Type=int, Min=20.000000, Max=1400.000000, Default="120", Value = "120" +ThreadEnabled = Type=bool, Default="true", Value = "true" +TimerAccuracy = Type=int, Min=0.000000, Max=1000.000000, Default="0", Value = "0" +TimerHiRes = Type=bool, Default="false", Value = "false" +TimerLocaltimeOffsetHours = Type=string, Allowed="-12" "-11" "-10" "-9" "-8" "-7" "-6" "-5" "-4" "-3" "-2" "-1" "0" "+1" "+2" "+3" "+4" "+5" "+6" "+7" "+8" "+9" "+10" "+11" "+12" "+13" "SYSTEM", Default="SYSTEM", Value = "SYSTEM" +VibraEnabled = Type=bool, Default="true", Value = "true" +Video3GPP = Type=bool, Default="true", Value = "true" +VideoJPEG = Type=bool, Default="true", Value = "true" +VideoMPEG4 = Type=bool, Default="true", Value = "true" +VideoVolumeDefault = Type=int, Min=0.000000, Max=256.000000, Default="256", Value = "256" diff --git a/include/cstdlib b/include/cstdlib new file mode 100644 index 0000000..24bcd23 --- /dev/null +++ b/include/cstdlib @@ -0,0 +1,118 @@ +/* + * Copyright (c) 1999 + * Boris Fomitchev + * + * This material is provided "as is", with absolutely no warranty expressed + * or implied. Any use is at your own risk. + * + * Permission to use or copy this software for any purpose is hereby granted + * without fee, provided the above notices are retained on all copies. + * Permission to modify the code and to distribute modified code is granted, + * provided the above notices are retained, and a notice that the code was + * modified is included with the above copyright notice. 
+ * + */ + +#ifndef _STLP_CSTDLIB +# define _STLP_CSTDLIB + +# ifndef _STLP_OUTERMOST_HEADER_ID +# define _STLP_OUTERMOST_HEADER_ID 0x116 +# include +# endif + +# if defined (_STLP_USE_NEW_C_HEADERS) +# include _STLP_NATIVE_CPP_C_HEADER(cstdlib) +# else +# include _STLP_NATIVE_C_HEADER(stdlib.h) +# endif + +# if defined( __MSL__ ) && __MSL__ <= 0x5003 +namespace std { + typedef ::div_t div_t; + typedef ::ldiv_t ldiv_t; +# ifdef __MSL_LONGLONG_SUPPORT__ + typedef ::lldiv_t lldiv_t; +# endif +} +# endif + +# ifdef _STLP_IMPORT_VENDOR_CSTD +_STLP_BEGIN_NAMESPACE +using _STLP_VENDOR_CSTD::div_t; +using _STLP_VENDOR_CSTD::ldiv_t; +using _STLP_VENDOR_CSTD::size_t; + +# ifndef _STLP_NO_CSTD_FUNCTION_IMPORTS +using _STLP_VENDOR_CSTD::abort; +using _STLP_VENDOR_CSTD::atexit; +using _STLP_VENDOR_CSTD::exit; +using _STLP_VENDOR_CSTD::getenv; +using _STLP_VENDOR_CSTD::calloc; +using _STLP_VENDOR_CSTD::free; +using _STLP_VENDOR_CSTD::malloc; +using _STLP_VENDOR_CSTD::realloc; +using _STLP_VENDOR_CSTD::atof; +using _STLP_VENDOR_CSTD::atoi; +using _STLP_VENDOR_CSTD::atol; +using _STLP_VENDOR_CSTD::mblen; +using _STLP_VENDOR_CSTD::mbstowcs; +using _STLP_VENDOR_CSTD::mbtowc; +using _STLP_VENDOR_CSTD::strtod; +using _STLP_VENDOR_CSTD::strtol; +using _STLP_VENDOR_CSTD::strtoul; +using _STLP_VENDOR_CSTD::system; + +#if ! 
(defined (_STLP_NO_NATIVE_WIDE_STREAMS) || defined (_STLP_NO_MBSTATE_T) ) +using _STLP_VENDOR_CSTD::wcstombs; +using _STLP_VENDOR_CSTD::wctomb; +#endif +using _STLP_VENDOR_CSTD::bsearch; +using _STLP_VENDOR_CSTD::qsort; +// boris : if we do not have native float abs, we define ours; then we cannot do "using" for "other" abs +# ifdef _STLP_HAS_NATIVE_FLOAT_ABS +using _STLP_VENDOR_CSTD::abs; +# endif +using _STLP_VENDOR_CSTD::div; +using _STLP_VENDOR_CSTD::labs; +using _STLP_VENDOR_CSTD::ldiv; +using _STLP_VENDOR_CSTD::rand; +using _STLP_VENDOR_CSTD::srand; +# endif /* _STLP_NO_CSTD_FUNCTION_IMPORTS */ +_STLP_END_NAMESPACE +#endif /* _STLP_IMPORT_VENDOR_CSTD */ + +_STLP_BEGIN_NAMESPACE + +# if defined(_STLP_HAS_NO_NEW_C_HEADERS) && defined(__cplusplus) +# if !defined ( _STLP_LABS ) +inline long abs(long __x) { return _STLP_VENDOR_CSTD::labs(__x); } +//XXXXXXXXXXXX + inline int abs(int __x) { return _STLP_VENDOR_CSTD::labs((long)__x); } + inline long abs(short __x) { return _STLP_VENDOR_CSTD::labs((long)__x); } +//XXXXXXXXXXXX +# endif +# if !defined ( _STLP_LDIV ) +inline ldiv_t div(long __x, long __y) { return _STLP_VENDOR_CSTD::ldiv(__x, __y); } +# endif +# endif + +# if defined ( _STLP_LLABS ) +_STLP_LONG_LONG abs(_STLP_LONG_LONG __x) { return _STLP_VENDOR_CSTD::_STLP_LLABS(__x); } +# endif +# if defined ( _STLP_LLDIV ) +_STLP_LLDIV_T div(_STLP_LONG_LONG __x, _STLP_LONG_LONG __y) { return _STLP_VENDOR_CSTD::_STLP_LLDIV(__x, __y); } +# endif + +_STLP_END_NAMESPACE + +# if (_STLP_OUTERMOST_HEADER_ID == 0x116) +# include +# undef _STLP_OUTERMOST_HEADER_ID +# endif + +#endif /* _STLP_CSTDLIB */ + +// Local Variables: +// mode:C++ +// End: diff --git a/include/opencv/cv.h b/include/opencv/cv.h new file mode 100644 index 0000000..522e55d --- /dev/null +++ b/include/opencv/cv.h @@ -0,0 +1,83 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_OLD_CV_H__ +#define __OPENCV_OLD_CV_H__ + +#if defined(_MSC_VER) + #define CV_DO_PRAGMA(x) __pragma(x) + #define __CVSTR2__(x) #x + #define __CVSTR1__(x) __CVSTR2__(x) + #define __CVMSVCLOC__ __FILE__ "("__CVSTR1__(__LINE__)") : " + #define CV_MSG_PRAGMA(_msg) CV_DO_PRAGMA(message (__CVMSVCLOC__ _msg)) +#elif defined(__GNUC__) + #define CV_DO_PRAGMA(x) _Pragma (#x) + #define CV_MSG_PRAGMA(_msg) CV_DO_PRAGMA(message (_msg)) +#else + #define CV_DO_PRAGMA(x) + #define CV_MSG_PRAGMA(_msg) +#endif +#define CV_WARNING(x) CV_MSG_PRAGMA("Warning: " #x) + +//CV_WARNING("This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module") + +#include "opencv2/core/core_c.h" +#include "opencv2/core/core.hpp" +#include "opencv2/imgproc/imgproc_c.h" +#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/video/tracking.hpp" +#include "opencv2/features2d/features2d.hpp" +#include "opencv2/flann/flann.hpp" +#include "opencv2/calib3d/calib3d.hpp" +#include "opencv2/objdetect/objdetect.hpp" +#include "opencv2/legacy/compat.hpp" + +#if !defined(CV_IMPL) +#define CV_IMPL extern "C" +#endif //CV_IMPL + +#if defined(__cplusplus) +#include "opencv2/core/internal.hpp" +#endif //__cplusplus + +#endif // __OPENCV_OLD_CV_H_ + diff --git a/include/opencv/cv.hpp b/include/opencv/cv.hpp new file mode 100644 index 0000000..37b523b --- /dev/null +++ b/include/opencv/cv.hpp @@ -0,0 +1,52 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. 
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_OLD_CV_HPP__ +#define __OPENCV_OLD_CV_HPP__ + +//#if defined(__GNUC__) +//#warning "This is a deprecated opencv header provided for compatibility. 
Please include a header from a corresponding opencv module" +//#endif + +#include + +#endif diff --git a/include/opencv/cvaux.h b/include/opencv/cvaux.h new file mode 100644 index 0000000..b15d068 --- /dev/null +++ b/include/opencv/cvaux.h @@ -0,0 +1,65 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_OLD_AUX_H__ +#define __OPENCV_OLD_AUX_H__ + +//#if defined(__GNUC__) +//#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module" +//#endif + +#include "opencv2/core/core_c.h" +#include "opencv2/core/core.hpp" +#include "opencv2/imgproc/imgproc_c.h" +#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/video/tracking.hpp" +#include "opencv2/video/background_segm.hpp" +#include "opencv2/features2d/features2d.hpp" +#include "opencv2/calib3d/calib3d.hpp" +#include "opencv2/objdetect/objdetect.hpp" +#include "opencv2/legacy/legacy.hpp" +#include "opencv2/legacy/compat.hpp" +#include "opencv2/legacy/blobtrack.hpp" +#include "opencv2/contrib/contrib.hpp" + +#endif + +/* End of file. */ diff --git a/include/opencv/cvaux.hpp b/include/opencv/cvaux.hpp new file mode 100644 index 0000000..952210b --- /dev/null +++ b/include/opencv/cvaux.hpp @@ -0,0 +1,51 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_OLD_AUX_HPP__ +#define __OPENCV_OLD_AUX_HPP__ + +//#if defined(__GNUC__) +//#warning "This is a deprecated opencv header provided for compatibility. 
Please include a header from a corresponding opencv module" +//#endif + +#include + +#endif diff --git a/include/opencv/cvwimage.h b/include/opencv/cvwimage.h new file mode 100644 index 0000000..f27cfc2 --- /dev/null +++ b/include/opencv/cvwimage.h @@ -0,0 +1,46 @@ +/////////////////////////////////////////////////////////////////////////////// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to +// this license. If you do not agree to this license, do not download, +// install, copy or use the software. +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2008, Google, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation or contributors may not be used to endorse +// or promote products derived from this software without specific +// prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" +// and any express or implied warranties, including, but not limited to, the +// implied warranties of merchantability and fitness for a particular purpose +// are disclaimed. 
In no event shall the Intel Corporation or contributors be +// liable for any direct, indirect, incidental, special, exemplary, or +// consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. + + +#ifndef __OPENCV_OLD_WIMAGE_HPP__ +#define __OPENCV_OLD_WIMAGE_HPP__ + +#include "opencv2/core/wimage.hpp" + +#endif diff --git a/include/opencv/cxcore.h b/include/opencv/cxcore.h new file mode 100644 index 0000000..d52ad4f --- /dev/null +++ b/include/opencv/cxcore.h @@ -0,0 +1,53 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_OLD_CXCORE_H__ +#define __OPENCV_OLD_CXCORE_H__ + +//#if defined(__GNUC__) +//#warning "This is a deprecated opencv header provided for compatibility. Please include a header from a corresponding opencv module" +//#endif + +#include "opencv2/core/core_c.h" +#include "opencv2/core/core.hpp" + +#endif diff --git a/include/opencv/cxcore.hpp b/include/opencv/cxcore.hpp new file mode 100644 index 0000000..033b365 --- /dev/null +++ b/include/opencv/cxcore.hpp @@ -0,0 +1,52 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. 
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_OLD_CXCORE_HPP__ +#define __OPENCV_OLD_CXCORE_HPP__ + +//#if defined(__GNUC__) +//#warning "This is a deprecated opencv header provided for compatibility. 
Please include a header from a corresponding opencv module" +//#endif + +#include + +#endif diff --git a/include/opencv/cxeigen.hpp b/include/opencv/cxeigen.hpp new file mode 100644 index 0000000..c503f71 --- /dev/null +++ b/include/opencv/cxeigen.hpp @@ -0,0 +1,49 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_OLD_EIGEN_HPP__ +#define __OPENCV_OLD_EIGEN_HPP__ + +#include "opencv2/core/eigen.hpp" + +#endif + diff --git a/include/opencv/cxmisc.h b/include/opencv/cxmisc.h new file mode 100644 index 0000000..6446944 --- /dev/null +++ b/include/opencv/cxmisc.h @@ -0,0 +1,6 @@ +#ifndef __OPENCV_OLD_CXMISC_H__ +#define __OPENCV_OLD_CXMISC_H__ + +#include "opencv2/core/internal.hpp" + +#endif diff --git a/include/opencv/highgui.h b/include/opencv/highgui.h new file mode 100644 index 0000000..9725c9f --- /dev/null +++ b/include/opencv/highgui.h @@ -0,0 +1,50 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_OLD_HIGHGUI_H__ +#define __OPENCV_OLD_HIGHGUI_H__ + +#include "opencv2/core/core_c.h" +#include "opencv2/core/core.hpp" +#include "opencv2/highgui/highgui_c.h" +#include "opencv2/highgui/highgui.hpp" + +#endif diff --git a/include/opencv/ml.h b/include/opencv/ml.h new file mode 100644 index 0000000..0383a2f --- /dev/null +++ b/include/opencv/ml.h @@ -0,0 +1,48 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// Intel License Agreement +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_OLD_ML_H__ +#define __OPENCV_OLD_ML_H__ + +#include "opencv2/core/core_c.h" +#include "opencv2/core/core.hpp" +#include "opencv2/ml/ml.hpp" + +#endif diff --git a/include/opencv2/core/core.hpp b/include/opencv2/core/core.hpp new file mode 100644 index 0000000..9e1d855 --- /dev/null +++ b/include/opencv2/core/core.hpp @@ -0,0 +1,4345 @@ +/*! \file core.hpp + \brief The Core Functionality + */ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CORE_HPP__ +#define __OPENCV_CORE_HPP__ + +#include "opencv2/core/types_c.h" +#include "opencv2/core/version.hpp" + +#ifdef __cplusplus + +#ifndef SKIP_INCLUDES +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif // SKIP_INCLUDES + +/*! 
\namespace cv + Namespace where all the C++ OpenCV functionality resides +*/ +namespace cv { + +#undef abs +#undef min +#undef max +#undef Complex + +using std::vector; +using std::string; +using std::ptrdiff_t; + +template class CV_EXPORTS Size_; +template class CV_EXPORTS Point_; +template class CV_EXPORTS Rect_; +template class CV_EXPORTS Vec; +template class CV_EXPORTS Matx; + +typedef std::string String; +typedef std::basic_string WString; + +class Mat; +class SparseMat; +typedef Mat MatND; + +class CV_EXPORTS MatExpr; +class CV_EXPORTS MatOp_Base; +class CV_EXPORTS MatArg; +class CV_EXPORTS MatConstIterator; + +template class CV_EXPORTS Mat_; +template class CV_EXPORTS MatIterator_; +template class CV_EXPORTS MatConstIterator_; +template class CV_EXPORTS MatCommaInitializer_; + +CV_EXPORTS string fromUtf16(const WString& str); +CV_EXPORTS WString toUtf16(const string& str); + +CV_EXPORTS string format( const char* fmt, ... ); +CV_EXPORTS string tempfile( const char* suffix CV_DEFAULT(0)); + +// matrix decomposition types +enum { DECOMP_LU=0, DECOMP_SVD=1, DECOMP_EIG=2, DECOMP_CHOLESKY=3, DECOMP_QR=4, DECOMP_NORMAL=16 }; +enum { NORM_INF=1, NORM_L1=2, NORM_L2=4, NORM_TYPE_MASK=7, NORM_RELATIVE=8, NORM_MINMAX=32}; +enum { CMP_EQ=0, CMP_GT=1, CMP_GE=2, CMP_LT=3, CMP_LE=4, CMP_NE=5 }; +enum { GEMM_1_T=1, GEMM_2_T=2, GEMM_3_T=4 }; +enum { DFT_INVERSE=1, DFT_SCALE=2, DFT_ROWS=4, DFT_COMPLEX_OUTPUT=16, DFT_REAL_OUTPUT=32, + DCT_INVERSE = DFT_INVERSE, DCT_ROWS=DFT_ROWS }; + + +/*! + The standard OpenCV exception class. + Instances of the class are thrown by various functions and methods in the case of critical errors. + */ +class CV_EXPORTS Exception : public std::exception +{ +public: + /*! + Default constructor + */ + Exception(); + /*! + Full constructor. Normally the constuctor is not called explicitly. + Instead, the macros CV_Error(), CV_Error_() and CV_Assert() are used. 
+ */ + Exception(int _code, const string& _err, const string& _func, const string& _file, int _line); + virtual ~Exception() throw(); + + /*! + \return the error description and the context as a text string. + */ + virtual const char *what() const throw(); + void formatMessage(); + + string msg; ///< the formatted error message + + int code; ///< error code @see CVStatus + string err; ///< error description + string func; ///< function name. Available only when the compiler supports __func__ macro + string file; ///< source file name where the error has occured + int line; ///< line number in the source file where the error has occured +}; + + +//! Signals an error and raises the exception. + +/*! + By default the function prints information about the error to stderr, + then it either stops if setBreakOnError() had been called before or raises the exception. + It is possible to alternate error processing by using redirectError(). + + \param exc the exception raisen. + */ +CV_EXPORTS void error( const Exception& exc ); + +//! Sets/resets the break-on-error mode. + +/*! + When the break-on-error mode is set, the default error handler + issues a hardware exception, which can make debugging more convenient. + + \return the previous state + */ +CV_EXPORTS bool setBreakOnError(bool flag); + +typedef int (CV_CDECL *ErrorCallback)( int status, const char* func_name, + const char* err_msg, const char* file_name, + int line, void* userdata ); + +//! Sets the new error handler and the optional user data. + +/*! + The function sets the new error handler, called from cv::error(). + + \param errCallback the new error handler. If NULL, the default error handler is used. + \param userdata the optional user data pointer, passed to the callback. 
+ \param prevUserdata the optional output parameter where the previous user data pointer is stored + + \return the previous error handler +*/ +CV_EXPORTS ErrorCallback redirectError( ErrorCallback errCallback, + void* userdata=0, void** prevUserdata=0); + +#ifdef __GNUC__ +#define CV_Error( code, msg ) cv::error( cv::Exception(code, msg, __func__, __FILE__, __LINE__) ) +#define CV_Error_( code, args ) cv::error( cv::Exception(code, cv::format args, __func__, __FILE__, __LINE__) ) +#define CV_Assert( expr ) if((expr)) ; else cv::error( cv::Exception(CV_StsAssert, #expr, __func__, __FILE__, __LINE__) ) +#else +#define CV_Error( code, msg ) cv::error( cv::Exception(code, msg, "", __FILE__, __LINE__) ) +#define CV_Error_( code, args ) cv::error( cv::Exception(code, cv::format args, "", __FILE__, __LINE__) ) +#define CV_Assert( expr ) if((expr)) ; else cv::error( cv::Exception(CV_StsAssert, #expr, "", __FILE__, __LINE__) ) +#endif + +#ifdef _DEBUG +#define CV_DbgAssert(expr) CV_Assert(expr) +#else +#define CV_DbgAssert(expr) +#endif + +CV_EXPORTS void setNumThreads(int nthreads); +CV_EXPORTS int getNumThreads(); +CV_EXPORTS int getThreadNum(); + +//! Returns the number of ticks. + +/*! + The function returns the number of ticks since the certain event (e.g. when the machine was turned on). + It can be used to initialize cv::RNG or to measure a function execution time by reading the tick count + before and after the function call. The granularity of ticks depends on the hardware and OS used. Use + cv::getTickFrequency() to convert ticks to seconds. +*/ +CV_EXPORTS_W int64 getTickCount(); + +/*! + Returns the number of ticks per seconds. + + The function returns the number of ticks (as returned by cv::getTickCount()) per second. + The following code computes the execution time in milliseconds: + + \code + double exec_time = (double)getTickCount(); + // do something ... 
+ exec_time = ((double)getTickCount() - exec_time)*1000./getTickFrequency(); + \endcode +*/ +CV_EXPORTS_W double getTickFrequency(); + +/*! + Returns the number of CPU ticks. + + On platforms where the feature is available, the function returns the number of CPU ticks + since the certain event (normally, the system power-on moment). Using this function + one can accurately measure the execution time of very small code fragments, + for which cv::getTickCount() granularity is not enough. +*/ +CV_EXPORTS_W int64 getCPUTickCount(); + +/*! + Returns SSE etc. support status + + The function returns true if certain hardware features are available. + Currently, the following features are recognized: + - CV_CPU_MMX - MMX + - CV_CPU_SSE - SSE + - CV_CPU_SSE2 - SSE 2 + - CV_CPU_SSE3 - SSE 3 + - CV_CPU_SSSE3 - SSSE 3 + - CV_CPU_SSE4_1 - SSE 4.1 + - CV_CPU_SSE4_2 - SSE 4.2 + - CV_CPU_POPCNT - POPCOUNT + - CV_CPU_AVX - AVX + + \note {Note that the function output is not static. Once you called cv::useOptimized(false), + most of the hardware acceleration is disabled and thus the function will returns false, + until you call cv::useOptimized(true)} +*/ +CV_EXPORTS_W bool checkHardwareSupport(int feature); + +//! returns the number of CPUs (including hyper-threading) +CV_EXPORTS_W int getNumberOfCPUs(); + +/*! + Allocates memory buffer + + This is specialized OpenCV memory allocation function that returns properly aligned memory buffers. + The usage is identical to malloc(). The allocated buffers must be freed with cv::fastFree(). + If there is not enough memory, the function calls cv::error(), which raises an exception. + + \param bufSize buffer size in bytes + \return the allocated memory buffer. +*/ +CV_EXPORTS void* fastMalloc(size_t bufSize); + +/*! + Frees the memory allocated with cv::fastMalloc + + This is the corresponding deallocation function for cv::fastMalloc(). + When ptr==NULL, the function has no effect. 
+*/ +CV_EXPORTS void fastFree(void* ptr); + +template static inline _Tp* allocate(size_t n) +{ + return new _Tp[n]; +} + +template static inline void deallocate(_Tp* ptr, size_t) +{ + delete[] ptr; +} + +/*! + Aligns pointer by the certain number of bytes + + This small inline function aligns the pointer by the certian number of bytes by shifting + it forward by 0 or a positive offset. +*/ +template static inline _Tp* alignPtr(_Tp* ptr, int n=(int)sizeof(_Tp)) +{ + return (_Tp*)(((size_t)ptr + n-1) & -n); +} + +/*! + Aligns buffer size by the certain number of bytes + + This small inline function aligns a buffer size by the certian number of bytes by enlarging it. +*/ +static inline size_t alignSize(size_t sz, int n) +{ + return (sz + n-1) & -n; +} + +/*! + Turns on/off available optimization + + The function turns on or off the optimized code in OpenCV. Some optimization can not be enabled + or disabled, but, for example, most of SSE code in OpenCV can be temporarily turned on or off this way. + + \note{Since optimization may imply using special data structures, it may be unsafe + to call this function anywhere in the code. Instead, call it somewhere at the top level.} +*/ +CV_EXPORTS_W void setUseOptimized(bool onoff); + +/*! + Returns the current optimization status + + The function returns the current optimization status, which is controlled by cv::setUseOptimized(). +*/ +CV_EXPORTS_W bool useOptimized(); + +/*! 
+ The STL-compilant memory Allocator based on cv::fastMalloc() and cv::fastFree() +*/ +template class CV_EXPORTS Allocator +{ +public: + typedef _Tp value_type; + typedef value_type* pointer; + typedef const value_type* const_pointer; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef size_t size_type; + typedef ptrdiff_t difference_type; + template class rebind { typedef Allocator other; }; + + explicit Allocator() {} + ~Allocator() {} + explicit Allocator(Allocator const&) {} + template + explicit Allocator(Allocator const&) {} + + // address + pointer address(reference r) { return &r; } + const_pointer address(const_reference r) { return &r; } + + pointer allocate(size_type count, const void* =0) + { return reinterpret_cast(fastMalloc(count * sizeof (_Tp))); } + + void deallocate(pointer p, size_type) {fastFree(p); } + + size_type max_size() const + { return max(static_cast<_Tp>(-1)/sizeof(_Tp), 1); } + + void construct(pointer p, const _Tp& v) { new(static_cast(p)) _Tp(v); } + void destroy(pointer p) { p->~_Tp(); } +}; + +/////////////////////// Vec (used as element of multi-channel images ///////////////////// + +/*! + A helper class for cv::DataType + + The class is specialized for each fundamental numerical data type supported by OpenCV. + It provides DataDepth::value constant. 
+*/ +template class CV_EXPORTS DataDepth {}; + +template<> class DataDepth { public: enum { value = CV_8U, fmt=(int)'u' }; }; +template<> class DataDepth { public: enum { value = CV_8U, fmt=(int)'u' }; }; +template<> class DataDepth { public: enum { value = CV_8S, fmt=(int)'c' }; }; +template<> class DataDepth { public: enum { value = CV_8S, fmt=(int)'c' }; }; +template<> class DataDepth { public: enum { value = CV_16U, fmt=(int)'w' }; }; +template<> class DataDepth { public: enum { value = CV_16S, fmt=(int)'s' }; }; +template<> class DataDepth { public: enum { value = CV_32S, fmt=(int)'i' }; }; +// this is temporary solution to support 32-bit unsigned integers +template<> class DataDepth { public: enum { value = CV_32S, fmt=(int)'i' }; }; +template<> class DataDepth { public: enum { value = CV_32F, fmt=(int)'f' }; }; +template<> class DataDepth { public: enum { value = CV_64F, fmt=(int)'d' }; }; +template class DataDepth<_Tp*> { public: enum { value = CV_USRTYPE1, fmt=(int)'r' }; }; + + +////////////////////////////// Small Matrix /////////////////////////// + +/*! + A short numerical vector. + + This template class represents short numerical vectors (of 1, 2, 3, 4 ... elements) + on which you can perform basic arithmetical operations, access individual elements using [] operator etc. + The vectors are allocated on stack, as opposite to std::valarray, std::vector, cv::Mat etc., + which elements are dynamically allocated in the heap. + + The template takes 2 parameters: + -# _Tp element type + -# cn the number of elements + + In addition to the universal notation like Vec, you can use shorter aliases + for the most popular specialized variants of Vec, e.g. Vec3f ~ Vec. 
+ */ + +struct CV_EXPORTS Matx_AddOp {}; +struct CV_EXPORTS Matx_SubOp {}; +struct CV_EXPORTS Matx_ScaleOp {}; +struct CV_EXPORTS Matx_MulOp {}; +struct CV_EXPORTS Matx_MatMulOp {}; +struct CV_EXPORTS Matx_TOp {}; + +template class CV_EXPORTS Matx +{ +public: + typedef _Tp value_type; + typedef Matx<_Tp, MIN(m, n), 1> diag_type; + typedef Matx<_Tp, m, n> mat_type; + enum { depth = DataDepth<_Tp>::value, rows = m, cols = n, channels = rows*cols, + type = CV_MAKETYPE(depth, channels) }; + + //! default constructor + Matx(); + + Matx(_Tp v0); //!< 1x1 matrix + Matx(_Tp v0, _Tp v1); //!< 1x2 or 2x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2); //!< 1x3 or 3x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3); //!< 1x4, 2x2 or 4x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4); //!< 1x5 or 5x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5); //!< 1x6, 2x3, 3x2 or 6x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6); //!< 1x7 or 7x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7); //!< 1x8, 2x4, 4x2 or 8x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8); //!< 1x9, 3x3 or 9x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9); //!< 1x10, 2x5 or 5x2 or 10x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11); //!< 1x12, 2x6, 3x4, 4x3, 6x2 or 12x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11, + _Tp v12, _Tp v13, _Tp v14, _Tp v15); //!< 1x16, 4x4 or 16x1 matrix + explicit Matx(const _Tp* vals); //!< initialize from a plain array + + static Matx all(_Tp alpha); + static Matx zeros(); + static Matx ones(); + static Matx eye(); + static Matx diag(const diag_type& d); + static Matx randu(_Tp a, _Tp b); + static Matx randn(_Tp a, _Tp b); + + //! 
dot product computed with the default precision + _Tp dot(const Matx<_Tp, m, n>& v) const; + + //! dot product computed in double-precision arithmetics + double ddot(const Matx<_Tp, m, n>& v) const; + + //! convertion to another data type + template operator Matx() const; + + //! change the matrix shape + template Matx<_Tp, m1, n1> reshape() const; + + //! extract part of the matrix + template Matx<_Tp, m1, n1> get_minor(int i, int j) const; + + //! extract the matrix row + Matx<_Tp, 1, n> row(int i) const; + + //! extract the matrix column + Matx<_Tp, m, 1> col(int i) const; + + //! extract the matrix diagonal + Matx<_Tp, MIN(m,n), 1> diag() const; + + //! transpose the matrix + Matx<_Tp, n, m> t() const; + + //! invert matrix the matrix + Matx<_Tp, n, m> inv(int method=DECOMP_LU) const; + + //! solve linear system + template Matx<_Tp, n, l> solve(const Matx<_Tp, m, l>& rhs, int flags=DECOMP_LU) const; + Matx<_Tp, n, 1> solve(const Matx<_Tp, m, 1>& rhs, int method) const; + + //! multiply two matrices element-wise + Matx<_Tp, m, n> mul(const Matx<_Tp, m, n>& a) const; + + //! element access + const _Tp& operator ()(int i, int j) const; + _Tp& operator ()(int i, int j); + + //! 
1D element access + const _Tp& operator ()(int i) const; + _Tp& operator ()(int i); + + Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_AddOp); + Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_SubOp); + template Matx(const Matx<_Tp, m, n>& a, _T2 alpha, Matx_ScaleOp); + Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_MulOp); + template Matx(const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b, Matx_MatMulOp); + Matx(const Matx<_Tp, n, m>& a, Matx_TOp); + + _Tp val[m*n]; //< matrix elements +}; + + +typedef Matx Matx12f; +typedef Matx Matx12d; +typedef Matx Matx13f; +typedef Matx Matx13d; +typedef Matx Matx14f; +typedef Matx Matx14d; +typedef Matx Matx16f; +typedef Matx Matx16d; + +typedef Matx Matx21f; +typedef Matx Matx21d; +typedef Matx Matx31f; +typedef Matx Matx31d; +typedef Matx Matx41f; +typedef Matx Matx41d; +typedef Matx Matx61f; +typedef Matx Matx61d; + +typedef Matx Matx22f; +typedef Matx Matx22d; +typedef Matx Matx23f; +typedef Matx Matx23d; +typedef Matx Matx32f; +typedef Matx Matx32d; + +typedef Matx Matx33f; +typedef Matx Matx33d; + +typedef Matx Matx34f; +typedef Matx Matx34d; +typedef Matx Matx43f; +typedef Matx Matx43d; + +typedef Matx Matx44f; +typedef Matx Matx44d; +typedef Matx Matx66f; +typedef Matx Matx66d; + + +/*! + A short numerical vector. + + This template class represents short numerical vectors (of 1, 2, 3, 4 ... elements) + on which you can perform basic arithmetical operations, access individual elements using [] operator etc. + The vectors are allocated on stack, as opposite to std::valarray, std::vector, cv::Mat etc., + which elements are dynamically allocated in the heap. + + The template takes 2 parameters: + -# _Tp element type + -# cn the number of elements + + In addition to the universal notation like Vec, you can use shorter aliases + for the most popular specialized variants of Vec, e.g. Vec3f ~ Vec. 
+*/ +template class CV_EXPORTS Vec : public Matx<_Tp, cn, 1> +{ +public: + typedef _Tp value_type; + enum { depth = DataDepth<_Tp>::value, channels = cn, type = CV_MAKETYPE(depth, channels) }; + + //! default constructor + Vec(); + + Vec(_Tp v0); //!< 1-element vector constructor + Vec(_Tp v0, _Tp v1); //!< 2-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2); //!< 3-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3); //!< 4-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4); //!< 5-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5); //!< 6-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6); //!< 7-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7); //!< 8-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8); //!< 9-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9); //!< 10-element vector constructor + explicit Vec(const _Tp* values); + + Vec(const Vec<_Tp, cn>& v); + Vec<_Tp, cn>& operator =(const Matx<_Tp, cn, 1>& m); + + static Vec all(_Tp alpha); + + //! per-element multiplication + Vec mul(const Vec<_Tp, cn>& v) const; + + /*! + cross product of the two 3D vectors. + + For other dimensionalities the exception is raised + */ + Vec cross(const Vec& v) const; + //! convertion to another data type + template operator Vec() const; + //! conversion to 4-element CvScalar. + operator CvScalar() const; + + /*! 
element access */ + const _Tp& operator [](int i) const; + _Tp& operator[](int i); + const _Tp& operator ()(int i) const; + _Tp& operator ()(int i); + + Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_AddOp); + Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_SubOp); + template Vec(const Matx<_Tp, cn, 1>& a, _T2 alpha, Matx_ScaleOp); +}; + + +/* \typedef + + Shorter aliases for the most popular specializations of Vec +*/ +typedef Vec Vec2b; +typedef Vec Vec3b; +typedef Vec Vec4b; + +typedef Vec Vec2s; +typedef Vec Vec3s; +typedef Vec Vec4s; + +typedef Vec Vec2w; +typedef Vec Vec3w; +typedef Vec Vec4w; + +typedef Vec Vec2i; +typedef Vec Vec3i; +typedef Vec Vec4i; +typedef Vec Vec6i; +typedef Vec Vec8i; + +typedef Vec Vec2f; +typedef Vec Vec3f; +typedef Vec Vec4f; +typedef Vec Vec6f; + +typedef Vec Vec2d; +typedef Vec Vec3d; +typedef Vec Vec4d; +typedef Vec Vec6d; + + +//////////////////////////////// Complex ////////////////////////////// + +/*! + A complex number class. + + The template class is similar and compatible with std::complex, however it provides slightly + more convenient access to the real and imaginary parts using through the simple field access, as opposite + to std::complex::real() and std::complex::imag(). +*/ +template class CV_EXPORTS Complex +{ +public: + + //! constructors + Complex(); + Complex( _Tp _re, _Tp _im=0 ); + Complex( const std::complex<_Tp>& c ); + + //! conversion to another data type + template operator Complex() const; + //! conjugation + Complex conj() const; + //! conversion to std::complex + operator std::complex<_Tp>() const; + + _Tp re, im; //< the real and the imaginary parts +}; + + +/*! + \typedef +*/ +typedef Complex Complexf; +typedef Complex Complexd; + + +//////////////////////////////// Point_ //////////////////////////////// + +/*! + template 2D point class. + + The class defines a point in 2D space. Data type of the point coordinates is specified + as a template parameter. 
There are a few shorter aliases available for user convenience. + See cv::Point, cv::Point2i, cv::Point2f and cv::Point2d. +*/ +template class CV_EXPORTS Point_ +{ +public: + typedef _Tp value_type; + + // various constructors + Point_(); + Point_(_Tp _x, _Tp _y); + Point_(const Point_& pt); + Point_(const CvPoint& pt); + Point_(const CvPoint2D32f& pt); + Point_(const Size_<_Tp>& sz); + Point_(const Vec<_Tp, 2>& v); + + Point_& operator = (const Point_& pt); + //! conversion to another data type + template operator Point_<_Tp2>() const; + + //! conversion to the old-style C structures + operator CvPoint() const; + operator CvPoint2D32f() const; + operator Vec<_Tp, 2>() const; + + //! dot product + _Tp dot(const Point_& pt) const; + //! dot product computed in double-precision arithmetics + double ddot(const Point_& pt) const; + //! checks whether the point is inside the specified rectangle + bool inside(const Rect_<_Tp>& r) const; + + _Tp x, y; //< the point coordinates +}; + +/*! + template 3D point class. + + The class defines a point in 3D space. Data type of the point coordinates is specified + as a template parameter. + + \see cv::Point3i, cv::Point3f and cv::Point3d +*/ +template class CV_EXPORTS Point3_ +{ +public: + typedef _Tp value_type; + + // various constructors + Point3_(); + Point3_(_Tp _x, _Tp _y, _Tp _z); + Point3_(const Point3_& pt); + explicit Point3_(const Point_<_Tp>& pt); + Point3_(const CvPoint3D32f& pt); + Point3_(const Vec<_Tp, 3>& v); + + Point3_& operator = (const Point3_& pt); + //! conversion to another data type + template operator Point3_<_Tp2>() const; + //! conversion to the old-style CvPoint... + operator CvPoint3D32f() const; + //! conversion to cv::Vec<> + operator Vec<_Tp, 3>() const; + + //! dot product + _Tp dot(const Point3_& pt) const; + //! dot product computed in double-precision arithmetics + double ddot(const Point3_& pt) const; + //! 
cross product of the 2 3D points + Point3_ cross(const Point3_& pt) const; + + _Tp x, y, z; //< the point coordinates +}; + +//////////////////////////////// Size_ //////////////////////////////// + +/*! + The 2D size class + + The class represents the size of a 2D rectangle, image size, matrix size etc. + Normally, cv::Size ~ cv::Size_ is used. +*/ +template class CV_EXPORTS Size_ +{ +public: + typedef _Tp value_type; + + //! various constructors + Size_(); + Size_(_Tp _width, _Tp _height); + Size_(const Size_& sz); + Size_(const CvSize& sz); + Size_(const CvSize2D32f& sz); + Size_(const Point_<_Tp>& pt); + + Size_& operator = (const Size_& sz); + //! the area (width*height) + _Tp area() const; + + //! conversion of another data type. + template operator Size_<_Tp2>() const; + + //! conversion to the old-style OpenCV types + operator CvSize() const; + operator CvSize2D32f() const; + + _Tp width, height; // the width and the height +}; + +//////////////////////////////// Rect_ //////////////////////////////// + +/*! + The 2D up-right rectangle class + + The class represents a 2D rectangle with coordinates of the specified data type. + Normally, cv::Rect ~ cv::Rect_ is used. +*/ +template class CV_EXPORTS Rect_ +{ +public: + typedef _Tp value_type; + + //! various constructors + Rect_(); + Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height); + Rect_(const Rect_& r); + Rect_(const CvRect& r); + Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz); + Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2); + + Rect_& operator = ( const Rect_& r ); + //! the top-left corner + Point_<_Tp> tl() const; + //! the bottom-right corner + Point_<_Tp> br() const; + + //! size (width, height) of the rectangle + Size_<_Tp> size() const; + //! area (width*height) of the rectangle + _Tp area() const; + + //! conversion to another data type + template operator Rect_<_Tp2>() const; + //! conversion to the old-style CvRect + operator CvRect() const; + + //! 
checks whether the rectangle contains the point + bool contains(const Point_<_Tp>& pt) const; + + _Tp x, y, width, height; //< the top-left corner, as well as width and height of the rectangle +}; + + +/*! + \typedef + + shorter aliases for the most popular cv::Point_<>, cv::Size_<> and cv::Rect_<> specializations +*/ +typedef Point_ Point2i; +typedef Point2i Point; +typedef Size_ Size2i; +typedef Size2i Size; +typedef Rect_ Rect; +typedef Point_ Point2f; +typedef Point_ Point2d; +typedef Size_ Size2f; +typedef Point3_ Point3i; +typedef Point3_ Point3f; +typedef Point3_ Point3d; + + +/*! + The rotated 2D rectangle. + + The class represents rotated (i.e. not up-right) rectangles on a plane. + Each rectangle is described by the center point (mass center), length of each side + (represented by cv::Size2f structure) and the rotation angle in degrees. +*/ +class CV_EXPORTS RotatedRect +{ +public: + //! various constructors + RotatedRect(); + RotatedRect(const Point2f& _center, const Size2f& _size, float _angle); + RotatedRect(const CvBox2D& box); + + //! returns 4 vertices of the rectangle + void points(Point2f pts[]) const; + //! returns the minimal up-right rectangle containing the rotated rectangle + Rect boundingRect() const; + //! conversion to the old-style CvBox2D structure + operator CvBox2D() const; + + Point2f center; //< the rectangle mass center + Size2f size; //< width and height of the rectangle + float angle; //< the rotation angle. When the angle is 0, 90, 180, 270 etc., the rectangle becomes an up-right rectangle. +}; + +//////////////////////////////// Scalar_ /////////////////////////////// + +/*! + The template scalar class. + + This is partially specialized cv::Vec class with the number of elements = 4, i.e. a short vector of four elements. + Normally, cv::Scalar ~ cv::Scalar_ is used. +*/ +template class CV_EXPORTS Scalar_ : public Vec<_Tp, 4> +{ +public: + //! 
various constructors + Scalar_(); + Scalar_(_Tp v0, _Tp v1, _Tp v2=0, _Tp v3=0); + Scalar_(const CvScalar& s); + Scalar_(_Tp v0); + + //! returns a scalar with all elements set to v0 + static Scalar_<_Tp> all(_Tp v0); + //! conversion to the old-style CvScalar + operator CvScalar() const; + + //! conversion to another data type + template operator Scalar_() const; + + //! per-element product + Scalar_<_Tp> mul(const Scalar_<_Tp>& t, double scale=1 ) const; + + // returns (v0, -v1, -v2, -v3) + Scalar_<_Tp> conj() const; + + // returns true iff v1 == v2 == v3 == 0 + bool isReal() const; +}; + +typedef Scalar_ Scalar; + +CV_EXPORTS void scalarToRawData(const Scalar& s, void* buf, int type, int unroll_to=0); + +//////////////////////////////// Range ///////////////////////////////// + +/*! + The 2D range class + + This is the class used to specify a continuous subsequence, i.e. part of a contour, or a column span in a matrix. +*/ +class CV_EXPORTS Range +{ +public: + Range(); + Range(int _start, int _end); + Range(const CvSlice& slice); + int size() const; + bool empty() const; + static Range all(); + operator CvSlice() const; + + int start, end; +}; + +/////////////////////////////// DataType //////////////////////////////// + +/*! + Informative template class for OpenCV "scalars". + + The class is specialized for each primitive numerical type supported by OpenCV (such as unsigned char or float), + as well as for more complex types, like cv::Complex<>, std::complex<>, cv::Vec<> etc. + The common property of all such types (called "scalars", do not confuse it with cv::Scalar_) + is that each of them is basically a tuple of numbers of the same type. Each "scalar" can be represented + by the depth id (CV_8U ... CV_64F) and the number of channels. + OpenCV matrices, 2D or nD, dense or sparse, can store "scalars", + as long as the number of channels does not exceed CV_CN_MAX. 
+*/ +template class DataType +{ +public: + typedef _Tp value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + + enum { generic_type = 1, depth = -1, channels = 1, fmt=0, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef bool value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef uchar value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef schar value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef schar value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef ushort value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef short value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = 
CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef int value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef float value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef double value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template class DataType > +{ +public: + typedef Vec<_Tp, cn> value_type; + typedef Vec::work_type, cn> work_type; + typedef _Tp channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = cn, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template class DataType > +{ +public: + typedef std::complex<_Tp> value_type; + typedef value_type work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Complex<_Tp> value_type; + typedef value_type work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Point_<_Tp> value_type; + typedef 
Point_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Point3_<_Tp> value_type; + typedef Point3_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 3, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Size_<_Tp> value_type; + typedef Size_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Rect_<_Tp> value_type; + typedef Rect_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 4, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Scalar_<_Tp> value_type; + typedef Scalar_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 4, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template<> class DataType +{ +public: + typedef Range value_type; + typedef value_type work_type; + typedef int channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + + +//////////////////// generic_type ref-counting pointer class for C/C++ objects //////////////////////// + +/*! 
+ Smart pointer to dynamically allocated objects. + + This is template pointer-wrapping class that stores the associated reference counter along with the + object pointer. The class is similar to std::smart_ptr<> from the recent addons to the C++ standard, + but is shorter to write :) and self-contained (i.e. does add any dependency on the compiler or an external library). + + Basically, you can use "Ptr ptr" (or faster "const Ptr& ptr" for read-only access) + everywhere instead of "MyObjectType* ptr", where MyObjectType is some C structure or a C++ class. + To make it all work, you need to specialize Ptr<>::delete_obj(), like: + + \code + template<> void Ptr::delete_obj() { call_destructor_func(obj); } + \endcode + + \note{if MyObjectType is a C++ class with a destructor, you do not need to specialize delete_obj(), + since the default implementation calls "delete obj;"} + + \note{Another good property of the class is that the operations on the reference counter are atomic, + i.e. it is safe to use the class in multi-threaded applications} +*/ +template class CV_EXPORTS Ptr +{ +public: + //! empty constructor + Ptr(); + //! take ownership of the pointer. The associated reference counter is allocated and set to 1 + Ptr(_Tp* _obj); + //! calls release() + ~Ptr(); + //! copy constructor. Copies the members and calls addref() + Ptr(const Ptr& ptr); + //! copy operator. Calls ptr.addref() and release() before copying the members + Ptr& operator = (const Ptr& ptr); + //! increments the reference counter + void addref(); + //! decrements the reference counter. If it reaches 0, delete_obj() is called + void release(); + //! deletes the object. Override if needed + void delete_obj(); + //! returns true iff obj==NULL + bool empty() const; + + + //! helper operators making "Ptr ptr" use very similar to "T* ptr". + _Tp* operator -> (); + const _Tp* operator -> () const; + + operator _Tp* (); + operator const _Tp*() const; + +protected: + _Tp* obj; //< the object pointer. 
+ int* refcount; //< the associated reference counter +}; + + +//////////////////////// Input/Output Array Arguments ///////////////////////////////// + +/*! + Proxy datatype for passing Mat's and vector<>'s as input parameters + */ +class CV_EXPORTS _InputArray +{ +public: + enum { KIND_SHIFT=16, NONE=0< _InputArray(const vector<_Tp>& vec); + template _InputArray(const vector >& vec); + _InputArray(const vector& vec); + template _InputArray(const Matx<_Tp, m, n>& matx); + _InputArray(const Scalar& s); + _InputArray(const double& val); + virtual Mat getMat(int i=-1) const; + virtual void getMatVector(vector& mv) const; + virtual int kind() const; + virtual Size size(int i=-1) const; + virtual size_t total(int i=-1) const; + virtual int type(int i=-1) const; + virtual int depth(int i=-1) const; + virtual int channels(int i=-1) const; + virtual bool empty() const; + + int flags; + void* obj; + Size sz; +}; + + +enum +{ + DEPTH_MASK_8U = 1 << CV_8U, + DEPTH_MASK_8S = 1 << CV_8S, + DEPTH_MASK_16U = 1 << CV_16U, + DEPTH_MASK_16S = 1 << CV_16S, + DEPTH_MASK_32S = 1 << CV_32S, + DEPTH_MASK_32F = 1 << CV_32F, + DEPTH_MASK_64F = 1 << CV_64F, + DEPTH_MASK_ALL = (DEPTH_MASK_64F<<1)-1, + DEPTH_MASK_ALL_BUT_8S = DEPTH_MASK_ALL & ~DEPTH_MASK_8S, + DEPTH_MASK_FLT = DEPTH_MASK_32F + DEPTH_MASK_64F +}; + + +/*! 
+ Proxy datatype for passing Mat's and vector<>'s as input parameters + */ +class CV_EXPORTS _OutputArray : public _InputArray +{ +public: + _OutputArray(); + _OutputArray(Mat& m); + template _OutputArray(vector<_Tp>& vec); + template _OutputArray(vector >& vec); + _OutputArray(vector& vec); + template _OutputArray(Matx<_Tp, m, n>& matx); + virtual bool fixedSize() const; + virtual bool fixedType() const; + virtual bool needed() const; + virtual Mat& getMatRef(int i=-1) const; + virtual void create(Size sz, int type, int i=-1, bool allocateVector=false, int fixedDepthMask=0) const; + virtual void create(int rows, int cols, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const; + virtual void create(int dims, const int* size, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const; + virtual void release() const; + virtual void clear() const; +}; + +typedef const _InputArray& InputArray; +typedef InputArray InputArrayOfArrays; +typedef const _OutputArray& OutputArray; +typedef OutputArray OutputArrayOfArrays; +typedef OutputArray InputOutputArray; +typedef OutputArray InputOutputArrayOfArrays; + +CV_EXPORTS OutputArray noArray(); + +/////////////////////////////////////// Mat /////////////////////////////////////////// + +enum { MAGIC_MASK=0xFFFF0000, TYPE_MASK=0x00000FFF, DEPTH_MASK=7 }; + +static inline size_t getElemSize(int type) { return CV_ELEM_SIZE(type); } + +/*! + Custom array allocator + +*/ +class CV_EXPORTS MatAllocator +{ +public: + MatAllocator() {} + virtual ~MatAllocator() {} + virtual void allocate(int dims, const int* sizes, int type, int*& refcount, + uchar*& datastart, uchar*& data, size_t* step) = 0; + virtual void deallocate(int* refcount, uchar* datastart, uchar* data) = 0; +}; + +/*! + The n-dimensional matrix class. + + The class represents an n-dimensional dense numerical array that can act as + a matrix, image, optical flow map, 3-focal tensor etc. 
+ It is very similar to CvMat and CvMatND types from earlier versions of OpenCV, + and similarly to those types, the matrix can be multi-channel. It also fully supports ROI mechanism. + + There are many different ways to create cv::Mat object. Here are the some popular ones: +

+ + Once matrix is created, it will be automatically managed by using reference-counting mechanism + (unless the matrix header is built on top of user-allocated data, + in which case you should handle the data by yourself). + The matrix data will be deallocated when no one points to it; + if you want to release the data pointed by a matrix header before the matrix destructor is called, + use cv::Mat::release(). + + The next important thing to learn about the matrix class is element access. Here is how the matrix is stored. + The elements are stored in row-major order (row by row). The cv::Mat::data member points to the first element of the first row, + cv::Mat::rows contains the number of matrix rows and cv::Mat::cols - the number of matrix columns. There is yet another member, + cv::Mat::step that is used to actually compute address of a matrix element. cv::Mat::step is needed because the matrix can be + a part of another matrix or because there can some padding space in the end of each row for a proper alignment. + + \image html roi.png + + Given these parameters, address of the matrix element M_{ij} is computed as following: + + addr(M_{ij})=M.data + M.step*i + j*M.elemSize() + + if you know the matrix element type, e.g. it is float, then you can use cv::Mat::at() method: + + addr(M_{ij})=&M.at(i,j) + + (where & is used to convert the reference returned by cv::Mat::at() to a pointer). 
+ if you need to process a whole row of matrix, the most efficient way is to get + the pointer to the row first, and then just use plain C operator []: + + \code + // compute sum of positive matrix elements + // (assuming that M is double-precision matrix) + double sum=0; + for(int i = 0; i < M.rows; i++) + { + const double* Mi = M.ptr(i); + for(int j = 0; j < M.cols; j++) + sum += std::max(Mi[j], 0.); + } + \endcode + + Some operations, like the above one, do not actually depend on the matrix shape, + they just process elements of a matrix one by one (or elements from multiple matrices + that are sitting in the same place, e.g. matrix addition). Such operations are called + element-wise and it makes sense to check whether all the input/output matrices are continuous, + i.e. have no gaps in the end of each row, and if yes, process them as a single long row: + + \code + // compute sum of positive matrix elements, optimized variant + double sum=0; + int cols = M.cols, rows = M.rows; + if(M.isContinuous()) + { + cols *= rows; + rows = 1; + } + for(int i = 0; i < rows; i++) + { + const double* Mi = M.ptr(i); + for(int j = 0; j < cols; j++) + sum += std::max(Mi[j], 0.); + } + \endcode + in the case of continuous matrix the outer loop body will be executed just once, + so the overhead will be smaller, which will be especially noticeable in the case of small matrices. + + Finally, there are STL-style iterators that are smart enough to skip gaps between successive rows: + \code + // compute sum of positive matrix elements, iterator-based variant + double sum=0; + MatConstIterator_ it = M.begin(), it_end = M.end(); + for(; it != it_end; ++it) + sum += std::max(*it, 0.); + \endcode + + The matrix iterators are random-access iterators, so they can be passed + to any STL algorithm, including std::sort(). +*/ +class CV_EXPORTS Mat +{ +public: + //! default constructor + Mat(); + //! 
constructs 2D matrix of the specified size and type + // (_type is CV_8UC1, CV_64FC3, CV_32SC(12) etc.) + Mat(int _rows, int _cols, int _type); + Mat(Size _size, int _type); + //! constucts 2D matrix and fills it with the specified value _s. + Mat(int _rows, int _cols, int _type, const Scalar& _s); + Mat(Size _size, int _type, const Scalar& _s); + + //! constructs n-dimensional matrix + Mat(int _ndims, const int* _sizes, int _type); + Mat(int _ndims, const int* _sizes, int _type, const Scalar& _s); + + //! copy constructor + Mat(const Mat& m); + //! constructor for matrix headers pointing to user-allocated data + Mat(int _rows, int _cols, int _type, void* _data, size_t _step=AUTO_STEP); + Mat(Size _size, int _type, void* _data, size_t _step=AUTO_STEP); + Mat(int _ndims, const int* _sizes, int _type, void* _data, const size_t* _steps=0); + + //! creates a matrix header for a part of the bigger matrix + Mat(const Mat& m, const Range& rowRange, const Range& colRange=Range::all()); + Mat(const Mat& m, const Rect& roi); + Mat(const Mat& m, const Range* ranges); + //! converts old-style CvMat to the new matrix; the data is not copied by default + Mat(const CvMat* m, bool copyData=false); + //! converts old-style CvMatND to the new matrix; the data is not copied by default + Mat(const CvMatND* m, bool copyData=false); + //! converts old-style IplImage to the new matrix; the data is not copied by default + Mat(const IplImage* img, bool copyData=false); + //! builds matrix from std::vector with or without copying the data + template explicit Mat(const vector<_Tp>& vec, bool copyData=false); + //! builds matrix from cv::Vec; the data is copied by default + template explicit Mat(const Vec<_Tp, n>& vec, + bool copyData=true); + //! builds matrix from cv::Matx; the data is copied by default + template explicit Mat(const Matx<_Tp, m, n>& mtx, + bool copyData=true); + //! builds matrix from a 2D point + template explicit Mat(const Point_<_Tp>& pt, bool copyData=true); + //! 
builds matrix from a 3D point + template explicit Mat(const Point3_<_Tp>& pt, bool copyData=true); + //! builds matrix from comma initializer + template explicit Mat(const MatCommaInitializer_<_Tp>& commaInitializer); + //! destructor - calls release() + ~Mat(); + //! assignment operators + Mat& operator = (const Mat& m); + Mat& operator = (const MatExpr& expr); + + //! returns a new matrix header for the specified row + Mat row(int y) const; + //! returns a new matrix header for the specified column + Mat col(int x) const; + //! ... for the specified row span + Mat rowRange(int startrow, int endrow) const; + Mat rowRange(const Range& r) const; + //! ... for the specified column span + Mat colRange(int startcol, int endcol) const; + Mat colRange(const Range& r) const; + //! ... for the specified diagonal + // (d=0 - the main diagonal, + // >0 - a diagonal from the lower half, + // <0 - a diagonal from the upper half) + Mat diag(int d=0) const; + //! constructs a square diagonal matrix which main diagonal is vector "d" + static Mat diag(const Mat& d); + + //! returns deep copy of the matrix, i.e. the data is copied + Mat clone() const; + //! copies the matrix content to "m". + // It calls m.create(this->size(), this->type()). + void copyTo( OutputArray m ) const; + //! copies those matrix elements to "m" that are marked with non-zero mask elements. + void copyTo( OutputArray m, InputArray mask ) const; + //! converts matrix to another datatype with optional scalng. See cvConvertScale. + void convertTo( OutputArray m, int rtype, double alpha=1, double beta=0 ) const; + + void assignTo( Mat& m, int type=-1 ) const; + + //! sets every matrix element to s + Mat& operator = (const Scalar& s); + //! sets some of the matrix elements to s, according to the mask + Mat& setTo(InputArray value, InputArray mask=noArray()); + //! creates alternative matrix header for the same data, with different + // number of channels and/or different number of rows. see cvReshape. 
+ Mat reshape(int _cn, int _rows=0) const; + Mat reshape(int _cn, int _newndims, const int* _newsz) const; + + //! matrix transposition by means of matrix expressions + MatExpr t() const; + //! matrix inversion by means of matrix expressions + MatExpr inv(int method=DECOMP_LU) const; + //! per-element matrix multiplication by means of matrix expressions + MatExpr mul(InputArray m, double scale=1) const; + + //! computes cross-product of 2 3D vectors + Mat cross(InputArray m) const; + //! computes dot-product + double dot(InputArray m) const; + + //! Matlab-style matrix initialization + static MatExpr zeros(int rows, int cols, int type); + static MatExpr zeros(Size size, int type); + static MatExpr zeros(int ndims, const int* sz, int type); + static MatExpr ones(int rows, int cols, int type); + static MatExpr ones(Size size, int type); + static MatExpr ones(int ndims, const int* sz, int type); + static MatExpr eye(int rows, int cols, int type); + static MatExpr eye(Size size, int type); + + //! allocates new matrix data unless the matrix already has specified size and type. + // previous data is unreferenced if needed. + void create(int _rows, int _cols, int _type); + void create(Size _size, int _type); + void create(int _ndims, const int* _sizes, int _type); + + //! increases the reference counter; use with care to avoid memleaks + void addref(); + //! decreases reference counter; + // deallocates the data when reference counter reaches 0. + void release(); + + //! deallocates the matrix data + void deallocate(); + //! internal use function; properly re-allocates _size, _step arrays + void copySize(const Mat& m); + + //! reserves enough space to fit sz hyper-planes + void reserve(size_t sz); + //! resizes matrix to the specified number of hyper-planes + void resize(size_t sz); + //! resizes matrix to the specified number of hyper-planes; initializes the newly added elements + void resize(size_t sz, const Scalar& s); + //! 
internal function + void push_back_(const void* elem); + //! adds element to the end of 1d matrix (or possibly multiple elements when _Tp=Mat) + template void push_back(const _Tp& elem); + template void push_back(const Mat_<_Tp>& elem); + void push_back(const Mat& m); + //! removes several hyper-planes from bottom of the matrix + void pop_back(size_t nelems=1); + + //! locates matrix header within a parent matrix. See below + void locateROI( Size& wholeSize, Point& ofs ) const; + //! moves/resizes the current matrix ROI inside the parent matrix. + Mat& adjustROI( int dtop, int dbottom, int dleft, int dright ); + //! extracts a rectangular sub-matrix + // (this is a generalized form of row, rowRange etc.) + Mat operator()( Range rowRange, Range colRange ) const; + Mat operator()( const Rect& roi ) const; + Mat operator()( const Range* ranges ) const; + + //! converts header to CvMat; no data is copied + operator CvMat() const; + //! converts header to CvMatND; no data is copied + operator CvMatND() const; + //! converts header to IplImage; no data is copied + operator IplImage() const; + + template operator vector<_Tp>() const; + template operator Vec<_Tp, n>() const; + template operator Matx<_Tp, m, n>() const; + + //! returns true iff the matrix data is continuous + // (i.e. when there are no gaps between successive rows). + // similar to CV_IS_MAT_CONT(cvmat->type) + bool isContinuous() const; + + //! returns true if the matrix is a submatrix of another matrix + bool isSubmatrix() const; + + //! returns element size in bytes, + // similar to CV_ELEM_SIZE(cvmat->type) + size_t elemSize() const; + //! returns the size of element channel in bytes. + size_t elemSize1() const; + //! returns element type, similar to CV_MAT_TYPE(cvmat->type) + int type() const; + //! returns element type, similar to CV_MAT_DEPTH(cvmat->type) + int depth() const; + //! returns element type, similar to CV_MAT_CN(cvmat->type) + int channels() const; + //! 
returns step/elemSize1() + size_t step1(int i=0) const; + //! returns true if matrix data is NULL + bool empty() const; + //! returns the total number of matrix elements + size_t total() const; + + //! returns N if the matrix is 1-channel (N x ptdim) or ptdim-channel (1 x N) or (N x 1); negative number otherwise + int checkVector(int elemChannels, int depth=-1, bool requireContinuous=true) const; + + //! returns pointer to i0-th submatrix along the dimension #0 + uchar* ptr(int i0=0); + const uchar* ptr(int i0=0) const; + + //! returns pointer to (i0,i1) submatrix along the dimensions #0 and #1 + uchar* ptr(int i0, int i1); + const uchar* ptr(int i0, int i1) const; + + //! returns pointer to (i0,i1,i3) submatrix along the dimensions #0, #1, #2 + uchar* ptr(int i0, int i1, int i2); + const uchar* ptr(int i0, int i1, int i2) const; + + //! returns pointer to the matrix element + uchar* ptr(const int* idx); + //! returns read-only pointer to the matrix element + const uchar* ptr(const int* idx) const; + + template uchar* ptr(const Vec& idx); + template const uchar* ptr(const Vec& idx) const; + + //! template version of the above method + template _Tp* ptr(int i0=0); + template const _Tp* ptr(int i0=0) const; + + template _Tp* ptr(int i0, int i1); + template const _Tp* ptr(int i0, int i1) const; + + template _Tp* ptr(int i0, int i1, int i2); + template const _Tp* ptr(int i0, int i1, int i2) const; + + template _Tp* ptr(const int* idx); + template const _Tp* ptr(const int* idx) const; + + template _Tp* ptr(const Vec& idx); + template const _Tp* ptr(const Vec& idx) const; + + //! 
the same as above, with the pointer dereferencing + template _Tp& at(int i0=0); + template const _Tp& at(int i0=0) const; + + template _Tp& at(int i0, int i1); + template const _Tp& at(int i0, int i1) const; + + template _Tp& at(int i0, int i1, int i2); + template const _Tp& at(int i0, int i1, int i2) const; + + template _Tp& at(const int* idx); + template const _Tp& at(const int* idx) const; + + template _Tp& at(const Vec& idx); + template const _Tp& at(const Vec& idx) const; + + //! special versions for 2D arrays (especially convenient for referencing image pixels) + template _Tp& at(Point pt); + template const _Tp& at(Point pt) const; + + //! template methods for iteration over matrix elements. + // the iterators take care of skipping gaps in the end of rows (if any) + template MatIterator_<_Tp> begin(); + template MatIterator_<_Tp> end(); + template MatConstIterator_<_Tp> begin() const; + template MatConstIterator_<_Tp> end() const; + + enum { MAGIC_VAL=0x42FF0000, AUTO_STEP=0, CONTINUOUS_FLAG=CV_MAT_CONT_FLAG, SUBMATRIX_FLAG=CV_SUBMAT_FLAG }; + + /*! includes several bit-fields: + - the magic signature + - continuity flag + - depth + - number of channels + */ + int flags; + //! the matrix dimensionality, >= 2 + int dims; + //! the number of rows and columns or (-1, -1) when the matrix has more than 2 dimensions + int rows, cols; + //! pointer to the data + uchar* data; + + //! pointer to the reference counter; + // when matrix points to user-allocated data, the pointer is NULL + int* refcount; + + //! helper fields used in locateROI and adjustROI + uchar* datastart; + uchar* dataend; + uchar* datalimit; + + //! 
custom allocator + MatAllocator* allocator; + + struct CV_EXPORTS MSize + { + MSize(int* _p); + Size operator()() const; + const int& operator[](int i) const; + int& operator[](int i); + operator const int*() const; + bool operator == (const MSize& sz) const; + bool operator != (const MSize& sz) const; + + int* p; + }; + + struct CV_EXPORTS MStep + { + MStep(); + MStep(size_t s); + const size_t& operator[](int i) const; + size_t& operator[](int i); + operator size_t() const; + MStep& operator = (size_t s); + + size_t* p; + size_t buf[2]; + protected: + MStep& operator = (const MStep&); + }; + + MSize size; + MStep step; +}; + + +/*! + Random Number Generator + + The class implements RNG using Multiply-with-Carry algorithm +*/ +class CV_EXPORTS RNG +{ +public: + enum { UNIFORM=0, NORMAL=1 }; + + RNG(); + RNG(uint64 _state); + //! updates the state and returns the next 32-bit unsigned integer random number + unsigned next(); + + operator uchar(); + operator schar(); + operator ushort(); + operator short(); + operator unsigned(); + //! returns a random integer sampled uniformly from [0, N). + unsigned operator()(unsigned N); + unsigned operator ()(); + operator int(); + operator float(); + operator double(); + //! returns uniformly distributed integer random number from [a,b) range + int uniform(int a, int b); + //! returns uniformly distributed floating-point random number from [a,b) range + float uniform(float a, float b); + //! returns uniformly distributed double-precision floating-point random number from [a,b) range + double uniform(double a, double b); + void fill( InputOutputArray mat, int distType, InputArray a, InputArray b ); + //! returns Gaussian random variate with mean zero. + double gaussian(double sigma); + + uint64 state; +}; + + +/*! 
+ Termination criteria in iterative algorithms + */ +class CV_EXPORTS TermCriteria +{ +public: + enum + { + COUNT=1, //!< the maximum number of iterations or elements to compute + MAX_ITER=COUNT, //!< ditto + EPS=2 //!< the desired accuracy or change in parameters at which the iterative algorithm stops + }; + + //! default constructor + TermCriteria(); + //! full constructor + TermCriteria(int _type, int _maxCount, double _epsilon); + //! conversion from CvTermCriteria + TermCriteria(const CvTermCriteria& criteria); + //! conversion from CvTermCriteria + operator CvTermCriteria() const; + + int type; //!< the type of termination criteria: COUNT, EPS or COUNT + EPS + int maxCount; // the maximum number of iterations/elements + double epsilon; // the desired accuracy +}; + + +//! swaps two matrices +CV_EXPORTS void swap(Mat& a, Mat& b); + +//! converts array (CvMat or IplImage) to cv::Mat +CV_EXPORTS Mat cvarrToMat(const CvArr* arr, bool copyData=false, + bool allowND=true, int coiMode=0); +//! extracts Channel of Interest from CvMat or IplImage and makes cv::Mat out of it. +CV_EXPORTS void extractImageCOI(const CvArr* arr, OutputArray coiimg, int coi=-1); +//! inserts single-channel cv::Mat into a multi-channel CvMat or IplImage +CV_EXPORTS void insertImageCOI(InputArray coiimg, CvArr* arr, int coi=-1); + +//! adds one matrix to another (dst = src1 + src2) +CV_EXPORTS_W void add(InputArray src1, InputArray src2, OutputArray dst, + InputArray mask=noArray(), int dtype=-1); +//! subtracts one matrix from another (dst = src1 - src2) +CV_EXPORTS_W void subtract(InputArray src1, InputArray src2, OutputArray dst, + InputArray mask=noArray(), int dtype=-1); + +//! computes element-wise weighted product of the two arrays (dst = scale*src1*src2) +CV_EXPORTS_W void multiply(InputArray src1, InputArray src2, + OutputArray dst, double scale=1, int dtype=-1); + +//! 
computes element-wise weighted quotient of the two arrays (dst = scale*src1/src2) +CV_EXPORTS_W void divide(InputArray src1, InputArray src2, OutputArray dst, + double scale=1, int dtype=-1); + +//! computes element-wise weighted reciprocal of an array (dst = scale/src2) +CV_EXPORTS_W void divide(double scale, InputArray src2, + OutputArray dst, int dtype=-1); + +//! adds scaled array to another one (dst = alpha*src1 + src2) +CV_EXPORTS_W void scaleAdd(InputArray src1, double alpha, InputArray src2, OutputArray dst); + +//! computes weighted sum of two arrays (dst = alpha*src1 + beta*src2 + gamma) +CV_EXPORTS_W void addWeighted(InputArray src1, double alpha, InputArray src2, + double beta, double gamma, OutputArray dst, int dtype=-1); + +//! scales array elements, computes absolute values and converts the results to 8-bit unsigned integers: dst(i)=saturate_castabs(src(i)*alpha+beta) +CV_EXPORTS_W void convertScaleAbs(InputArray src, OutputArray dst, + double alpha=1, double beta=0); +//! transforms array of numbers using a lookup table: dst(i)=lut(src(i)) +CV_EXPORTS_W void LUT(InputArray src, InputArray lut, OutputArray dst, + int interpolation=0); + +//! computes sum of array elements +CV_EXPORTS_AS(sumElems) Scalar sum(InputArray src); +//! computes the number of nonzero array elements +CV_EXPORTS_W int countNonZero( InputArray src ); +//! computes mean value of selected array elements +CV_EXPORTS_W Scalar mean(InputArray src, InputArray mask=noArray()); +//! computes mean value and standard deviation of all or selected array elements +CV_EXPORTS_W void meanStdDev(InputArray src, OutputArray mean, OutputArray stddev, + InputArray mask=noArray()); +//! computes norm of the selected array part +CV_EXPORTS_W double norm(InputArray src1, int normType=NORM_L2, InputArray mask=noArray()); +//! 
computes norm of selected part of the difference between two arrays +CV_EXPORTS_W double norm(InputArray src1, InputArray src2, + int normType=NORM_L2, InputArray mask=noArray()); +//! scales and shifts array elements so that either the specified norm (alpha) or the minimum (alpha) and maximum (beta) array values get the specified values +CV_EXPORTS_W void normalize( InputArray src, OutputArray dst, double alpha=1, double beta=0, + int norm_type=NORM_L2, int dtype=-1, InputArray mask=noArray()); + +//! finds global minimum and maximum array elements and returns their values and their locations +CV_EXPORTS_W void minMaxLoc(InputArray src, CV_OUT double* minVal, + CV_OUT double* maxVal=0, CV_OUT Point* minLoc=0, + CV_OUT Point* maxLoc=0, InputArray mask=noArray()); +CV_EXPORTS void minMaxIdx(InputArray src, double* minVal, double* maxVal, + int* minIdx=0, int* maxIdx=0, InputArray mask=noArray()); + +//! transforms 2D matrix to 1D row or column vector by taking sum, minimum, maximum or mean value over all the rows +CV_EXPORTS_W void reduce(InputArray src, OutputArray dst, int dim, int rtype, int dtype=-1); + +//! makes multi-channel array out of several single-channel arrays +CV_EXPORTS void merge(const Mat* mv, size_t count, OutputArray dst); +//! makes multi-channel array out of several single-channel arrays +CV_EXPORTS_W void merge(const vector& mv, OutputArray dst); + +//! copies each plane of a multi-channel array to a dedicated array +CV_EXPORTS void split(const Mat& src, Mat* mvbegin); +//! copies each plane of a multi-channel array to a dedicated array +CV_EXPORTS_W void split(const Mat& m, CV_OUT vector& mv); + +//! 
copies selected channels from the input arrays to the selected channels of the output arrays +CV_EXPORTS void mixChannels(const Mat* src, size_t nsrcs, Mat* dst, size_t ndsts, + const int* fromTo, size_t npairs); +CV_EXPORTS void mixChannels(const vector& src, vector& dst, + const int* fromTo, size_t npairs); +CV_EXPORTS_W void mixChannels(InputArrayOfArrays src, InputArrayOfArrays dst, + const vector& fromTo); + +//! extracts a single channel from src (coi is 0-based index) +CV_EXPORTS_W void extractChannel(InputArray src, OutputArray dst, int coi); + +//! inserts a single channel to dst (coi is 0-based index) +CV_EXPORTS_W void insertChannel(InputArray src, InputOutputArray dst, int coi); + +//! reverses the order of the rows, columns or both in a matrix +CV_EXPORTS_W void flip(InputArray src, OutputArray dst, int flipCode); + +//! replicates the input matrix the specified number of times in the horizontal and/or vertical direction +CV_EXPORTS_W void repeat(InputArray src, int ny, int nx, OutputArray dst); +CV_EXPORTS Mat repeat(const Mat& src, int ny, int nx); + +CV_EXPORTS void hconcat(const Mat* src, size_t nsrc, OutputArray dst); +CV_EXPORTS void hconcat(InputArray src1, InputArray src2, OutputArray dst); +CV_EXPORTS_W void hconcat(InputArray src, OutputArray dst); + +CV_EXPORTS void vconcat(const Mat* src, size_t nsrc, OutputArray dst); +CV_EXPORTS void vconcat(InputArray src1, InputArray src2, OutputArray dst); +CV_EXPORTS_W void vconcat(InputArray src, OutputArray dst); + +//! computes bitwise conjunction of the two arrays (dst = src1 & src2) +CV_EXPORTS_W void bitwise_and(InputArray src1, InputArray src2, + OutputArray dst, InputArray mask=noArray()); +//! computes bitwise disjunction of the two arrays (dst = src1 | src2) +CV_EXPORTS_W void bitwise_or(InputArray src1, InputArray src2, + OutputArray dst, InputArray mask=noArray()); +//! 
computes bitwise exclusive-or of the two arrays (dst = src1 ^ src2) +CV_EXPORTS_W void bitwise_xor(InputArray src1, InputArray src2, + OutputArray dst, InputArray mask=noArray()); +//! inverts each bit of array (dst = ~src) +CV_EXPORTS_W void bitwise_not(InputArray src, OutputArray dst, + InputArray mask=noArray()); +//! computes element-wise absolute difference of two arrays (dst = abs(src1 - src2)) +CV_EXPORTS_W void absdiff(InputArray src1, InputArray src2, OutputArray dst); +//! set mask elements for those array elements which are within the element-specific bounding box (dst = lowerb <= src && src < upperb) +CV_EXPORTS_W void inRange(InputArray src, InputArray lowerb, + InputArray upperb, OutputArray dst); +//! compares elements of two arrays (dst = src1 src2) +CV_EXPORTS_W void compare(InputArray src1, InputArray src2, OutputArray dst, int cmpop); +//! computes per-element minimum of two arrays (dst = min(src1, src2)) +CV_EXPORTS_W void min(InputArray src1, InputArray src2, OutputArray dst); +//! computes per-element maximum of two arrays (dst = max(src1, src2)) +CV_EXPORTS_W void max(InputArray src1, InputArray src2, OutputArray dst); + +//! computes per-element minimum of two arrays (dst = min(src1, src2)) +CV_EXPORTS void min(const Mat& src1, const Mat& src2, Mat& dst); +//! computes per-element minimum of array and scalar (dst = min(src1, src2)) +CV_EXPORTS void min(const Mat& src1, double src2, Mat& dst); +//! computes per-element maximum of two arrays (dst = max(src1, src2)) +CV_EXPORTS void max(const Mat& src1, const Mat& src2, Mat& dst); +//! computes per-element maximum of array and scalar (dst = max(src1, src2)) +CV_EXPORTS void max(const Mat& src1, double src2, Mat& dst); + +//! computes square root of each matrix element (dst = src**0.5) +CV_EXPORTS_W void sqrt(InputArray src, OutputArray dst); +//! raises the input matrix elements to the specified power (b = a**power) +CV_EXPORTS_W void pow(InputArray src, double power, OutputArray dst); +//! 
computes exponent of each matrix element (dst = e**src) +CV_EXPORTS_W void exp(InputArray src, OutputArray dst); +//! computes natural logarithm of absolute value of each matrix element: dst = log(abs(src)) +CV_EXPORTS_W void log(InputArray src, OutputArray dst); +//! computes cube root of the argument +CV_EXPORTS_W float cubeRoot(float val); +//! computes the angle in degrees (0..360) of the vector (x,y) +CV_EXPORTS_W float fastAtan2(float y, float x); +//! converts polar coordinates to Cartesian +CV_EXPORTS_W void polarToCart(InputArray magnitude, InputArray angle, + OutputArray x, OutputArray y, bool angleInDegrees=false); +//! converts Cartesian coordinates to polar +CV_EXPORTS_W void cartToPolar(InputArray x, InputArray y, + OutputArray magnitude, OutputArray angle, + bool angleInDegrees=false); +//! computes angle (angle(i)) of each (x(i), y(i)) vector +CV_EXPORTS_W void phase(InputArray x, InputArray y, OutputArray angle, + bool angleInDegrees=false); +//! computes magnitude (magnitude(i)) of each (x(i), y(i)) vector +CV_EXPORTS_W void magnitude(InputArray x, InputArray y, OutputArray magnitude); +//! checks that each matrix element is within the specified range. +CV_EXPORTS_W bool checkRange(InputArray a, bool quiet=true, CV_OUT Point* pt=0, + double minVal=-DBL_MAX, double maxVal=DBL_MAX); +//! implements generalized matrix product algorithm GEMM from BLAS +CV_EXPORTS_W void gemm(InputArray src1, InputArray src2, double alpha, + InputArray src3, double gamma, OutputArray dst, int flags=0); +//! multiplies matrix by its transposition from the left or from the right +CV_EXPORTS_W void mulTransposed( InputArray src, OutputArray dst, bool aTa, + InputArray delta=noArray(), + double scale=1, int dtype=-1 ); +//! transposes the matrix +CV_EXPORTS_W void transpose(InputArray src, OutputArray dst); +//! performs affine transformation of each element of multi-channel input matrix +CV_EXPORTS_W void transform(InputArray src, OutputArray dst, InputArray m ); +//! 
performs perspective transformation of each element of multi-channel input matrix +CV_EXPORTS_W void perspectiveTransform(InputArray src, OutputArray dst, InputArray m ); + +//! extends the symmetrical matrix from the lower half or from the upper half +CV_EXPORTS_W void completeSymm(InputOutputArray mtx, bool lowerToUpper=false); +//! initializes scaled identity matrix +CV_EXPORTS_W void setIdentity(InputOutputArray mtx, const Scalar& s=Scalar(1)); +//! computes determinant of a square matrix +CV_EXPORTS_W double determinant(InputArray mtx); +//! computes trace of a matrix +CV_EXPORTS_W Scalar trace(InputArray mtx); +//! computes inverse or pseudo-inverse matrix +CV_EXPORTS_W double invert(InputArray src, OutputArray dst, int flags=DECOMP_LU); +//! solves linear system or a least-square problem +CV_EXPORTS_W bool solve(InputArray src1, InputArray src2, + OutputArray dst, int flags=DECOMP_LU); + +enum +{ + SORT_EVERY_ROW=0, + SORT_EVERY_COLUMN=1, + SORT_ASCENDING=0, + SORT_DESCENDING=16 +}; + +//! sorts independently each matrix row or each matrix column +CV_EXPORTS_W void sort(InputArray src, OutputArray dst, int flags); +//! sorts independently each matrix row or each matrix column +CV_EXPORTS_W void sortIdx(InputArray src, OutputArray dst, int flags); +//! finds real roots of a cubic polynomial +CV_EXPORTS_W int solveCubic(InputArray coeffs, OutputArray roots); +//! finds real and complex roots of a polynomial +CV_EXPORTS_W double solvePoly(InputArray coeffs, OutputArray roots, int maxIters=300); +//! finds eigenvalues of a symmetric matrix +CV_EXPORTS bool eigen(InputArray src, OutputArray eigenvalues, int lowindex=-1, + int highindex=-1); +//! 
finds eigenvalues and eigenvectors of a symmetric matrix +CV_EXPORTS bool eigen(InputArray src, OutputArray eigenvalues, + OutputArray eigenvectors, + int lowindex=-1, int highindex=-1); +CV_EXPORTS_W bool eigen(InputArray src, bool computeEigenvectors, + OutputArray eigenvalues, OutputArray eigenvectors); + +enum +{ + COVAR_SCRAMBLED=0, + COVAR_NORMAL=1, + COVAR_USE_AVG=2, + COVAR_SCALE=4, + COVAR_ROWS=8, + COVAR_COLS=16 +}; + +//! computes covariation matrix of a set of samples +CV_EXPORTS void calcCovarMatrix( const Mat* samples, int nsamples, Mat& covar, Mat& mean, + int flags, int ctype=CV_64F); +//! computes covariation matrix of a set of samples +CV_EXPORTS_W void calcCovarMatrix( InputArray samples, OutputArray covar, + OutputArray mean, int flags, int ctype=CV_64F); + +/*! + Principal Component Analysis + + The class PCA is used to compute the special basis for a set of vectors. + The basis will consist of eigenvectors of the covariance matrix computed + from the input set of vectors. After PCA is performed, vectors can be transformed from + the original high-dimensional space to the subspace formed by a few most + prominent eigenvectors (called the principal components), + corresponding to the largest eigenvalues of the covariation matrix. + Thus the dimensionality of the vector and the correlation between the coordinates is reduced. + + The following sample is the function that takes two matrices. The first one stores the set + of vectors (a row per vector) that is used to compute PCA, the second one stores another + "test" set of vectors (a row per vector) that are first compressed with PCA, + then reconstructed back and then the reconstruction error norm is computed and printed for each vector. 
+ + \code + using namespace cv; + + PCA compressPCA(const Mat& pcaset, int maxComponents, + const Mat& testset, Mat& compressed) + { + PCA pca(pcaset, // pass the data + Mat(), // we do not have a pre-computed mean vector, + // so let the PCA engine to compute it + CV_PCA_DATA_AS_ROW, // indicate that the vectors + // are stored as matrix rows + // (use CV_PCA_DATA_AS_COL if the vectors are + // the matrix columns) + maxComponents // specify, how many principal components to retain + ); + // if there is no test data, just return the computed basis, ready-to-use + if( !testset.data ) + return pca; + CV_Assert( testset.cols == pcaset.cols ); + + compressed.create(testset.rows, maxComponents, testset.type()); + + Mat reconstructed; + for( int i = 0; i < testset.rows; i++ ) + { + Mat vec = testset.row(i), coeffs = compressed.row(i), reconstructed; + // compress the vector, the result will be stored + // in the i-th row of the output matrix + pca.project(vec, coeffs); + // and then reconstruct it + pca.backProject(coeffs, reconstructed); + // and measure the error + printf("%d. diff = %g\n", i, norm(vec, reconstructed, NORM_L2)); + } + return pca; + } + \endcode +*/ +class CV_EXPORTS PCA +{ +public: + //! default constructor + PCA(); + //! the constructor that performs PCA + PCA(InputArray data, InputArray mean, int flags, int maxComponents=0); + //! operator that performs PCA. The previously stored data, if any, is released + PCA& operator()(InputArray data, InputArray mean, int flags, int maxComponents=0); + //! projects vector from the original space to the principal components subspace + Mat project(InputArray vec) const; + //! projects vector from the original space to the principal components subspace + void project(InputArray vec, OutputArray result) const; + //! reconstructs the original vector from the projection + Mat backProject(InputArray vec) const; + //! 
reconstructs the original vector from the projection + void backProject(InputArray vec, OutputArray result) const; + + Mat eigenvectors; //!< eigenvectors of the covariation matrix + Mat eigenvalues; //!< eigenvalues of the covariation matrix + Mat mean; //!< mean value subtracted before the projection and added after the back projection +}; + +CV_EXPORTS_W void PCACompute(InputArray data, CV_OUT InputOutputArray mean, + OutputArray eigenvectors, int maxComponents=0); + +CV_EXPORTS_W void PCAProject(InputArray data, InputArray mean, + InputArray eigenvectors, OutputArray result); + +CV_EXPORTS_W void PCABackProject(InputArray data, InputArray mean, + InputArray eigenvectors, OutputArray result); + + +/*! + Singular Value Decomposition class + + The class is used to compute Singular Value Decomposition of a floating-point matrix and then + use it to solve least-square problems, under-determined linear systems, invert matrices, + compute condition numbers etc. + + For a bit faster operation you can pass flags=SVD::MODIFY_A|... to modify the decomposed matrix + when it is not necessarily to preserve it. If you want to compute condition number of a matrix + or absolute value of its determinant - you do not need SVD::u or SVD::vt, + so you can pass flags=SVD::NO_UV|... . Another flag SVD::FULL_UV indicates that the full-size SVD::u and SVD::vt + must be computed, which is not necessary most of the time. +*/ +class CV_EXPORTS SVD +{ +public: + enum { MODIFY_A=1, NO_UV=2, FULL_UV=4 }; + //! the default constructor + SVD(); + //! the constructor that performs SVD + SVD( InputArray src, int flags=0 ); + //! the operator that performs SVD. The previously allocated SVD::u, SVD::w are SVD::vt are released. + SVD& operator ()( InputArray src, int flags=0 ); + + //! decomposes matrix and stores the results to user-provided matrices + static void compute( InputArray src, OutputArray w, + OutputArray u, OutputArray vt, int flags=0 ); + //! 
computes singular values of a matrix + static void compute( InputArray src, OutputArray w, int flags=0 ); + //! performs back substitution + static void backSubst( InputArray w, InputArray u, + InputArray vt, InputArray rhs, + OutputArray dst ); + + template static void compute( const Matx<_Tp, m, n>& a, + Matx<_Tp, nm, 1>& w, Matx<_Tp, m, nm>& u, Matx<_Tp, n, nm>& vt ); + template static void compute( const Matx<_Tp, m, n>& a, + Matx<_Tp, nm, 1>& w ); + template static void backSubst( const Matx<_Tp, nm, 1>& w, + const Matx<_Tp, m, nm>& u, const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs, Matx<_Tp, n, nb>& dst ); + + //! finds dst = arg min_{|dst|=1} |m*dst| + static void solveZ( InputArray src, OutputArray dst ); + //! performs back substitution, so that dst is the solution or pseudo-solution of m*dst = rhs, where m is the decomposed matrix + void backSubst( InputArray rhs, OutputArray dst ) const; + + Mat u, w, vt; +}; + +//! computes SVD of src +CV_EXPORTS_W void SVDecomp( InputArray src, CV_OUT OutputArray w, + CV_OUT OutputArray u, CV_OUT OutputArray vt, int flags=0 ); + +//! performs back substitution for the previously computed SVD +CV_EXPORTS_W void SVBackSubst( InputArray w, InputArray u, InputArray vt, + InputArray rhs, CV_OUT OutputArray dst ); + +//! computes Mahalanobis distance between two vectors: sqrt((v1-v2)'*icovar*(v1-v2)), where icovar is the inverse covariation matrix +CV_EXPORTS_W double Mahalanobis(InputArray v1, InputArray v2, InputArray icovar); +//! a synonym for Mahalanobis +CV_EXPORTS double Mahalonobis(InputArray v1, InputArray v2, InputArray icovar); + +//! performs forward or inverse 1D or 2D Discrete Fourier Transformation +CV_EXPORTS_W void dft(InputArray src, OutputArray dst, int flags=0, int nonzeroRows=0); +//! performs inverse 1D or 2D Discrete Fourier Transformation +CV_EXPORTS_W void idft(InputArray src, OutputArray dst, int flags=0, int nonzeroRows=0); +//! 
performs forward or inverse 1D or 2D Discrete Cosine Transformation +CV_EXPORTS_W void dct(InputArray src, OutputArray dst, int flags=0); +//! performs inverse 1D or 2D Discrete Cosine Transformation +CV_EXPORTS_W void idct(InputArray src, OutputArray dst, int flags=0); +//! computes element-wise product of the two Fourier spectrums. The second spectrum can optionally be conjugated before the multiplication +CV_EXPORTS_W void mulSpectrums(InputArray a, InputArray b, OutputArray c, + int flags, bool conjB=false); +//! computes the minimal vector size vecsize1 >= vecsize so that the dft() of the vector of length vecsize1 can be computed efficiently +CV_EXPORTS_W int getOptimalDFTSize(int vecsize); + +/*! + Various k-Means flags +*/ +enum +{ + KMEANS_RANDOM_CENTERS=0, // Chooses random centers for k-Means initialization + KMEANS_PP_CENTERS=2, // Uses k-Means++ algorithm for initialization + KMEANS_USE_INITIAL_LABELS=1 // Uses the user-provided labels for K-Means initialization +}; +//! clusters the input data using k-Means algorithm +CV_EXPORTS_W double kmeans( InputArray data, int K, CV_OUT InputOutputArray bestLabels, + TermCriteria criteria, int attempts, + int flags, OutputArray centers=noArray() ); + +//! returns the thread-local Random number generator +CV_EXPORTS RNG& theRNG(); + +//! returns the next unifomly-distributed random number of the specified type +template static inline _Tp randu() { return (_Tp)theRNG(); } + +//! fills array with uniformly-distributed random numbers from the range [low, high) +CV_EXPORTS_W void randu(InputOutputArray dst, InputArray low, InputArray high); + +//! fills array with normally-distributed random numbers with the specified mean and the standard deviation +CV_EXPORTS_W void randn(InputOutputArray dst, InputArray mean, InputArray stddev); + +//! 
shuffles the input array elements +CV_EXPORTS void randShuffle(InputOutputArray dst, double iterFactor=1., RNG* rng=0); +CV_EXPORTS_AS(randShuffle) void randShuffle_(InputOutputArray dst, double iterFactor=1.); + +//! draws the line segment (pt1, pt2) in the image +CV_EXPORTS_W void line(Mat& img, Point pt1, Point pt2, const Scalar& color, + int thickness=1, int lineType=8, int shift=0); + +//! draws the rectangle outline or a solid rectangle with the opposite corners pt1 and pt2 in the image +CV_EXPORTS_W void rectangle(Mat& img, Point pt1, Point pt2, + const Scalar& color, int thickness=1, + int lineType=8, int shift=0); + +//! draws the rectangle outline or a solid rectangle covering rec in the image +CV_EXPORTS void rectangle(Mat& img, Rect rec, + const Scalar& color, int thickness=1, + int lineType=8, int shift=0); + +//! draws the circle outline or a solid circle in the image +CV_EXPORTS_W void circle(Mat& img, Point center, int radius, + const Scalar& color, int thickness=1, + int lineType=8, int shift=0); + +//! draws an elliptic arc, ellipse sector or a rotated ellipse in the image +CV_EXPORTS_W void ellipse(Mat& img, Point center, Size axes, + double angle, double startAngle, double endAngle, + const Scalar& color, int thickness=1, + int lineType=8, int shift=0); + +//! draws a rotated ellipse in the image +CV_EXPORTS_W void ellipse(Mat& img, const RotatedRect& box, const Scalar& color, + int thickness=1, int lineType=8); + +//! draws a filled convex polygon in the image +CV_EXPORTS void fillConvexPoly(Mat& img, const Point* pts, int npts, + const Scalar& color, int lineType=8, + int shift=0); +CV_EXPORTS_W void fillConvexPoly(InputOutputArray img, InputArray points, + const Scalar& color, int lineType=8, + int shift=0); + +//! 
fills an area bounded by one or more polygons +CV_EXPORTS void fillPoly(Mat& img, const Point** pts, + const int* npts, int ncontours, + const Scalar& color, int lineType=8, int shift=0, + Point offset=Point() ); + +CV_EXPORTS_W void fillPoly(InputOutputArray img, InputArrayOfArrays pts, + const Scalar& color, int lineType=8, int shift=0, + Point offset=Point() ); + +//! draws one or more polygonal curves +CV_EXPORTS void polylines(Mat& img, const Point** pts, const int* npts, + int ncontours, bool isClosed, const Scalar& color, + int thickness=1, int lineType=8, int shift=0 ); + +CV_EXPORTS_W void polylines(InputOutputArray img, InputArrayOfArrays pts, + bool isClosed, const Scalar& color, + int thickness=1, int lineType=8, int shift=0 ); + +//! clips the line segment by the rectangle Rect(0, 0, imgSize.width, imgSize.height) +CV_EXPORTS bool clipLine(Size imgSize, CV_IN_OUT Point& pt1, CV_IN_OUT Point& pt2); + +//! clips the line segment by the rectangle imgRect +CV_EXPORTS_W bool clipLine(Rect imgRect, CV_OUT CV_IN_OUT Point& pt1, CV_OUT CV_IN_OUT Point& pt2); + +/*! + Line iterator class + + The class is used to iterate over all the pixels on the raster line + segment connecting two specified points. +*/ +class CV_EXPORTS LineIterator +{ +public: + //! intializes the iterator + LineIterator( const Mat& img, Point pt1, Point pt2, + int connectivity=8, bool leftToRight=false ); + //! returns pointer to the current pixel + uchar* operator *(); + //! prefix increment operator (++it). shifts iterator to the next pixel + LineIterator& operator ++(); + //! postfix increment operator (it++). shifts iterator to the next pixel + LineIterator operator ++(int); + //! returns coordinates of the current pixel + Point pos() const; + + uchar* ptr; + const uchar* ptr0; + int step, elemSize; + int err, count; + int minusDelta, plusDelta; + int minusStep, plusStep; +}; + +//! 
converts elliptic arc to a polygonal curve +CV_EXPORTS_W void ellipse2Poly( Point center, Size axes, int angle, + int arcStart, int arcEnd, int delta, + CV_OUT vector& pts ); + +enum +{ + FONT_HERSHEY_SIMPLEX = 0, + FONT_HERSHEY_PLAIN = 1, + FONT_HERSHEY_DUPLEX = 2, + FONT_HERSHEY_COMPLEX = 3, + FONT_HERSHEY_TRIPLEX = 4, + FONT_HERSHEY_COMPLEX_SMALL = 5, + FONT_HERSHEY_SCRIPT_SIMPLEX = 6, + FONT_HERSHEY_SCRIPT_COMPLEX = 7, + FONT_ITALIC = 16 +}; + +//! renders text string in the image +CV_EXPORTS_W void putText( Mat& img, const string& text, Point org, + int fontFace, double fontScale, Scalar color, + int thickness=1, int linetype=8, + bool bottomLeftOrigin=false ); + +//! returns bounding box of the text string +CV_EXPORTS_W Size getTextSize(const string& text, int fontFace, + double fontScale, int thickness, + CV_OUT int* baseLine); + +///////////////////////////////// Mat_<_Tp> //////////////////////////////////// + +/*! + Template matrix class derived from Mat + + The class Mat_ is a "thin" template wrapper on top of cv::Mat. It does not have any extra data fields, + nor it or cv::Mat have any virtual methods and thus references or pointers to these two classes + can be safely converted one to another. But do it with care, for example: + + \code + // create 100x100 8-bit matrix + Mat M(100,100,CV_8U); + // this will compile fine. no any data conversion will be done. + Mat_& M1 = (Mat_&)M; + // the program will likely crash at the statement below + M1(99,99) = 1.f; + \endcode + + While cv::Mat is sufficient in most cases, cv::Mat_ can be more convenient if you use a lot of element + access operations and if you know matrix type at compile time. 
+ Note that cv::Mat::at<_Tp>(int y, int x) and cv::Mat_<_Tp>::operator ()(int y, int x) do absolutely the + same thing and run at the same speed, but the latter is certainly shorter: + + \code + Mat_ M(20,20); + for(int i = 0; i < M.rows; i++) + for(int j = 0; j < M.cols; j++) + M(i,j) = 1./(i+j+1); + Mat E, V; + eigen(M,E,V); + cout << E.at(0,0)/E.at(M.rows-1,0); + \endcode + + It is easy to use Mat_ for multi-channel images/matrices - just pass cv::Vec as cv::Mat_ template parameter: + + \code + // allocate 320x240 color image and fill it with green (in RGB space) + Mat_ img(240, 320, Vec3b(0,255,0)); + // now draw a diagonal white line + for(int i = 0; i < 100; i++) + img(i,i)=Vec3b(255,255,255); + // and now modify the 2nd (red) channel of each pixel + for(int i = 0; i < img.rows; i++) + for(int j = 0; j < img.cols; j++) + img(i,j)[2] ^= (uchar)(i ^ j); // img(y,x)[c] accesses c-th channel of the pixel (x,y) + \endcode +*/ +template class CV_EXPORTS Mat_ : public Mat +{ +public: + typedef _Tp value_type; + typedef typename DataType<_Tp>::channel_type channel_type; + typedef MatIterator_<_Tp> iterator; + typedef MatConstIterator_<_Tp> const_iterator; + + //! default constructor + Mat_(); + //! equivalent to Mat(_rows, _cols, DataType<_Tp>::type) + Mat_(int _rows, int _cols); + //! constructor that sets each matrix element to specified value + Mat_(int _rows, int _cols, const _Tp& value); + //! equivalent to Mat(_size, DataType<_Tp>::type) + explicit Mat_(Size _size); + //! constructor that sets each matrix element to specified value + Mat_(Size _size, const _Tp& value); + //! n-dim array constructor + Mat_(int _ndims, const int* _sizes); + //! n-dim array constructor that sets each matrix element to specified value + Mat_(int _ndims, const int* _sizes, const _Tp& value); + //! copy/conversion contructor. If m is of different type, it's converted + Mat_(const Mat& m); + //! copy constructor + Mat_(const Mat_& m); + //! 
constructs a matrix on top of user-allocated data. step is in bytes(!!!), regardless of the type + Mat_(int _rows, int _cols, _Tp* _data, size_t _step=AUTO_STEP); + //! constructs n-dim matrix on top of user-allocated data. steps are in bytes(!!!), regardless of the type + Mat_(int _ndims, const int* _sizes, _Tp* _data, const size_t* _steps=0); + //! selects a submatrix + Mat_(const Mat_& m, const Range& rowRange, const Range& colRange=Range::all()); + //! selects a submatrix + Mat_(const Mat_& m, const Rect& roi); + //! selects a submatrix, n-dim version + Mat_(const Mat_& m, const Range* ranges); + //! from a matrix expression + explicit Mat_(const MatExpr& e); + //! makes a matrix out of Vec, std::vector, Point_ or Point3_. The matrix will have a single column + explicit Mat_(const vector<_Tp>& vec, bool copyData=false); + template explicit Mat_(const Vec::channel_type, n>& vec, bool copyData=true); + template explicit Mat_(const Matx::channel_type, m, n>& mtx, bool copyData=true); + explicit Mat_(const Point_::channel_type>& pt, bool copyData=true); + explicit Mat_(const Point3_::channel_type>& pt, bool copyData=true); + explicit Mat_(const MatCommaInitializer_<_Tp>& commaInitializer); + + Mat_& operator = (const Mat& m); + Mat_& operator = (const Mat_& m); + //! set all the elements to s. + Mat_& operator = (const _Tp& s); + //! assign a matrix expression + Mat_& operator = (const MatExpr& e); + + //! iterators; they are smart enough to skip gaps in the end of rows + iterator begin(); + iterator end(); + const_iterator begin() const; + const_iterator end() const; + + //! equivalent to Mat::create(_rows, _cols, DataType<_Tp>::type) + void create(int _rows, int _cols); + //! equivalent to Mat::create(_size, DataType<_Tp>::type) + void create(Size _size); + //! equivalent to Mat::create(_ndims, _sizes, DatType<_Tp>::type) + void create(int _ndims, const int* _sizes); + //! cross-product + Mat_ cross(const Mat_& m) const; + //! 
data type conversion + template operator Mat_() const; + //! overridden forms of Mat::row() etc. + Mat_ row(int y) const; + Mat_ col(int x) const; + Mat_ diag(int d=0) const; + Mat_ clone() const; + + //! overridden forms of Mat::elemSize() etc. + size_t elemSize() const; + size_t elemSize1() const; + int type() const; + int depth() const; + int channels() const; + size_t step1(int i=0) const; + //! returns step()/sizeof(_Tp) + size_t stepT(int i=0) const; + + //! overridden forms of Mat::zeros() etc. Data type is omitted, of course + static MatExpr zeros(int rows, int cols); + static MatExpr zeros(Size size); + static MatExpr zeros(int _ndims, const int* _sizes); + static MatExpr ones(int rows, int cols); + static MatExpr ones(Size size); + static MatExpr ones(int _ndims, const int* _sizes); + static MatExpr eye(int rows, int cols); + static MatExpr eye(Size size); + + //! some more overriden methods + Mat_ reshape(int _rows) const; + Mat_& adjustROI( int dtop, int dbottom, int dleft, int dright ); + Mat_ operator()( const Range& rowRange, const Range& colRange ) const; + Mat_ operator()( const Rect& roi ) const; + Mat_ operator()( const Range* ranges ) const; + + //! more convenient forms of row and element access operators + _Tp* operator [](int y); + const _Tp* operator [](int y) const; + + //! returns reference to the specified element + _Tp& operator ()(const int* idx); + //! returns read-only reference to the specified element + const _Tp& operator ()(const int* idx) const; + + //! returns reference to the specified element + template _Tp& operator ()(const Vec& idx); + //! returns read-only reference to the specified element + template const _Tp& operator ()(const Vec& idx) const; + + //! returns reference to the specified element (1D case) + _Tp& operator ()(int idx0); + //! returns read-only reference to the specified element (1D case) + const _Tp& operator ()(int idx0) const; + //! 
returns reference to the specified element (2D case) + _Tp& operator ()(int idx0, int idx1); + //! returns read-only reference to the specified element (2D case) + const _Tp& operator ()(int idx0, int idx1) const; + //! returns reference to the specified element (3D case) + _Tp& operator ()(int idx0, int idx1, int idx2); + //! returns read-only reference to the specified element (3D case) + const _Tp& operator ()(int idx0, int idx1, int idx2) const; + + _Tp& operator ()(Point pt); + const _Tp& operator ()(Point pt) const; + + //! conversion to vector. + operator vector<_Tp>() const; + //! conversion to Vec + template operator Vec::channel_type, n>() const; + //! conversion to Matx + template operator Matx::channel_type, m, n>() const; +}; + +typedef Mat_ Mat1b; +typedef Mat_ Mat2b; +typedef Mat_ Mat3b; +typedef Mat_ Mat4b; + +typedef Mat_ Mat1s; +typedef Mat_ Mat2s; +typedef Mat_ Mat3s; +typedef Mat_ Mat4s; + +typedef Mat_ Mat1w; +typedef Mat_ Mat2w; +typedef Mat_ Mat3w; +typedef Mat_ Mat4w; + +typedef Mat_ Mat1i; +typedef Mat_ Mat2i; +typedef Mat_ Mat3i; +typedef Mat_ Mat4i; + +typedef Mat_ Mat1f; +typedef Mat_ Mat2f; +typedef Mat_ Mat3f; +typedef Mat_ Mat4f; + +typedef Mat_ Mat1d; +typedef Mat_ Mat2d; +typedef Mat_ Mat3d; +typedef Mat_ Mat4d; + +//////////// Iterators & Comma initializers ////////////////// + +class CV_EXPORTS MatConstIterator +{ +public: + typedef uchar* value_type; + typedef ptrdiff_t difference_type; + typedef const uchar** pointer; + typedef uchar* reference; + typedef std::random_access_iterator_tag iterator_category; + + //! default constructor + MatConstIterator(); + //! constructor that sets the iterator to the beginning of the matrix + MatConstIterator(const Mat* _m); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator(const Mat* _m, int _row, int _col=0); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator(const Mat* _m, Point _pt); + //! 
constructor that sets the iterator to the specified element of the matrix + MatConstIterator(const Mat* _m, const int* _idx); + //! copy constructor + MatConstIterator(const MatConstIterator& it); + + //! copy operator + MatConstIterator& operator = (const MatConstIterator& it); + //! returns the current matrix element + uchar* operator *() const; + //! returns the i-th matrix element, relative to the current + uchar* operator [](ptrdiff_t i) const; + + //! shifts the iterator forward by the specified number of elements + MatConstIterator& operator += (ptrdiff_t ofs); + //! shifts the iterator backward by the specified number of elements + MatConstIterator& operator -= (ptrdiff_t ofs); + //! decrements the iterator + MatConstIterator& operator --(); + //! decrements the iterator + MatConstIterator operator --(int); + //! increments the iterator + MatConstIterator& operator ++(); + //! increments the iterator + MatConstIterator operator ++(int); + //! returns the current iterator position + Point pos() const; + //! returns the current iterator position + void pos(int* _idx) const; + ptrdiff_t lpos() const; + void seek(ptrdiff_t ofs, bool relative=false); + void seek(const int* _idx, bool relative=false); + + const Mat* m; + size_t elemSize; + uchar* ptr; + uchar* sliceStart; + uchar* sliceEnd; +}; + +/*! + Matrix read-only iterator + + */ +template +class CV_EXPORTS MatConstIterator_ : public MatConstIterator +{ +public: + typedef _Tp value_type; + typedef ptrdiff_t difference_type; + typedef const _Tp* pointer; + typedef const _Tp& reference; + typedef std::random_access_iterator_tag iterator_category; + + //! default constructor + MatConstIterator_(); + //! constructor that sets the iterator to the beginning of the matrix + MatConstIterator_(const Mat_<_Tp>* _m); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator_(const Mat_<_Tp>* _m, int _row, int _col=0); + //! 
constructor that sets the iterator to the specified element of the matrix + MatConstIterator_(const Mat_<_Tp>* _m, Point _pt); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator_(const Mat_<_Tp>* _m, const int* _idx); + //! copy constructor + MatConstIterator_(const MatConstIterator_& it); + + //! copy operator + MatConstIterator_& operator = (const MatConstIterator_& it); + //! returns the current matrix element + _Tp operator *() const; + //! returns the i-th matrix element, relative to the current + _Tp operator [](ptrdiff_t i) const; + + //! shifts the iterator forward by the specified number of elements + MatConstIterator_& operator += (ptrdiff_t ofs); + //! shifts the iterator backward by the specified number of elements + MatConstIterator_& operator -= (ptrdiff_t ofs); + //! decrements the iterator + MatConstIterator_& operator --(); + //! decrements the iterator + MatConstIterator_ operator --(int); + //! increments the iterator + MatConstIterator_& operator ++(); + //! increments the iterator + MatConstIterator_ operator ++(int); + //! returns the current iterator position + Point pos() const; +}; + + +/*! + Matrix read-write iterator + +*/ +template +class CV_EXPORTS MatIterator_ : public MatConstIterator_<_Tp> +{ +public: + typedef _Tp* pointer; + typedef _Tp& reference; + typedef std::random_access_iterator_tag iterator_category; + + //! the default constructor + MatIterator_(); + //! constructor that sets the iterator to the beginning of the matrix + MatIterator_(Mat_<_Tp>* _m); + //! constructor that sets the iterator to the specified element of the matrix + MatIterator_(Mat_<_Tp>* _m, int _row, int _col=0); + //! constructor that sets the iterator to the specified element of the matrix + MatIterator_(const Mat_<_Tp>* _m, Point _pt); + //! constructor that sets the iterator to the specified element of the matrix + MatIterator_(const Mat_<_Tp>* _m, const int* _idx); + //! 
copy constructor + MatIterator_(const MatIterator_& it); + //! copy operator + MatIterator_& operator = (const MatIterator_<_Tp>& it ); + + //! returns the current matrix element + _Tp& operator *() const; + //! returns the i-th matrix element, relative to the current + _Tp& operator [](ptrdiff_t i) const; + + //! shifts the iterator forward by the specified number of elements + MatIterator_& operator += (ptrdiff_t ofs); + //! shifts the iterator backward by the specified number of elements + MatIterator_& operator -= (ptrdiff_t ofs); + //! decrements the iterator + MatIterator_& operator --(); + //! decrements the iterator + MatIterator_ operator --(int); + //! increments the iterator + MatIterator_& operator ++(); + //! increments the iterator + MatIterator_ operator ++(int); +}; + +template class CV_EXPORTS MatOp_Iter_; + +/*! + Comma-separated Matrix Initializer + + The class instances are usually not created explicitly. + Instead, they are created on "matrix << firstValue" operator. + + The sample below initializes 2x2 rotation matrix: + + \code + double angle = 30, a = cos(angle*CV_PI/180), b = sin(angle*CV_PI/180); + Mat R = (Mat_(2,2) << a, -b, b, a); + \endcode +*/ +template class CV_EXPORTS MatCommaInitializer_ +{ +public: + //! the constructor, created by "matrix << firstValue" operator, where matrix is cv::Mat + MatCommaInitializer_(Mat_<_Tp>* _m); + //! the operator that takes the next value and put it to the matrix + template MatCommaInitializer_<_Tp>& operator , (T2 v); + //! 
another form of conversion operator + Mat_<_Tp> operator *() const; + operator Mat_<_Tp>() const; +protected: + MatIterator_<_Tp> it; +}; + + +template class CV_EXPORTS MatxCommaInitializer +{ +public: + MatxCommaInitializer(Matx<_Tp, m, n>* _mtx); + template MatxCommaInitializer<_Tp, m, n>& operator , (T2 val); + Matx<_Tp, m, n> operator *() const; + + Matx<_Tp, m, n>* dst; + int idx; +}; + +template class CV_EXPORTS VecCommaInitializer : public MatxCommaInitializer<_Tp, m, 1> +{ +public: + VecCommaInitializer(Vec<_Tp, m>* _vec); + template VecCommaInitializer<_Tp, m>& operator , (T2 val); + Vec<_Tp, m> operator *() const; +}; + +/*! + Automatically Allocated Buffer Class + + The class is used for temporary buffers in functions and methods. + If a temporary buffer is usually small (a few K's of memory), + but its size depends on the parameters, it makes sense to create a small + fixed-size array on stack and use it if it's large enough. If the required buffer size + is larger than the fixed size, another buffer of sufficient size is allocated dynamically + and released after the processing. Therefore, in typical cases, when the buffer size is small, + there is no overhead associated with malloc()/free(). + At the same time, there is no limit on the size of processed data. + + This is what AutoBuffer does. The template takes 2 parameters - type of the buffer elements and + the number of stack-allocated elements. Here is how the class is used: + + \code + void my_func(const cv::Mat& m) + { + cv::AutoBuffer buf; // create automatic buffer containing 1000 floats + + buf.allocate(m.rows); // if m.rows <= 1000, the pre-allocated buffer is used, + // otherwise the buffer of "m.rows" floats will be allocated + // dynamically and deallocated in cv::AutoBuffer destructor + ... + } + \endcode +*/ +template class CV_EXPORTS AutoBuffer +{ +public: + typedef _Tp value_type; + enum { buffer_padding = (int)((16 + sizeof(_Tp) - 1)/sizeof(_Tp)) }; + + //! 
the default contructor + AutoBuffer(); + //! constructor taking the real buffer size + AutoBuffer(size_t _size); + //! destructor. calls deallocate() + ~AutoBuffer(); + + //! allocates the new buffer of size _size. if the _size is small enough, stack-allocated buffer is used + void allocate(size_t _size); + //! deallocates the buffer if it was dynamically allocated + void deallocate(); + //! returns pointer to the real buffer, stack-allocated or head-allocated + operator _Tp* (); + //! returns read-only pointer to the real buffer, stack-allocated or head-allocated + operator const _Tp* () const; + +protected: + //! pointer to the real buffer, can point to buf if the buffer is small enough + _Tp* ptr; + //! size of the real buffer + size_t size; + //! pre-allocated buffer + _Tp buf[fixed_size+buffer_padding]; +}; + +/////////////////////////// multi-dimensional dense matrix ////////////////////////// + +/*! + n-Dimensional Dense Matrix Iterator Class. + + The class cv::NAryMatIterator is used for iterating over one or more n-dimensional dense arrays (cv::Mat's). + + The iterator is completely different from cv::Mat_ and cv::SparseMat_ iterators. + It iterates through the slices (or planes), not the elements, where "slice" is a continuous part of the arrays. + + Here is the example on how the iterator can be used to normalize 3D histogram: + + \code + void normalizeColorHist(Mat& hist) + { + #if 1 + // intialize iterator (the style is different from STL). + // after initialization the iterator will contain + // the number of slices or planes + // the iterator will go through + Mat* arrays[] = { &hist, 0 }; + Mat planes[1]; + NAryMatIterator it(arrays, planes); + double s = 0; + // iterate through the matrix. on each iteration + // it.planes[i] (of type Mat) will be set to the current plane of + // i-th n-dim matrix passed to the iterator constructor. 
+ for(int p = 0; p < it.nplanes; p++, ++it) + s += sum(it.planes[0])[0]; + it = NAryMatIterator(hist); + s = 1./s; + for(int p = 0; p < it.nplanes; p++, ++it) + it.planes[0] *= s; + #elif 1 + // this is a shorter implementation of the above + // using built-in operations on Mat + double s = sum(hist)[0]; + hist.convertTo(hist, hist.type(), 1./s, 0); + #else + // and this is even shorter one + // (assuming that the histogram elements are non-negative) + normalize(hist, hist, 1, 0, NORM_L1); + #endif + } + \endcode + + You can iterate through several matrices simultaneously as long as they have the same geometry + (dimensionality and all the dimension sizes are the same), which is useful for binary + and n-ary operations on such matrices. Just pass those matrices to cv::MatNDIterator. + Then, during the iteration it.planes[0], it.planes[1], ... will + be the slices of the corresponding matrices +*/ +class CV_EXPORTS NAryMatIterator +{ +public: + //! the default constructor + NAryMatIterator(); + //! the full constructor taking arbitrary number of n-dim matrices + NAryMatIterator(const Mat** arrays, uchar** ptrs, int narrays=-1); + //! the full constructor taking arbitrary number of n-dim matrices + NAryMatIterator(const Mat** arrays, Mat* planes, int narrays=-1); + //! the separate iterator initialization method + void init(const Mat** arrays, Mat* planes, uchar** ptrs, int narrays=-1); + + //! proceeds to the next plane of every iterated matrix + NAryMatIterator& operator ++(); + //! proceeds to the next plane of every iterated matrix (postfix increment operator) + NAryMatIterator operator ++(int); + + //! the iterated arrays + const Mat** arrays; + //! the current planes + Mat* planes; + //! data pointers + uchar** ptrs; + //! the number of arrays + int narrays; + //! the number of hyper-planes that the iterator steps through + size_t nplanes; + //! 
the size of each segment (in elements) + size_t size; +protected: + int iterdepth; + size_t idx; +}; + +//typedef NAryMatIterator NAryMatNDIterator; + +typedef void (*ConvertData)(const void* from, void* to, int cn); +typedef void (*ConvertScaleData)(const void* from, void* to, int cn, double alpha, double beta); + +//! returns the function for converting pixels from one data type to another +CV_EXPORTS ConvertData getConvertElem(int fromType, int toType); +//! returns the function for converting pixels from one data type to another with the optional scaling +CV_EXPORTS ConvertScaleData getConvertScaleElem(int fromType, int toType); + + +/////////////////////////// multi-dimensional sparse matrix ////////////////////////// + +class SparseMatIterator; +class SparseMatConstIterator; +template class SparseMatIterator_; +template class SparseMatConstIterator_; + +/*! + Sparse matrix class. + + The class represents multi-dimensional sparse numerical arrays. Such a sparse array can store elements + of any type that cv::Mat is able to store. "Sparse" means that only non-zero elements + are stored (though, as a result of some operations on a sparse matrix, some of its stored elements + can actually become 0. It's user responsibility to detect such elements and delete them using cv::SparseMat::erase(). + The non-zero elements are stored in a hash table that grows when it's filled enough, + so that the search time remains O(1) in average. Elements can be accessed using the following methods: + +
    +
  1. Query operations: cv::SparseMat::ptr() and the higher-level cv::SparseMat::ref(), + cv::SparseMat::value() and cv::SparseMat::find, for example: + \code + const int dims = 5; + int size[] = {10, 10, 10, 10, 10}; + SparseMat sparse_mat(dims, size, CV_32F); + for(int i = 0; i < 1000; i++) + { + int idx[dims]; + for(int k = 0; k < dims; k++) + idx[k] = rand()%sparse_mat.size(k); + sparse_mat.ref(idx) += 1.f; + } + \endcode + +
  2. Sparse matrix iterators. Like cv::Mat iterators and unlike cv::Mat iterators, the sparse matrix iterators are STL-style, + that is, the iteration is done as following: + \code + // prints elements of a sparse floating-point matrix and the sum of elements. + SparseMatConstIterator_ + it = sparse_mat.begin(), + it_end = sparse_mat.end(); + double s = 0; + int dims = sparse_mat.dims(); + for(; it != it_end; ++it) + { + // print element indices and the element value + const Node* n = it.node(); + printf("(") + for(int i = 0; i < dims; i++) + printf("%3d%c", n->idx[i], i < dims-1 ? ',' : ')'); + printf(": %f\n", *it); + s += *it; + } + printf("Element sum is %g\n", s); + \endcode + If you run this loop, you will notice that elements are enumerated + in no any logical order (lexicographical etc.), + they come in the same order as they stored in the hash table, i.e. semi-randomly. + + You may collect pointers to the nodes and sort them to get the proper ordering. + Note, however, that pointers to the nodes may become invalid when you add more + elements to the matrix; this is because of possible buffer reallocation. + +
  3. A combination of the above 2 methods when you need to process 2 or more sparse + matrices simultaneously, e.g. this is how you can compute unnormalized + cross-correlation of the 2 floating-point sparse matrices: + \code + double crossCorr(const SparseMat& a, const SparseMat& b) + { + const SparseMat *_a = &a, *_b = &b; + // if b contains less elements than a, + // it's faster to iterate through b + if(_a->nzcount() > _b->nzcount()) + std::swap(_a, _b); + SparseMatConstIterator_ it = _a->begin(), + it_end = _a->end(); + double ccorr = 0; + for(; it != it_end; ++it) + { + // take the next element from the first matrix + float avalue = *it; + const Node* anode = it.node(); + // and try to find element with the same index in the second matrix. + // since the hash value depends only on the element index, + // we reuse hashvalue stored in the node + float bvalue = _b->value(anode->idx,&anode->hashval); + ccorr += avalue*bvalue; + } + return ccorr; + } + \endcode +
+*/ +class CV_EXPORTS SparseMat +{ +public: + typedef SparseMatIterator iterator; + typedef SparseMatConstIterator const_iterator; + + //! the sparse matrix header + struct CV_EXPORTS Hdr + { + Hdr(int _dims, const int* _sizes, int _type); + void clear(); + int refcount; + int dims; + int valueOffset; + size_t nodeSize; + size_t nodeCount; + size_t freeList; + vector pool; + vector hashtab; + int size[CV_MAX_DIM]; + }; + + //! sparse matrix node - element of a hash table + struct CV_EXPORTS Node + { + //! hash value + size_t hashval; + //! index of the next node in the same hash table entry + size_t next; + //! index of the matrix element + int idx[CV_MAX_DIM]; + }; + + //! default constructor + SparseMat(); + //! creates matrix of the specified size and type + SparseMat(int dims, const int* _sizes, int _type); + //! copy constructor + SparseMat(const SparseMat& m); + //! converts dense 2d matrix to the sparse form + /*! + \param m the input matrix + \param try1d if true and m is a single-column matrix (Nx1), + then the sparse matrix will be 1-dimensional. + */ + explicit SparseMat(const Mat& m); + //! converts old-style sparse matrix to the new-style. All the data is copied + SparseMat(const CvSparseMat* m); + //! the destructor + ~SparseMat(); + + //! assignment operator. This is O(1) operation, i.e. no data is copied + SparseMat& operator = (const SparseMat& m); + //! equivalent to the corresponding constructor + SparseMat& operator = (const Mat& m); + + //! creates full copy of the matrix + SparseMat clone() const; + + //! copies all the data to the destination matrix. All the previous content of m is erased + void copyTo( SparseMat& m ) const; + //! converts sparse matrix to dense matrix. + void copyTo( Mat& m ) const; + //! multiplies all the matrix elements by the specified scale factor alpha and converts the results to the specified data type + void convertTo( SparseMat& m, int rtype, double alpha=1 ) const; + //! 
converts sparse matrix to dense n-dim matrix with optional type conversion and scaling. + /*! + \param rtype The output matrix data type. When it is =-1, the output array will have the same data type as (*this) + \param alpha The scale factor + \param beta The optional delta added to the scaled values before the conversion + */ + void convertTo( Mat& m, int rtype, double alpha=1, double beta=0 ) const; + + // not used now + void assignTo( SparseMat& m, int type=-1 ) const; + + //! reallocates sparse matrix. + /*! + If the matrix already had the proper size and type, + it is simply cleared with clear(), otherwise, + the old matrix is released (using release()) and the new one is allocated. + */ + void create(int dims, const int* _sizes, int _type); + //! sets all the sparse matrix elements to 0, which means clearing the hash table. + void clear(); + //! manually increments the reference counter to the header. + void addref(); + // decrements the header reference counter. When the counter reaches 0, the header and all the underlying data are deallocated. + void release(); + + //! converts sparse matrix to the old-style representation; all the elements are copied. + operator CvSparseMat*() const; + //! returns the size of each element in bytes (not including the overhead - the space occupied by SparseMat::Node elements) + size_t elemSize() const; + //! returns elemSize()/channels() + size_t elemSize1() const; + + //! returns type of sparse matrix elements + int type() const; + //! returns the depth of sparse matrix elements + int depth() const; + //! returns the number of channels + int channels() const; + + //! returns the array of sizes, or NULL if the matrix is not allocated + const int* size() const; + //! returns the size of i-th matrix dimension (or 0) + int size(int i) const; + //! returns the matrix dimensionality + int dims() const; + //! returns the number of non-zero elements (=the number of hash table nodes) + size_t nzcount() const; + + //! 
computes the element hash value (1D case) + size_t hash(int i0) const; + //! computes the element hash value (2D case) + size_t hash(int i0, int i1) const; + //! computes the element hash value (3D case) + size_t hash(int i0, int i1, int i2) const; + //! computes the element hash value (nD case) + size_t hash(const int* idx) const; + + //@{ + /*! + specialized variants for 1D, 2D, 3D cases and the generic_type one for n-D case. + + return pointer to the matrix element. +
    +
  • if the element is there (it's non-zero), the pointer to it is returned +
  • if it's not there and createMissing=false, NULL pointer is returned +
  • if it's not there and createMissing=true, then the new element + is created and initialized with 0. Pointer to it is returned +
  • if the optional hashval pointer is not NULL, the element hash value is + not computed, but *hashval is taken instead. +
+ */ + //! returns pointer to the specified element (1D case) + uchar* ptr(int i0, bool createMissing, size_t* hashval=0); + //! returns pointer to the specified element (2D case) + uchar* ptr(int i0, int i1, bool createMissing, size_t* hashval=0); + //! returns pointer to the specified element (3D case) + uchar* ptr(int i0, int i1, int i2, bool createMissing, size_t* hashval=0); + //! returns pointer to the specified element (nD case) + uchar* ptr(const int* idx, bool createMissing, size_t* hashval=0); + //@} + + //@{ + /*! + return read-write reference to the specified sparse matrix element. + + ref<_Tp>(i0,...[,hashval]) is equivalent to *(_Tp*)ptr(i0,...,true[,hashval]). + The methods always return a valid reference. + If the element did not exist, it is created and initialiazed with 0. + */ + //! returns reference to the specified element (1D case) + template _Tp& ref(int i0, size_t* hashval=0); + //! returns reference to the specified element (2D case) + template _Tp& ref(int i0, int i1, size_t* hashval=0); + //! returns reference to the specified element (3D case) + template _Tp& ref(int i0, int i1, int i2, size_t* hashval=0); + //! returns reference to the specified element (nD case) + template _Tp& ref(const int* idx, size_t* hashval=0); + //@} + + //@{ + /*! + return value of the specified sparse matrix element. + + value<_Tp>(i0,...[,hashval]) is equivalent + + \code + { const _Tp* p = find<_Tp>(i0,...[,hashval]); return p ? *p : _Tp(); } + \endcode + + That is, if the element did not exist, the methods return 0. + */ + //! returns value of the specified element (1D case) + template _Tp value(int i0, size_t* hashval=0) const; + //! returns value of the specified element (2D case) + template _Tp value(int i0, int i1, size_t* hashval=0) const; + //! returns value of the specified element (3D case) + template _Tp value(int i0, int i1, int i2, size_t* hashval=0) const; + //! 
returns value of the specified element (nD case) + template _Tp value(const int* idx, size_t* hashval=0) const; + //@} + + //@{ + /*! + Return pointer to the specified sparse matrix element if it exists + + find<_Tp>(i0,...[,hashval]) is equivalent to (_const Tp*)ptr(i0,...false[,hashval]). + + If the specified element does not exist, the methods return NULL. + */ + //! returns pointer to the specified element (1D case) + template const _Tp* find(int i0, size_t* hashval=0) const; + //! returns pointer to the specified element (2D case) + template const _Tp* find(int i0, int i1, size_t* hashval=0) const; + //! returns pointer to the specified element (3D case) + template const _Tp* find(int i0, int i1, int i2, size_t* hashval=0) const; + //! returns pointer to the specified element (nD case) + template const _Tp* find(const int* idx, size_t* hashval=0) const; + + //! erases the specified element (2D case) + void erase(int i0, int i1, size_t* hashval=0); + //! erases the specified element (3D case) + void erase(int i0, int i1, int i2, size_t* hashval=0); + //! erases the specified element (nD case) + void erase(const int* idx, size_t* hashval=0); + + //@{ + /*! + return the sparse matrix iterator pointing to the first sparse matrix element + */ + //! returns the sparse matrix iterator at the matrix beginning + SparseMatIterator begin(); + //! returns the sparse matrix iterator at the matrix beginning + template SparseMatIterator_<_Tp> begin(); + //! returns the read-only sparse matrix iterator at the matrix beginning + SparseMatConstIterator begin() const; + //! returns the read-only sparse matrix iterator at the matrix beginning + template SparseMatConstIterator_<_Tp> begin() const; + //@} + /*! + return the sparse matrix iterator pointing to the element following the last sparse matrix element + */ + //! returns the sparse matrix iterator at the matrix end + SparseMatIterator end(); + //! 
returns the read-only sparse matrix iterator at the matrix end + SparseMatConstIterator end() const; + //! returns the typed sparse matrix iterator at the matrix end + template SparseMatIterator_<_Tp> end(); + //! returns the typed read-only sparse matrix iterator at the matrix end + template SparseMatConstIterator_<_Tp> end() const; + + //! returns the value stored in the sparse martix node + template _Tp& value(Node* n); + //! returns the value stored in the sparse martix node + template const _Tp& value(const Node* n) const; + + ////////////// some internal-use methods /////////////// + Node* node(size_t nidx); + const Node* node(size_t nidx) const; + + uchar* newNode(const int* idx, size_t hashval); + void removeNode(size_t hidx, size_t nidx, size_t previdx); + void resizeHashTab(size_t newsize); + + enum { MAGIC_VAL=0x42FD0000, MAX_DIM=CV_MAX_DIM, HASH_SCALE=0x5bd1e995, HASH_BIT=0x80000000 }; + + int flags; + Hdr* hdr; +}; + +//! finds global minimum and maximum sparse array elements and returns their values and their locations +CV_EXPORTS void minMaxLoc(const SparseMat& a, double* minVal, + double* maxVal, int* minIdx=0, int* maxIdx=0); +//! computes norm of a sparse matrix +CV_EXPORTS double norm( const SparseMat& src, int normType ); +//! scales and shifts array elements so that either the specified norm (alpha) or the minimum (alpha) and maximum (beta) array values get the specified values +CV_EXPORTS void normalize( const SparseMat& src, SparseMat& dst, double alpha, int normType ); + +/*! + Read-Only Sparse Matrix Iterator. + Here is how to use the iterator to compute the sum of floating-point sparse matrix elements: + + \code + SparseMatConstIterator it = m.begin(), it_end = m.end(); + double s = 0; + CV_Assert( m.type() == CV_32F ); + for( ; it != it_end; ++it ) + s += it.value(); + \endcode +*/ +class CV_EXPORTS SparseMatConstIterator +{ +public: + //! the default constructor + SparseMatConstIterator(); + //! 
the full constructor setting the iterator to the first sparse matrix element + SparseMatConstIterator(const SparseMat* _m); + //! the copy constructor + SparseMatConstIterator(const SparseMatConstIterator& it); + + //! the assignment operator + SparseMatConstIterator& operator = (const SparseMatConstIterator& it); + + //! template method returning the current matrix element + template const _Tp& value() const; + //! returns the current node of the sparse matrix. it.node->idx is the current element index + const SparseMat::Node* node() const; + + //! moves iterator to the previous element + SparseMatConstIterator& operator --(); + //! moves iterator to the previous element + SparseMatConstIterator operator --(int); + //! moves iterator to the next element + SparseMatConstIterator& operator ++(); + //! moves iterator to the next element + SparseMatConstIterator operator ++(int); + + //! moves iterator to the element after the last element + void seekEnd(); + + const SparseMat* m; + size_t hashidx; + uchar* ptr; +}; + +/*! + Read-write Sparse Matrix Iterator + + The class is similar to cv::SparseMatConstIterator, + but can be used for in-place modification of the matrix elements. +*/ +class CV_EXPORTS SparseMatIterator : public SparseMatConstIterator +{ +public: + //! the default constructor + SparseMatIterator(); + //! the full constructor setting the iterator to the first sparse matrix element + SparseMatIterator(SparseMat* _m); + //! the full constructor setting the iterator to the specified sparse matrix element + SparseMatIterator(SparseMat* _m, const int* idx); + //! the copy constructor + SparseMatIterator(const SparseMatIterator& it); + + //! the assignment operator + SparseMatIterator& operator = (const SparseMatIterator& it); + //! returns read-write reference to the current sparse matrix element + template _Tp& value() const; + //! returns pointer to the current sparse matrix node. it.node->idx is the index of the current element (do not modify it!) 
+ SparseMat::Node* node() const; + + //! moves iterator to the next element + SparseMatIterator& operator ++(); + //! moves iterator to the next element + SparseMatIterator operator ++(int); +}; + +/*! + The Template Sparse Matrix class derived from cv::SparseMat + + The class provides slightly more convenient operations for accessing elements. + + \code + SparseMat m; + ... + SparseMat_ m_ = (SparseMat_&)m; + m_.ref(1)++; // equivalent to m.ref(1)++; + m_.ref(2) += m_(3); // equivalent to m.ref(2) += m.value(3); + \endcode +*/ +template class CV_EXPORTS SparseMat_ : public SparseMat +{ +public: + typedef SparseMatIterator_<_Tp> iterator; + typedef SparseMatConstIterator_<_Tp> const_iterator; + + //! the default constructor + SparseMat_(); + //! the full constructor equivelent to SparseMat(dims, _sizes, DataType<_Tp>::type) + SparseMat_(int dims, const int* _sizes); + //! the copy constructor. If DataType<_Tp>.type != m.type(), the m elements are converted + SparseMat_(const SparseMat& m); + //! the copy constructor. This is O(1) operation - no data is copied + SparseMat_(const SparseMat_& m); + //! converts dense matrix to the sparse form + SparseMat_(const Mat& m); + //! converts the old-style sparse matrix to the C++ class. All the elements are copied + SparseMat_(const CvSparseMat* m); + //! the assignment operator. If DataType<_Tp>.type != m.type(), the m elements are converted + SparseMat_& operator = (const SparseMat& m); + //! the assignment operator. This is O(1) operation - no data is copied + SparseMat_& operator = (const SparseMat_& m); + //! converts dense matrix to the sparse form + SparseMat_& operator = (const Mat& m); + + //! makes full copy of the matrix. All the elements are duplicated + SparseMat_ clone() const; + //! equivalent to cv::SparseMat::create(dims, _sizes, DataType<_Tp>::type) + void create(int dims, const int* _sizes); + //! converts sparse matrix to the old-style CvSparseMat. 
All the elements are copied + operator CvSparseMat*() const; + + //! returns type of the matrix elements + int type() const; + //! returns depth of the matrix elements + int depth() const; + //! returns the number of channels in each matrix element + int channels() const; + + //! equivalent to SparseMat::ref<_Tp>(i0, hashval) + _Tp& ref(int i0, size_t* hashval=0); + //! equivalent to SparseMat::ref<_Tp>(i0, i1, hashval) + _Tp& ref(int i0, int i1, size_t* hashval=0); + //! equivalent to SparseMat::ref<_Tp>(i0, i1, i2, hashval) + _Tp& ref(int i0, int i1, int i2, size_t* hashval=0); + //! equivalent to SparseMat::ref<_Tp>(idx, hashval) + _Tp& ref(const int* idx, size_t* hashval=0); + + //! equivalent to SparseMat::value<_Tp>(i0, hashval) + _Tp operator()(int i0, size_t* hashval=0) const; + //! equivalent to SparseMat::value<_Tp>(i0, i1, hashval) + _Tp operator()(int i0, int i1, size_t* hashval=0) const; + //! equivalent to SparseMat::value<_Tp>(i0, i1, i2, hashval) + _Tp operator()(int i0, int i1, int i2, size_t* hashval=0) const; + //! equivalent to SparseMat::value<_Tp>(idx, hashval) + _Tp operator()(const int* idx, size_t* hashval=0) const; + + //! returns sparse matrix iterator pointing to the first sparse matrix element + SparseMatIterator_<_Tp> begin(); + //! returns read-only sparse matrix iterator pointing to the first sparse matrix element + SparseMatConstIterator_<_Tp> begin() const; + //! returns sparse matrix iterator pointing to the element following the last sparse matrix element + SparseMatIterator_<_Tp> end(); + //! returns read-only sparse matrix iterator pointing to the element following the last sparse matrix element + SparseMatConstIterator_<_Tp> end() const; +}; + + +/*! + Template Read-Only Sparse Matrix Iterator Class. + + This is the derived from SparseMatConstIterator class that + introduces more convenient operator *() for accessing the current element. 
+*/ +template class CV_EXPORTS SparseMatConstIterator_ : public SparseMatConstIterator +{ +public: + typedef std::forward_iterator_tag iterator_category; + + //! the default constructor + SparseMatConstIterator_(); + //! the full constructor setting the iterator to the first sparse matrix element + SparseMatConstIterator_(const SparseMat_<_Tp>* _m); + //! the copy constructor + SparseMatConstIterator_(const SparseMatConstIterator_& it); + + //! the assignment operator + SparseMatConstIterator_& operator = (const SparseMatConstIterator_& it); + //! the element access operator + const _Tp& operator *() const; + + //! moves iterator to the next element + SparseMatConstIterator_& operator ++(); + //! moves iterator to the next element + SparseMatConstIterator_ operator ++(int); +}; + +/*! + Template Read-Write Sparse Matrix Iterator Class. + + This is the derived from cv::SparseMatConstIterator_ class that + introduces more convenient operator *() for accessing the current element. +*/ +template class CV_EXPORTS SparseMatIterator_ : public SparseMatConstIterator_<_Tp> +{ +public: + typedef std::forward_iterator_tag iterator_category; + + //! the default constructor + SparseMatIterator_(); + //! the full constructor setting the iterator to the first sparse matrix element + SparseMatIterator_(SparseMat_<_Tp>* _m); + //! the copy constructor + SparseMatIterator_(const SparseMatIterator_& it); + + //! the assignment operator + SparseMatIterator_& operator = (const SparseMatIterator_& it); + //! returns the reference to the current element + _Tp& operator *() const; + + //! moves the iterator to the next element + SparseMatIterator_& operator ++(); + //! moves the iterator to the next element + SparseMatIterator_ operator ++(int); +}; + +//////////////////// Fast Nearest-Neighbor Search Structure //////////////////// + +/*! + Fast Nearest Neighbor Search Class. + + The class implements D. 
Lowe BBF (Best-Bin-First) algorithm for the last + approximate (or accurate) nearest neighbor search in multi-dimensional spaces. + + First, a set of vectors is passed to KDTree::KDTree() constructor + or KDTree::build() method, where it is reordered. + + Then arbitrary vectors can be passed to KDTree::findNearest() methods, which + find the K nearest neighbors among the vectors from the initial set. + The user can balance between the speed and accuracy of the search by varying Emax + parameter, which is the number of leaves that the algorithm checks. + The larger parameter values yield more accurate results at the expense of lower processing speed. + + \code + KDTree T(points, false); + const int K = 3, Emax = INT_MAX; + int idx[K]; + float dist[K]; + T.findNearest(query_vec, K, Emax, idx, 0, dist); + CV_Assert(dist[0] <= dist[1] && dist[1] <= dist[2]); + \endcode +*/ +class CV_EXPORTS_W KDTree +{ +public: + /*! + The node of the search tree. + */ + struct Node + { + Node() : idx(-1), left(-1), right(-1), boundary(0.f) {} + Node(int _idx, int _left, int _right, float _boundary) + : idx(_idx), left(_left), right(_right), boundary(_boundary) {} + //! split dimension; >=0 for nodes (dim), < 0 for leaves (index of the point) + int idx; + //! node indices of the left and the right branches + int left, right; + //! go to the left if query_vec[node.idx]<=node.boundary, otherwise go to the right + float boundary; + }; + + //! the default constructor + CV_WRAP KDTree(); + //! the full constructor that builds the search tree + CV_WRAP KDTree(InputArray points, bool copyAndReorderPoints=false); + //! the full constructor that builds the search tree + CV_WRAP KDTree(InputArray points, InputArray _labels, + bool copyAndReorderPoints=false); + //! builds the search tree + CV_WRAP void build(InputArray points, bool copyAndReorderPoints=false); + //! builds the search tree + CV_WRAP void build(InputArray points, InputArray labels, + bool copyAndReorderPoints=false); + //! 
finds the K nearest neighbors of "vec" while looking at Emax (at most) leaves + CV_WRAP int findNearest(InputArray vec, int K, int Emax, + OutputArray neighborsIdx, + OutputArray neighbors=noArray(), + OutputArray dist=noArray(), + OutputArray labels=noArray()) const; + //! finds all the points from the initial set that belong to the specified box + CV_WRAP void findOrthoRange(InputArray minBounds, + InputArray maxBounds, + OutputArray neighborsIdx, + OutputArray neighbors=noArray(), + OutputArray labels=noArray()) const; + //! returns vectors with the specified indices + CV_WRAP void getPoints(InputArray idx, OutputArray pts, + OutputArray labels=noArray()) const; + //! return a vector with the specified index + const float* getPoint(int ptidx, int* label=0) const; + //! returns the search space dimensionality + CV_WRAP int dims() const; + + vector nodes; //!< all the tree nodes + CV_PROP Mat points; //!< all the points. It can be a reordered copy of the input vector set or the original vector set. + CV_PROP vector labels; //!< the parallel array of labels. + CV_PROP int maxDepth; //!< maximum depth of the search tree. Do not modify it + CV_PROP_RW int normType; //!< type of the distance (cv::NORM_L1 or cv::NORM_L2) used for search. Initially set to cv::NORM_L2, but you can modify it +}; + +//////////////////////////////////////// XML & YAML I/O //////////////////////////////////// + +class CV_EXPORTS FileNode; + +/*! + XML/YAML File Storage Class. + + The class describes an object associated with XML or YAML file. + It can be used to store data to such a file or read and decode the data. + + The storage is organized as a tree of nested sequences (or lists) and mappings. + Sequence is a heterogenious array, which elements are accessed by indices or sequentially using an iterator. + Mapping is analogue of std::map or C structure, which elements are accessed by names. + The most top level structure is a mapping. 
+ Leaves of the file storage tree are integers, floating-point numbers and text strings. + + For example, the following code: + + \code + // open file storage for writing. Type of the file is determined from the extension + FileStorage fs("test.yml", FileStorage::WRITE); + fs << "test_int" << 5 << "test_real" << 3.1 << "test_string" << "ABCDEFGH"; + fs << "test_mat" << Mat::eye(3,3,CV_32F); + + fs << "test_list" << "[" << 0.0000000000001 << 2 << CV_PI << -3435345 << "2-502 2-029 3egegeg" << + "{:" << "month" << 12 << "day" << 31 << "year" << 1969 << "}" << "]"; + fs << "test_map" << "{" << "x" << 1 << "y" << 2 << "width" << 100 << "height" << 200 << "lbp" << "[:"; + + const uchar arr[] = {0, 1, 1, 0, 1, 1, 0, 1}; + fs.writeRaw("u", arr, (int)(sizeof(arr)/sizeof(arr[0]))); + + fs << "]" << "}"; + \endcode + + will produce the following file: + + \verbatim + %YAML:1.0 + test_int: 5 + test_real: 3.1000000000000001e+00 + test_string: ABCDEFGH + test_mat: !!opencv-matrix + rows: 3 + cols: 3 + dt: f + data: [ 1., 0., 0., 0., 1., 0., 0., 0., 1. ] + test_list: + - 1.0000000000000000e-13 + - 2 + - 3.1415926535897931e+00 + - -3435345 + - "2-502 2-029 3egegeg" + - { month:12, day:31, year:1969 } + test_map: + x: 1 + y: 2 + width: 100 + height: 200 + lbp: [ 0, 1, 1, 0, 1, 1, 0, 1 ] + \endverbatim + + and to read the file above, the following code can be used: + + \code + // open file storage for reading. 
+ // Type of the file is determined from the content, not the extension + FileStorage fs("test.yml", FileStorage::READ); + int test_int = (int)fs["test_int"]; + double test_real = (double)fs["test_real"]; + string test_string = (string)fs["test_string"]; + + Mat M; + fs["test_mat"] >> M; + + FileNode tl = fs["test_list"]; + CV_Assert(tl.type() == FileNode::SEQ && tl.size() == 6); + double tl0 = (double)tl[0]; + int tl1 = (int)tl[1]; + double tl2 = (double)tl[2]; + int tl3 = (int)tl[3]; + string tl4 = (string)tl[4]; + CV_Assert(tl[5].type() == FileNode::MAP && tl[5].size() == 3); + + int month = (int)tl[5]["month"]; + int day = (int)tl[5]["day"]; + int year = (int)tl[5]["year"]; + + FileNode tm = fs["test_map"]; + + int x = (int)tm["x"]; + int y = (int)tm["y"]; + int width = (int)tm["width"]; + int height = (int)tm["height"]; + + int lbp_val = 0; + FileNodeIterator it = tm["lbp"].begin(); + + for(int k = 0; k < 8; k++, ++it) + lbp_val |= ((int)*it) << k; + \endcode +*/ +class CV_EXPORTS_W FileStorage +{ +public: + //! file storage mode + enum + { + READ=0, //! read mode + WRITE=1, //! write mode + APPEND=2 //! append mode + }; + enum + { + UNDEFINED=0, + VALUE_EXPECTED=1, + NAME_EXPECTED=2, + INSIDE_MAP=4 + }; + //! the default constructor + CV_WRAP FileStorage(); + //! the full constructor that opens file storage for reading or writing + CV_WRAP FileStorage(const string& filename, int flags, const string& encoding=string()); + //! the constructor that takes pointer to the C FileStorage structure + FileStorage(CvFileStorage* fs); + //! the destructor. calls release() + virtual ~FileStorage(); + + //! opens file storage for reading or writing. The previous storage is closed with release() + CV_WRAP virtual bool open(const string& filename, int flags, const string& encoding=string()); + //! returns true if the object is associated with currently opened file. + CV_WRAP virtual bool isOpened() const; + //! 
closes the file and releases all the memory buffers + CV_WRAP virtual void release(); + + //! returns the first element of the top-level mapping + CV_WRAP FileNode getFirstTopLevelNode() const; + //! returns the top-level mapping. YAML supports multiple streams + CV_WRAP FileNode root(int streamidx=0) const; + //! returns the specified element of the top-level mapping + FileNode operator[](const string& nodename) const; + //! returns the specified element of the top-level mapping + CV_WRAP FileNode operator[](const char* nodename) const; + + //! returns pointer to the underlying C FileStorage structure + CvFileStorage* operator *() { return fs; } + //! returns pointer to the underlying C FileStorage structure + const CvFileStorage* operator *() const { return fs; } + //! writes one or more numbers of the specified format to the currently written structure + void writeRaw( const string& fmt, const uchar* vec, size_t len ); + //! writes the registered C structure (CvMat, CvMatND, CvSeq). See cvWrite() + void writeObj( const string& name, const void* obj ); + + //! returns the normalized object name for the specified file name + static string getDefaultObjectName(const string& filename); + + Ptr fs; //!< the underlying C FileStorage structure + string elname; //!< the currently written element + vector structs; //!< the stack of written structures + int state; //!< the writer state +}; + +class CV_EXPORTS FileNodeIterator; + +/*! + File Storage Node class + + The node is used to store each and every element of the file storage opened for reading - + from the primitive objects, such as numbers and text strings, to the complex nodes: + sequences, mappings and the registered objects. + + Note that file nodes are only used for navigating file storages opened for reading. + When a file storage is opened for writing, no data is stored in memory after it is written. +*/ +class CV_EXPORTS_W_SIMPLE FileNode +{ +public: + //! 
type of the file storage node + enum + { + NONE=0, //!< empty node + INT=1, //!< an integer + REAL=2, //!< floating-point number + FLOAT=REAL, //!< synonym or REAL + STR=3, //!< text string in UTF-8 encoding + STRING=STR, //!< synonym for STR + REF=4, //!< integer of size size_t. Typically used for storing complex dynamic structures where some elements reference the others + SEQ=5, //!< sequence + MAP=6, //!< mapping + TYPE_MASK=7, + FLOW=8, //!< compact representation of a sequence or mapping. Used only by YAML writer + USER=16, //!< a registered object (e.g. a matrix) + EMPTY=32, //!< empty structure (sequence or mapping) + NAMED=64 //!< the node has a name (i.e. it is element of a mapping) + }; + //! the default constructor + CV_WRAP FileNode(); + //! the full constructor wrapping CvFileNode structure. + FileNode(const CvFileStorage* fs, const CvFileNode* node); + //! the copy constructor + FileNode(const FileNode& node); + //! returns element of a mapping node + FileNode operator[](const string& nodename) const; + //! returns element of a mapping node + CV_WRAP FileNode operator[](const char* nodename) const; + //! returns element of a sequence node + CV_WRAP FileNode operator[](int i) const; + //! returns type of the node + CV_WRAP int type() const; + + //! returns true if the node is empty + CV_WRAP bool empty() const; + //! returns true if the node is a "none" object + CV_WRAP bool isNone() const; + //! returns true if the node is a sequence + CV_WRAP bool isSeq() const; + //! returns true if the node is a mapping + CV_WRAP bool isMap() const; + //! returns true if the node is an integer + CV_WRAP bool isInt() const; + //! returns true if the node is a floating-point number + CV_WRAP bool isReal() const; + //! returns true if the node is a text string + CV_WRAP bool isString() const; + //! returns true if the node has a name + CV_WRAP bool isNamed() const; + //! 
returns the node name or an empty string if the node is nameless + CV_WRAP string name() const; + //! returns the number of elements in the node, if it is a sequence or mapping, or 1 otherwise. + CV_WRAP size_t size() const; + //! returns the node content as an integer. If the node stores floating-point number, it is rounded. + operator int() const; + //! returns the node content as float + operator float() const; + //! returns the node content as double + operator double() const; + //! returns the node content as text string + operator string() const; + + //! returns pointer to the underlying file node + CvFileNode* operator *(); + //! returns pointer to the underlying file node + const CvFileNode* operator* () const; + + //! returns iterator pointing to the first node element + FileNodeIterator begin() const; + //! returns iterator pointing to the element following the last node element + FileNodeIterator end() const; + + //! reads node elements to the buffer with the specified format + void readRaw( const string& fmt, uchar* vec, size_t len ) const; + //! reads the registered object and returns pointer to it + void* readObj() const; + + // do not use wrapper pointer classes for better efficiency + const CvFileStorage* fs; + const CvFileNode* node; +}; + + +/*! + File Node Iterator + + The class is used for iterating sequences (usually) and mappings. + */ +class CV_EXPORTS FileNodeIterator +{ +public: + //! the default constructor + FileNodeIterator(); + //! the full constructor set to the ofs-th element of the node + FileNodeIterator(const CvFileStorage* fs, const CvFileNode* node, size_t ofs=0); + //! the copy constructor + FileNodeIterator(const FileNodeIterator& it); + //! returns the currently observed element + FileNode operator *() const; + //! accesses the currently observed element methods + FileNode operator ->() const; + + //! moves iterator to the next node + FileNodeIterator& operator ++ (); + //! 
moves iterator to the next node + FileNodeIterator operator ++ (int); + //! moves iterator to the previous node + FileNodeIterator& operator -- (); + //! moves iterator to the previous node + FileNodeIterator operator -- (int); + //! moves iterator forward by the specified offset (possibly negative) + FileNodeIterator& operator += (int); + //! moves iterator backward by the specified offset (possibly negative) + FileNodeIterator& operator -= (int); + + //! reads the next maxCount elements (or less, if the sequence/mapping last element occurs earlier) to the buffer with the specified format + FileNodeIterator& readRaw( const string& fmt, uchar* vec, + size_t maxCount=(size_t)INT_MAX ); + + const CvFileStorage* fs; + const CvFileNode* container; + CvSeqReader reader; + size_t remaining; +}; + +////////////// convenient wrappers for operating old-style dynamic structures ////////////// + +template class SeqIterator; + +typedef Ptr MemStorage; + +/*! + Template Sequence Class derived from CvSeq + + The class provides more convenient access to sequence elements, + STL-style operations and iterators. + + \note The class is targeted for simple data types, + i.e. no constructors or destructors + are called for the sequence elements. +*/ +template class CV_EXPORTS Seq +{ +public: + typedef SeqIterator<_Tp> iterator; + typedef SeqIterator<_Tp> const_iterator; + + //! the default constructor + Seq(); + //! the constructor for wrapping CvSeq structure. The real element type in CvSeq should match _Tp. + Seq(const CvSeq* seq); + //! creates the empty sequence that resides in the specified storage + Seq(MemStorage& storage, int headerSize = sizeof(CvSeq)); + //! returns read-write reference to the specified element + _Tp& operator [](int idx); + //! returns read-only reference to the specified element + const _Tp& operator[](int idx) const; + //! returns iterator pointing to the beginning of the sequence + SeqIterator<_Tp> begin() const; + //! 
returns iterator pointing to the element following the last sequence element + SeqIterator<_Tp> end() const; + //! returns the number of elements in the sequence + size_t size() const; + //! returns the type of sequence elements (CV_8UC1 ... CV_64FC(CV_CN_MAX) ...) + int type() const; + //! returns the depth of sequence elements (CV_8U ... CV_64F) + int depth() const; + //! returns the number of channels in each sequence element + int channels() const; + //! returns the size of each sequence element + size_t elemSize() const; + //! returns index of the specified sequence element + size_t index(const _Tp& elem) const; + //! appends the specified element to the end of the sequence + void push_back(const _Tp& elem); + //! appends the specified element to the front of the sequence + void push_front(const _Tp& elem); + //! appends zero or more elements to the end of the sequence + void push_back(const _Tp* elems, size_t count); + //! appends zero or more elements to the front of the sequence + void push_front(const _Tp* elems, size_t count); + //! inserts the specified element to the specified position + void insert(int idx, const _Tp& elem); + //! inserts zero or more elements to the specified position + void insert(int idx, const _Tp* elems, size_t count); + //! removes element at the specified position + void remove(int idx); + //! removes the specified subsequence + void remove(const Range& r); + + //! returns reference to the first sequence element + _Tp& front(); + //! returns read-only reference to the first sequence element + const _Tp& front() const; + //! returns reference to the last sequence element + _Tp& back(); + //! returns read-only reference to the last sequence element + const _Tp& back() const; + //! returns true iff the sequence contains no elements + bool empty() const; + + //! removes all the elements from the sequence + void clear(); + //! removes the first element from the sequence + void pop_front(); + //! 
removes the last element from the sequence + void pop_back(); + //! removes zero or more elements from the beginning of the sequence + void pop_front(_Tp* elems, size_t count); + //! removes zero or more elements from the end of the sequence + void pop_back(_Tp* elems, size_t count); + + //! copies the whole sequence or the sequence slice to the specified vector + void copyTo(vector<_Tp>& vec, const Range& range=Range::all()) const; + //! returns the vector containing all the sequence elements + operator vector<_Tp>() const; + + CvSeq* seq; +}; + + +/*! + STL-style Sequence Iterator inherited from the CvSeqReader structure +*/ +template class CV_EXPORTS SeqIterator : public CvSeqReader +{ +public: + //! the default constructor + SeqIterator(); + //! the constructor setting the iterator to the beginning or to the end of the sequence + SeqIterator(const Seq<_Tp>& seq, bool seekEnd=false); + //! positions the iterator within the sequence + void seek(size_t pos); + //! reports the current iterator position + size_t tell() const; + //! returns reference to the current sequence element + _Tp& operator *(); + //! returns read-only reference to the current sequence element + const _Tp& operator *() const; + //! moves iterator to the next sequence element + SeqIterator& operator ++(); + //! moves iterator to the next sequence element + SeqIterator operator ++(int) const; + //! moves iterator to the previous sequence element + SeqIterator& operator --(); + //! moves iterator to the previous sequence element + SeqIterator operator --(int) const; + + //! moves iterator forward by the specified offset (possibly negative) + SeqIterator& operator +=(int); + //! moves iterator backward by the specified offset (possibly negative) + SeqIterator& operator -=(int); + + // this is index of the current element module seq->total*2 + // (to distinguish between 0 and seq->total) + int index; +}; + + +#if 0 +class CV_EXPORTS AlgorithmImpl; + +/*! 
+ Base class for high-level OpenCV algorithms +*/ +class CV_EXPORTS Algorithm +{ +public: + virtual ~Algorithm(); + virtual string name() const; + + template _Tp get(int paramId) const; + template bool set(int paramId, const _Tp& value); + string paramName(int paramId) const; + string paramHelp(int paramId) const; + int paramType(int paramId) const; + int findParam(const string& name) const; + template _Tp paramDefaultValue(int paramId) const; + template bool paramRange(int paramId, _Tp& minVal, _Tp& maxVal) const; + + virtual void getParams(vector& ids) const; + virtual void write(vector& buf) const; + virtual bool read(const vector& buf); + + typedef Algorithm* (*Constructor)(void); + static void add(const string& name, Constructor create); + static void getList(vector& algorithms); + static Ptr create(const string& name); + +protected: + template void addParam(int propId, _Tp& value, bool readOnly, const string& name, + const string& help=string(), const _Tp& defaultValue=_Tp(), + _Tp (Algorithm::*getter)()=0, bool (Algorithm::*setter)(const _Tp&)=0); + template void setParamRange(int propId, const _Tp& minVal, const _Tp& maxVal); + + bool set_(int paramId, int argType, const void* value); + void get_(int paramId, int argType, void* value); + void paramDefaultValue_(int paramId, int argType, void* value); + void paramRange_(int paramId, int argType, void* minval, void* maxval); + void addParam_(int propId, int argType, void* value, bool readOnly, const string& name, + const string& help, const void* defaultValue, void* getter, void* setter); + void setParamRange_(int propId, int argType, const void* minVal, const void* maxVal); + + Ptr impl; +}; +#endif + +/*! 
+"\nThe CommandLineParser class is designed for command line arguments parsing\n" + "Keys map: \n" + "Before you start to work with CommandLineParser you have to create a map for keys.\n" + " It will look like this\n" + " const char* keys =\n" + " {\n" + " { s| string| 123asd |string parameter}\n" + " { d| digit | 100 |digit parameter }\n" + " { c|noCamera|false |without camera }\n" + " { 1| |some text|help }\n" + " { 2| |333 |another help }\n" + " };\n" + "Usage syntax: \n" + " \"{\" - start of parameter string.\n" + " \"}\" - end of parameter string\n" + " \"|\" - separator between short name, full name, default value and help\n" + "Supported syntax: \n" + " --key1=arg1 \n" + " -key2=arg2 \n" + "Usage: \n" + " Imagine that the input parameters are next:\n" + " -s=string_value --digit=250 --noCamera lena.jpg 10000\n" + " CommandLineParser parser(argc, argv, keys) - create a parser object\n" + " parser.get(\"s\" or \"string\") will return you first parameter value\n" + " parser.get(\"s\", false or \"string\", false) will return you first parameter value\n" + " without spaces in end and begin\n" + " parser.get(\"d\" or \"digit\") will return you second parameter value.\n" + " It also works with 'unsigned int', 'double', and 'float' types>\n" + " parser.get(\"c\" or \"noCamera\") will return you true .\n" + " If you enter this key in commandline>\n" + " It return you false otherwise.\n" + " parser.get(\"1\") will return you the first argument without parameter (lena.jpg) \n" + " parser.get(\"2\") will return you the second argument without parameter (10000)\n" + " It also works with 'unsigned int', 'double', and 'float' types \n" +*/ +class CV_EXPORTS CommandLineParser +{ + public: + + //! the default constructor + CommandLineParser(int argc, const char* argv[], const char* key_map); + + //! 
get parameter, you can choose: delete spaces in end and begin or not + template + _Tp get(const std::string& name, bool space_delete=true) + { + if (!has(name)) + { + return _Tp(); + } + std::string str = getString(name); + return analizeValue<_Tp>(str, space_delete); + } + + //! print short name, full name, current value and help for all params + void printParams(); + + protected: + std::map > data; + std::string getString(const std::string& name); + + bool has(const std::string& keys); + + template + _Tp analizeValue(const std::string& str, bool space_delete=false); + + template + static _Tp getData(const std::string& str) + { + _Tp res; + std::stringstream s1(str); + s1 >> res; + return res; + } + + template + _Tp fromStringNumber(const std::string& str);//the default conversion function for numbers + + }; + +template<> CV_EXPORTS +bool CommandLineParser::get(const std::string& name, bool space_delete); + +template<> CV_EXPORTS +std::string CommandLineParser::analizeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +int CommandLineParser::analizeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +unsigned CommandLineParser::analizeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +float CommandLineParser::analizeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +double CommandLineParser::analizeValue(const std::string& str, bool space_delete); + +} + +#endif // __cplusplus + +#include "opencv2/core/operations.hpp" +#include "opencv2/core/mat.hpp" + +#endif /*__OPENCV_CORE_HPP__*/ diff --git a/include/opencv2/core/core_c.h b/include/opencv2/core/core_c.h new file mode 100644 index 0000000..05d8c72 --- /dev/null +++ b/include/opencv2/core/core_c.h @@ -0,0 +1,1885 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + + +#ifndef __OPENCV_CORE_C_H__ +#define __OPENCV_CORE_C_H__ + +#include "opencv2/core/types_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/****************************************************************************************\ +* Array allocation, deallocation, initialization and access to elements * +\****************************************************************************************/ + +/* wrapper. + If there is no enough memory, the function + (as well as other OpenCV functions that call cvAlloc) + raises an error. */ +CVAPI(void*) cvAlloc( size_t size ); + +/* wrapper. + Here and further all the memory releasing functions + (that all call cvFree) take double pointer in order to + to clear pointer to the data after releasing it. + Passing pointer to NULL pointer is Ok: nothing happens in this case +*/ +CVAPI(void) cvFree_( void* ptr ); +#define cvFree(ptr) (cvFree_(*(ptr)), *(ptr)=0) + +/* Allocates and initializes IplImage header */ +CVAPI(IplImage*) cvCreateImageHeader( CvSize size, int depth, int channels ); + +/* Inializes IplImage header */ +CVAPI(IplImage*) cvInitImageHeader( IplImage* image, CvSize size, int depth, + int channels, int origin CV_DEFAULT(0), + int align CV_DEFAULT(4)); + +/* Creates IPL image (header and data) */ +CVAPI(IplImage*) cvCreateImage( CvSize size, int depth, int channels ); + +/* Releases (i.e. 
deallocates) IPL image header */ +CVAPI(void) cvReleaseImageHeader( IplImage** image ); + +/* Releases IPL image header and data */ +CVAPI(void) cvReleaseImage( IplImage** image ); + +/* Creates a copy of IPL image (widthStep may differ) */ +CVAPI(IplImage*) cvCloneImage( const IplImage* image ); + +/* Sets a Channel Of Interest (only a few functions support COI) - + use cvCopy to extract the selected channel and/or put it back */ +CVAPI(void) cvSetImageCOI( IplImage* image, int coi ); + +/* Retrieves image Channel Of Interest */ +CVAPI(int) cvGetImageCOI( const IplImage* image ); + +/* Sets image ROI (region of interest) (COI is not changed) */ +CVAPI(void) cvSetImageROI( IplImage* image, CvRect rect ); + +/* Resets image ROI and COI */ +CVAPI(void) cvResetImageROI( IplImage* image ); + +/* Retrieves image ROI */ +CVAPI(CvRect) cvGetImageROI( const IplImage* image ); + +/* Allocates and initalizes CvMat header */ +CVAPI(CvMat*) cvCreateMatHeader( int rows, int cols, int type ); + +#define CV_AUTOSTEP 0x7fffffff + +/* Initializes CvMat header */ +CVAPI(CvMat*) cvInitMatHeader( CvMat* mat, int rows, int cols, + int type, void* data CV_DEFAULT(NULL), + int step CV_DEFAULT(CV_AUTOSTEP) ); + +/* Allocates and initializes CvMat header and allocates data */ +CVAPI(CvMat*) cvCreateMat( int rows, int cols, int type ); + +/* Releases CvMat header and deallocates matrix data + (reference counting is used for data) */ +CVAPI(void) cvReleaseMat( CvMat** mat ); + +/* Decrements CvMat data reference counter and deallocates the data if + it reaches 0 */ +CV_INLINE void cvDecRefData( CvArr* arr ) +{ + if( CV_IS_MAT( arr )) + { + CvMat* mat = (CvMat*)arr; + mat->data.ptr = NULL; + if( mat->refcount != NULL && --*mat->refcount == 0 ) + cvFree( &mat->refcount ); + mat->refcount = NULL; + } + else if( CV_IS_MATND( arr )) + { + CvMatND* mat = (CvMatND*)arr; + mat->data.ptr = NULL; + if( mat->refcount != NULL && --*mat->refcount == 0 ) + cvFree( &mat->refcount ); + mat->refcount = NULL; 
+ } +} + +/* Increments CvMat data reference counter */ +CV_INLINE int cvIncRefData( CvArr* arr ) +{ + int refcount = 0; + if( CV_IS_MAT( arr )) + { + CvMat* mat = (CvMat*)arr; + if( mat->refcount != NULL ) + refcount = ++*mat->refcount; + } + else if( CV_IS_MATND( arr )) + { + CvMatND* mat = (CvMatND*)arr; + if( mat->refcount != NULL ) + refcount = ++*mat->refcount; + } + return refcount; +} + + +/* Creates an exact copy of the input matrix (except, may be, step value) */ +CVAPI(CvMat*) cvCloneMat( const CvMat* mat ); + + +/* Makes a new matrix from subrectangle of input array. + No data is copied */ +CVAPI(CvMat*) cvGetSubRect( const CvArr* arr, CvMat* submat, CvRect rect ); +#define cvGetSubArr cvGetSubRect + +/* Selects row span of the input array: arr(start_row:delta_row:end_row,:) + (end_row is not included into the span). */ +CVAPI(CvMat*) cvGetRows( const CvArr* arr, CvMat* submat, + int start_row, int end_row, + int delta_row CV_DEFAULT(1)); + +CV_INLINE CvMat* cvGetRow( const CvArr* arr, CvMat* submat, int row ) +{ + return cvGetRows( arr, submat, row, row + 1, 1 ); +} + + +/* Selects column span of the input array: arr(:,start_col:end_col) + (end_col is not included into the span) */ +CVAPI(CvMat*) cvGetCols( const CvArr* arr, CvMat* submat, + int start_col, int end_col ); + +CV_INLINE CvMat* cvGetCol( const CvArr* arr, CvMat* submat, int col ) +{ + return cvGetCols( arr, submat, col, col + 1 ); +} + +/* Select a diagonal of the input array. + (diag = 0 means the main diagonal, >0 means a diagonal above the main one, + <0 - below the main one). + The diagonal will be represented as a column (nx1 matrix). 
*/ +CVAPI(CvMat*) cvGetDiag( const CvArr* arr, CvMat* submat, + int diag CV_DEFAULT(0)); + +/* low-level scalar <-> raw data conversion functions */ +CVAPI(void) cvScalarToRawData( const CvScalar* scalar, void* data, int type, + int extend_to_12 CV_DEFAULT(0) ); + +CVAPI(void) cvRawDataToScalar( const void* data, int type, CvScalar* scalar ); + +/* Allocates and initializes CvMatND header */ +CVAPI(CvMatND*) cvCreateMatNDHeader( int dims, const int* sizes, int type ); + +/* Allocates and initializes CvMatND header and allocates data */ +CVAPI(CvMatND*) cvCreateMatND( int dims, const int* sizes, int type ); + +/* Initializes preallocated CvMatND header */ +CVAPI(CvMatND*) cvInitMatNDHeader( CvMatND* mat, int dims, const int* sizes, + int type, void* data CV_DEFAULT(NULL) ); + +/* Releases CvMatND */ +CV_INLINE void cvReleaseMatND( CvMatND** mat ) +{ + cvReleaseMat( (CvMat**)mat ); +} + +/* Creates a copy of CvMatND (except, may be, steps) */ +CVAPI(CvMatND*) cvCloneMatND( const CvMatND* mat ); + +/* Allocates and initializes CvSparseMat header and allocates data */ +CVAPI(CvSparseMat*) cvCreateSparseMat( int dims, const int* sizes, int type ); + +/* Releases CvSparseMat */ +CVAPI(void) cvReleaseSparseMat( CvSparseMat** mat ); + +/* Creates a copy of CvSparseMat (except, may be, zero items) */ +CVAPI(CvSparseMat*) cvCloneSparseMat( const CvSparseMat* mat ); + +/* Initializes sparse array iterator + (returns the first node or NULL if the array is empty) */ +CVAPI(CvSparseNode*) cvInitSparseMatIterator( const CvSparseMat* mat, + CvSparseMatIterator* mat_iterator ); + +// returns next sparse array node (or NULL if there is no more nodes) +CV_INLINE CvSparseNode* cvGetNextSparseNode( CvSparseMatIterator* mat_iterator ) +{ + if( mat_iterator->node->next ) + return mat_iterator->node = mat_iterator->node->next; + else + { + int idx; + for( idx = ++mat_iterator->curidx; idx < mat_iterator->mat->hashsize; idx++ ) + { + CvSparseNode* node = 
(CvSparseNode*)mat_iterator->mat->hashtable[idx]; + if( node ) + { + mat_iterator->curidx = idx; + return mat_iterator->node = node; + } + } + return NULL; + } +} + +/**************** matrix iterator: used for n-ary operations on dense arrays *********/ + +#define CV_MAX_ARR 10 + +typedef struct CvNArrayIterator +{ + int count; /* number of arrays */ + int dims; /* number of dimensions to iterate */ + CvSize size; /* maximal common linear size: { width = size, height = 1 } */ + uchar* ptr[CV_MAX_ARR]; /* pointers to the array slices */ + int stack[CV_MAX_DIM]; /* for internal use */ + CvMatND* hdr[CV_MAX_ARR]; /* pointers to the headers of the + matrices that are processed */ +} +CvNArrayIterator; + +#define CV_NO_DEPTH_CHECK 1 +#define CV_NO_CN_CHECK 2 +#define CV_NO_SIZE_CHECK 4 + +/* initializes iterator that traverses through several arrays simulteneously + (the function together with cvNextArraySlice is used for + N-ari element-wise operations) */ +CVAPI(int) cvInitNArrayIterator( int count, CvArr** arrs, + const CvArr* mask, CvMatND* stubs, + CvNArrayIterator* array_iterator, + int flags CV_DEFAULT(0) ); + +/* returns zero value if iteration is finished, non-zero (slice length) otherwise */ +CVAPI(int) cvNextNArraySlice( CvNArrayIterator* array_iterator ); + + +/* Returns type of array elements: + CV_8UC1 ... CV_64FC4 ... */ +CVAPI(int) cvGetElemType( const CvArr* arr ); + +/* Retrieves number of an array dimensions and + optionally sizes of the dimensions */ +CVAPI(int) cvGetDims( const CvArr* arr, int* sizes CV_DEFAULT(NULL) ); + + +/* Retrieves size of a particular array dimension. + For 2d arrays cvGetDimSize(arr,0) returns number of rows (image height) + and cvGetDimSize(arr,1) returns number of columns (image width) */ +CVAPI(int) cvGetDimSize( const CvArr* arr, int index ); + + +/* ptr = &arr(idx0,idx1,...). All indexes are zero-based, + the major dimensions go first (e.g. 
(y,x) for 2D, (z,y,x) for 3D */ +CVAPI(uchar*) cvPtr1D( const CvArr* arr, int idx0, int* type CV_DEFAULT(NULL)); +CVAPI(uchar*) cvPtr2D( const CvArr* arr, int idx0, int idx1, int* type CV_DEFAULT(NULL) ); +CVAPI(uchar*) cvPtr3D( const CvArr* arr, int idx0, int idx1, int idx2, + int* type CV_DEFAULT(NULL)); + +/* For CvMat or IplImage number of indices should be 2 + (row index (y) goes first, column index (x) goes next). + For CvMatND or CvSparseMat number of infices should match number of and + indices order should match the array dimension order. */ +CVAPI(uchar*) cvPtrND( const CvArr* arr, const int* idx, int* type CV_DEFAULT(NULL), + int create_node CV_DEFAULT(1), + unsigned* precalc_hashval CV_DEFAULT(NULL)); + +/* value = arr(idx0,idx1,...) */ +CVAPI(CvScalar) cvGet1D( const CvArr* arr, int idx0 ); +CVAPI(CvScalar) cvGet2D( const CvArr* arr, int idx0, int idx1 ); +CVAPI(CvScalar) cvGet3D( const CvArr* arr, int idx0, int idx1, int idx2 ); +CVAPI(CvScalar) cvGetND( const CvArr* arr, const int* idx ); + +/* for 1-channel arrays */ +CVAPI(double) cvGetReal1D( const CvArr* arr, int idx0 ); +CVAPI(double) cvGetReal2D( const CvArr* arr, int idx0, int idx1 ); +CVAPI(double) cvGetReal3D( const CvArr* arr, int idx0, int idx1, int idx2 ); +CVAPI(double) cvGetRealND( const CvArr* arr, const int* idx ); + +/* arr(idx0,idx1,...) 
= value */ +CVAPI(void) cvSet1D( CvArr* arr, int idx0, CvScalar value ); +CVAPI(void) cvSet2D( CvArr* arr, int idx0, int idx1, CvScalar value ); +CVAPI(void) cvSet3D( CvArr* arr, int idx0, int idx1, int idx2, CvScalar value ); +CVAPI(void) cvSetND( CvArr* arr, const int* idx, CvScalar value ); + +/* for 1-channel arrays */ +CVAPI(void) cvSetReal1D( CvArr* arr, int idx0, double value ); +CVAPI(void) cvSetReal2D( CvArr* arr, int idx0, int idx1, double value ); +CVAPI(void) cvSetReal3D( CvArr* arr, int idx0, + int idx1, int idx2, double value ); +CVAPI(void) cvSetRealND( CvArr* arr, const int* idx, double value ); + +/* clears element of ND dense array, + in case of sparse arrays it deletes the specified node */ +CVAPI(void) cvClearND( CvArr* arr, const int* idx ); + +/* Converts CvArr (IplImage or CvMat,...) to CvMat. + If the last parameter is non-zero, function can + convert multi(>2)-dimensional array to CvMat as long as + the last array's dimension is continous. The resultant + matrix will be have appropriate (a huge) number of rows */ +CVAPI(CvMat*) cvGetMat( const CvArr* arr, CvMat* header, + int* coi CV_DEFAULT(NULL), + int allowND CV_DEFAULT(0)); + +/* Converts CvArr (IplImage or CvMat) to IplImage */ +CVAPI(IplImage*) cvGetImage( const CvArr* arr, IplImage* image_header ); + + +/* Changes a shape of multi-dimensional array. + new_cn == 0 means that number of channels remains unchanged. + new_dims == 0 means that number and sizes of dimensions remain the same + (unless they need to be changed to set the new number of channels) + if new_dims == 1, there is no need to specify new dimension sizes + The resultant configuration should be achievable w/o data copying. 
+ If the resultant array is sparse, CvSparseMat header should be passed + to the function else if the result is 1 or 2 dimensional, + CvMat header should be passed to the function + else CvMatND header should be passed */ +CVAPI(CvArr*) cvReshapeMatND( const CvArr* arr, + int sizeof_header, CvArr* header, + int new_cn, int new_dims, int* new_sizes ); + +#define cvReshapeND( arr, header, new_cn, new_dims, new_sizes ) \ + cvReshapeMatND( (arr), sizeof(*(header)), (header), \ + (new_cn), (new_dims), (new_sizes)) + +CVAPI(CvMat*) cvReshape( const CvArr* arr, CvMat* header, + int new_cn, int new_rows CV_DEFAULT(0) ); + +/* Repeats source 2d array several times in both horizontal and + vertical direction to fill destination array */ +CVAPI(void) cvRepeat( const CvArr* src, CvArr* dst ); + +/* Allocates array data */ +CVAPI(void) cvCreateData( CvArr* arr ); + +/* Releases array data */ +CVAPI(void) cvReleaseData( CvArr* arr ); + +/* Attaches user data to the array header. The step is reffered to + the pre-last dimension. That is, all the planes of the array + must be joint (w/o gaps) */ +CVAPI(void) cvSetData( CvArr* arr, void* data, int step ); + +/* Retrieves raw data of CvMat, IplImage or CvMatND. 
+ In the latter case the function raises an error if + the array can not be represented as a matrix */ +CVAPI(void) cvGetRawData( const CvArr* arr, uchar** data, + int* step CV_DEFAULT(NULL), + CvSize* roi_size CV_DEFAULT(NULL)); + +/* Returns width and height of array in elements */ +CVAPI(CvSize) cvGetSize( const CvArr* arr ); + +/* Copies source array to destination array */ +CVAPI(void) cvCopy( const CvArr* src, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Sets all or "masked" elements of input array + to the same value*/ +CVAPI(void) cvSet( CvArr* arr, CvScalar value, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Clears all the array elements (sets them to 0) */ +CVAPI(void) cvSetZero( CvArr* arr ); +#define cvZero cvSetZero + + +/* Splits a multi-channel array into the set of single-channel arrays or + extracts particular [color] plane */ +CVAPI(void) cvSplit( const CvArr* src, CvArr* dst0, CvArr* dst1, + CvArr* dst2, CvArr* dst3 ); + +/* Merges a set of single-channel arrays into the single multi-channel array + or inserts one particular [color] plane to the array */ +CVAPI(void) cvMerge( const CvArr* src0, const CvArr* src1, + const CvArr* src2, const CvArr* src3, + CvArr* dst ); + +/* Copies several channels from input arrays to + certain channels of output arrays */ +CVAPI(void) cvMixChannels( const CvArr** src, int src_count, + CvArr** dst, int dst_count, + const int* from_to, int pair_count ); + +/* Performs linear transformation on every source array element: + dst(x,y,c) = scale*src(x,y,c)+shift. 
+ Arbitrary combination of input and output array depths are allowed + (number of channels must be the same), thus the function can be used + for type conversion */ +CVAPI(void) cvConvertScale( const CvArr* src, CvArr* dst, + double scale CV_DEFAULT(1), + double shift CV_DEFAULT(0) ); +#define cvCvtScale cvConvertScale +#define cvScale cvConvertScale +#define cvConvert( src, dst ) cvConvertScale( (src), (dst), 1, 0 ) + + +/* Performs linear transformation on every source array element, + stores absolute value of the result: + dst(x,y,c) = abs(scale*src(x,y,c)+shift). + destination array must have 8u type. + In other cases one may use cvConvertScale + cvAbsDiffS */ +CVAPI(void) cvConvertScaleAbs( const CvArr* src, CvArr* dst, + double scale CV_DEFAULT(1), + double shift CV_DEFAULT(0) ); +#define cvCvtScaleAbs cvConvertScaleAbs + + +/* checks termination criteria validity and + sets eps to default_eps (if it is not set), + max_iter to default_max_iters (if it is not set) +*/ +CVAPI(CvTermCriteria) cvCheckTermCriteria( CvTermCriteria criteria, + double default_eps, + int default_max_iters ); + +/****************************************************************************************\ +* Arithmetic, logic and comparison operations * +\****************************************************************************************/ + +/* dst(mask) = src1(mask) + src2(mask) */ +CVAPI(void) cvAdd( const CvArr* src1, const CvArr* src2, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(mask) = src(mask) + value */ +CVAPI(void) cvAddS( const CvArr* src, CvScalar value, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(mask) = src1(mask) - src2(mask) */ +CVAPI(void) cvSub( const CvArr* src1, const CvArr* src2, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(mask) = src(mask) - value = src(mask) + (-value) */ +CV_INLINE void cvSubS( const CvArr* src, CvScalar value, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)) +{ + cvAddS( src, cvScalar( 
-value.val[0], -value.val[1], -value.val[2], -value.val[3]), + dst, mask ); +} + +/* dst(mask) = value - src(mask) */ +CVAPI(void) cvSubRS( const CvArr* src, CvScalar value, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src1(idx) * src2(idx) * scale + (scaled element-wise multiplication of 2 arrays) */ +CVAPI(void) cvMul( const CvArr* src1, const CvArr* src2, + CvArr* dst, double scale CV_DEFAULT(1) ); + +/* element-wise division/inversion with scaling: + dst(idx) = src1(idx) * scale / src2(idx) + or dst(idx) = scale / src2(idx) if src1 == 0 */ +CVAPI(void) cvDiv( const CvArr* src1, const CvArr* src2, + CvArr* dst, double scale CV_DEFAULT(1)); + +/* dst = src1 * scale + src2 */ +CVAPI(void) cvScaleAdd( const CvArr* src1, CvScalar scale, + const CvArr* src2, CvArr* dst ); +#define cvAXPY( A, real_scalar, B, C ) cvScaleAdd(A, cvRealScalar(real_scalar), B, C) + +/* dst = src1 * alpha + src2 * beta + gamma */ +CVAPI(void) cvAddWeighted( const CvArr* src1, double alpha, + const CvArr* src2, double beta, + double gamma, CvArr* dst ); + +/* result = sum_i(src1(i) * src2(i)) (results for all channels are accumulated together) */ +CVAPI(double) cvDotProduct( const CvArr* src1, const CvArr* src2 ); + +/* dst(idx) = src1(idx) & src2(idx) */ +CVAPI(void) cvAnd( const CvArr* src1, const CvArr* src2, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src(idx) & value */ +CVAPI(void) cvAndS( const CvArr* src, CvScalar value, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src1(idx) | src2(idx) */ +CVAPI(void) cvOr( const CvArr* src1, const CvArr* src2, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src(idx) | value */ +CVAPI(void) cvOrS( const CvArr* src, CvScalar value, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src1(idx) ^ src2(idx) */ +CVAPI(void) cvXor( const CvArr* src1, const CvArr* src2, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src(idx) ^ 
value */ +CVAPI(void) cvXorS( const CvArr* src, CvScalar value, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = ~src(idx) */ +CVAPI(void) cvNot( const CvArr* src, CvArr* dst ); + +/* dst(idx) = lower(idx) <= src(idx) < upper(idx) */ +CVAPI(void) cvInRange( const CvArr* src, const CvArr* lower, + const CvArr* upper, CvArr* dst ); + +/* dst(idx) = lower <= src(idx) < upper */ +CVAPI(void) cvInRangeS( const CvArr* src, CvScalar lower, + CvScalar upper, CvArr* dst ); + +#define CV_CMP_EQ 0 +#define CV_CMP_GT 1 +#define CV_CMP_GE 2 +#define CV_CMP_LT 3 +#define CV_CMP_LE 4 +#define CV_CMP_NE 5 + +/* The comparison operation support single-channel arrays only. + Destination image should be 8uC1 or 8sC1 */ + +/* dst(idx) = src1(idx) _cmp_op_ src2(idx) */ +CVAPI(void) cvCmp( const CvArr* src1, const CvArr* src2, CvArr* dst, int cmp_op ); + +/* dst(idx) = src1(idx) _cmp_op_ value */ +CVAPI(void) cvCmpS( const CvArr* src, double value, CvArr* dst, int cmp_op ); + +/* dst(idx) = min(src1(idx),src2(idx)) */ +CVAPI(void) cvMin( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/* dst(idx) = max(src1(idx),src2(idx)) */ +CVAPI(void) cvMax( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/* dst(idx) = min(src(idx),value) */ +CVAPI(void) cvMinS( const CvArr* src, double value, CvArr* dst ); + +/* dst(idx) = max(src(idx),value) */ +CVAPI(void) cvMaxS( const CvArr* src, double value, CvArr* dst ); + +/* dst(x,y,c) = abs(src1(x,y,c) - src2(x,y,c)) */ +CVAPI(void) cvAbsDiff( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/* dst(x,y,c) = abs(src(x,y,c) - value(c)) */ +CVAPI(void) cvAbsDiffS( const CvArr* src, CvArr* dst, CvScalar value ); +#define cvAbs( src, dst ) cvAbsDiffS( (src), (dst), cvScalarAll(0)) + +/****************************************************************************************\ +* Math operations * +\****************************************************************************************/ + +/* Does cartesian->polar 
coordinates conversion. + Either of output components (magnitude or angle) is optional */ +CVAPI(void) cvCartToPolar( const CvArr* x, const CvArr* y, + CvArr* magnitude, CvArr* angle CV_DEFAULT(NULL), + int angle_in_degrees CV_DEFAULT(0)); + +/* Does polar->cartesian coordinates conversion. + Either of output components (magnitude or angle) is optional. + If magnitude is missing it is assumed to be all 1's */ +CVAPI(void) cvPolarToCart( const CvArr* magnitude, const CvArr* angle, + CvArr* x, CvArr* y, + int angle_in_degrees CV_DEFAULT(0)); + +/* Does powering: dst(idx) = src(idx)^power */ +CVAPI(void) cvPow( const CvArr* src, CvArr* dst, double power ); + +/* Does exponention: dst(idx) = exp(src(idx)). + Overflow is not handled yet. Underflow is handled. + Maximal relative error is ~7e-6 for single-precision input */ +CVAPI(void) cvExp( const CvArr* src, CvArr* dst ); + +/* Calculates natural logarithms: dst(idx) = log(abs(src(idx))). + Logarithm of 0 gives large negative number(~-700) + Maximal relative error is ~3e-7 for single-precision output +*/ +CVAPI(void) cvLog( const CvArr* src, CvArr* dst ); + +/* Fast arctangent calculation */ +CVAPI(float) cvFastArctan( float y, float x ); + +/* Fast cubic root calculation */ +CVAPI(float) cvCbrt( float value ); + +/* Checks array values for NaNs, Infs or simply for too large numbers + (if CV_CHECK_RANGE is set). If CV_CHECK_QUIET is set, + no runtime errors is raised (function returns zero value in case of "bad" values). 
+ Otherwise cvError is called */ +#define CV_CHECK_RANGE 1 +#define CV_CHECK_QUIET 2 +CVAPI(int) cvCheckArr( const CvArr* arr, int flags CV_DEFAULT(0), + double min_val CV_DEFAULT(0), double max_val CV_DEFAULT(0)); +#define cvCheckArray cvCheckArr + +#define CV_RAND_UNI 0 +#define CV_RAND_NORMAL 1 +CVAPI(void) cvRandArr( CvRNG* rng, CvArr* arr, int dist_type, + CvScalar param1, CvScalar param2 ); + +CVAPI(void) cvRandShuffle( CvArr* mat, CvRNG* rng, + double iter_factor CV_DEFAULT(1.)); + +#define CV_SORT_EVERY_ROW 0 +#define CV_SORT_EVERY_COLUMN 1 +#define CV_SORT_ASCENDING 0 +#define CV_SORT_DESCENDING 16 + +CVAPI(void) cvSort( const CvArr* src, CvArr* dst CV_DEFAULT(NULL), + CvArr* idxmat CV_DEFAULT(NULL), + int flags CV_DEFAULT(0)); + +/* Finds real roots of a cubic equation */ +CVAPI(int) cvSolveCubic( const CvMat* coeffs, CvMat* roots ); + +/* Finds all real and complex roots of a polynomial equation */ +CVAPI(void) cvSolvePoly(const CvMat* coeffs, CvMat *roots2, + int maxiter CV_DEFAULT(20), int fig CV_DEFAULT(100)); + +/****************************************************************************************\ +* Matrix operations * +\****************************************************************************************/ + +/* Calculates cross product of two 3d vectors */ +CVAPI(void) cvCrossProduct( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/* Matrix transform: dst = A*B + C, C is optional */ +#define cvMatMulAdd( src1, src2, src3, dst ) cvGEMM( (src1), (src2), 1., (src3), 1., (dst), 0 ) +#define cvMatMul( src1, src2, dst ) cvMatMulAdd( (src1), (src2), NULL, (dst)) + +#define CV_GEMM_A_T 1 +#define CV_GEMM_B_T 2 +#define CV_GEMM_C_T 4 +/* Extended matrix transform: + dst = alpha*op(A)*op(B) + beta*op(C), where op(X) is X or X^T */ +CVAPI(void) cvGEMM( const CvArr* src1, const CvArr* src2, double alpha, + const CvArr* src3, double beta, CvArr* dst, + int tABC CV_DEFAULT(0)); +#define cvMatMulAddEx cvGEMM + +/* Transforms each element of source 
array and stores + resultant vectors in destination array */ +CVAPI(void) cvTransform( const CvArr* src, CvArr* dst, + const CvMat* transmat, + const CvMat* shiftvec CV_DEFAULT(NULL)); +#define cvMatMulAddS cvTransform + +/* Does perspective transform on every element of input array */ +CVAPI(void) cvPerspectiveTransform( const CvArr* src, CvArr* dst, + const CvMat* mat ); + +/* Calculates (A-delta)*(A-delta)^T (order=0) or (A-delta)^T*(A-delta) (order=1) */ +CVAPI(void) cvMulTransposed( const CvArr* src, CvArr* dst, int order, + const CvArr* delta CV_DEFAULT(NULL), + double scale CV_DEFAULT(1.) ); + +/* Tranposes matrix. Square matrices can be transposed in-place */ +CVAPI(void) cvTranspose( const CvArr* src, CvArr* dst ); +#define cvT cvTranspose + +/* Completes the symmetric matrix from the lower (LtoR=0) or from the upper (LtoR!=0) part */ +CVAPI(void) cvCompleteSymm( CvMat* matrix, int LtoR CV_DEFAULT(0) ); + +/* Mirror array data around horizontal (flip=0), + vertical (flip=1) or both(flip=-1) axises: + cvFlip(src) flips images vertically and sequences horizontally (inplace) */ +CVAPI(void) cvFlip( const CvArr* src, CvArr* dst CV_DEFAULT(NULL), + int flip_mode CV_DEFAULT(0)); +#define cvMirror cvFlip + + +#define CV_SVD_MODIFY_A 1 +#define CV_SVD_U_T 2 +#define CV_SVD_V_T 4 + +/* Performs Singular Value Decomposition of a matrix */ +CVAPI(void) cvSVD( CvArr* A, CvArr* W, CvArr* U CV_DEFAULT(NULL), + CvArr* V CV_DEFAULT(NULL), int flags CV_DEFAULT(0)); + +/* Performs Singular Value Back Substitution (solves A*X = B): + flags must be the same as in cvSVD */ +CVAPI(void) cvSVBkSb( const CvArr* W, const CvArr* U, + const CvArr* V, const CvArr* B, + CvArr* X, int flags ); + +#define CV_LU 0 +#define CV_SVD 1 +#define CV_SVD_SYM 2 +#define CV_CHOLESKY 3 +#define CV_QR 4 +#define CV_NORMAL 16 + +/* Inverts matrix */ +CVAPI(double) cvInvert( const CvArr* src, CvArr* dst, + int method CV_DEFAULT(CV_LU)); +#define cvInv cvInvert + +/* Solves linear system (src1)*(dst) 
= (src2) + (returns 0 if src1 is a singular and CV_LU method is used) */ +CVAPI(int) cvSolve( const CvArr* src1, const CvArr* src2, CvArr* dst, + int method CV_DEFAULT(CV_LU)); + +/* Calculates determinant of input matrix */ +CVAPI(double) cvDet( const CvArr* mat ); + +/* Calculates trace of the matrix (sum of elements on the main diagonal) */ +CVAPI(CvScalar) cvTrace( const CvArr* mat ); + +/* Finds eigen values and vectors of a symmetric matrix */ +CVAPI(void) cvEigenVV( CvArr* mat, CvArr* evects, CvArr* evals, + double eps CV_DEFAULT(0), + int lowindex CV_DEFAULT(-1), + int highindex CV_DEFAULT(-1)); + +///* Finds selected eigen values and vectors of a symmetric matrix */ +//CVAPI(void) cvSelectedEigenVV( CvArr* mat, CvArr* evects, CvArr* evals, +// int lowindex, int highindex ); + +/* Makes an identity matrix (mat_ij = i == j) */ +CVAPI(void) cvSetIdentity( CvArr* mat, CvScalar value CV_DEFAULT(cvRealScalar(1)) ); + +/* Fills matrix with given range of numbers */ +CVAPI(CvArr*) cvRange( CvArr* mat, double start, double end ); + +/* Calculates covariation matrix for a set of vectors */ +/* transpose([v1-avg, v2-avg,...]) * [v1-avg,v2-avg,...] */ +#define CV_COVAR_SCRAMBLED 0 + +/* [v1-avg, v2-avg,...] * transpose([v1-avg,v2-avg,...]) */ +#define CV_COVAR_NORMAL 1 + +/* do not calc average (i.e. 
mean vector) - use the input vector instead + (useful for calculating covariance matrix by parts) */ +#define CV_COVAR_USE_AVG 2 + +/* scale the covariance matrix coefficients by number of the vectors */ +#define CV_COVAR_SCALE 4 + +/* all the input vectors are stored in a single matrix, as its rows */ +#define CV_COVAR_ROWS 8 + +/* all the input vectors are stored in a single matrix, as its columns */ +#define CV_COVAR_COLS 16 + +CVAPI(void) cvCalcCovarMatrix( const CvArr** vects, int count, + CvArr* cov_mat, CvArr* avg, int flags ); + +#define CV_PCA_DATA_AS_ROW 0 +#define CV_PCA_DATA_AS_COL 1 +#define CV_PCA_USE_AVG 2 +CVAPI(void) cvCalcPCA( const CvArr* data, CvArr* mean, + CvArr* eigenvals, CvArr* eigenvects, int flags ); + +CVAPI(void) cvProjectPCA( const CvArr* data, const CvArr* mean, + const CvArr* eigenvects, CvArr* result ); + +CVAPI(void) cvBackProjectPCA( const CvArr* proj, const CvArr* mean, + const CvArr* eigenvects, CvArr* result ); + +/* Calculates Mahalanobis(weighted) distance */ +CVAPI(double) cvMahalanobis( const CvArr* vec1, const CvArr* vec2, const CvArr* mat ); +#define cvMahalonobis cvMahalanobis + +/****************************************************************************************\ +* Array Statistics * +\****************************************************************************************/ + +/* Finds sum of array elements */ +CVAPI(CvScalar) cvSum( const CvArr* arr ); + +/* Calculates number of non-zero pixels */ +CVAPI(int) cvCountNonZero( const CvArr* arr ); + +/* Calculates mean value of array elements */ +CVAPI(CvScalar) cvAvg( const CvArr* arr, const CvArr* mask CV_DEFAULT(NULL) ); + +/* Calculates mean and standard deviation of pixel values */ +CVAPI(void) cvAvgSdv( const CvArr* arr, CvScalar* mean, CvScalar* std_dev, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Finds global minimum, maximum and their positions */ +CVAPI(void) cvMinMaxLoc( const CvArr* arr, double* min_val, double* max_val, + CvPoint* min_loc 
CV_DEFAULT(NULL), + CvPoint* max_loc CV_DEFAULT(NULL), + const CvArr* mask CV_DEFAULT(NULL) ); + +/* types of array norm */ +#define CV_C 1 +#define CV_L1 2 +#define CV_L2 4 +#define CV_NORM_MASK 7 +#define CV_RELATIVE 8 +#define CV_DIFF 16 +#define CV_MINMAX 32 + +#define CV_DIFF_C (CV_DIFF | CV_C) +#define CV_DIFF_L1 (CV_DIFF | CV_L1) +#define CV_DIFF_L2 (CV_DIFF | CV_L2) +#define CV_RELATIVE_C (CV_RELATIVE | CV_C) +#define CV_RELATIVE_L1 (CV_RELATIVE | CV_L1) +#define CV_RELATIVE_L2 (CV_RELATIVE | CV_L2) + +/* Finds norm, difference norm or relative difference norm for an array (or two arrays) */ +CVAPI(double) cvNorm( const CvArr* arr1, const CvArr* arr2 CV_DEFAULT(NULL), + int norm_type CV_DEFAULT(CV_L2), + const CvArr* mask CV_DEFAULT(NULL) ); + +CVAPI(void) cvNormalize( const CvArr* src, CvArr* dst, + double a CV_DEFAULT(1.), double b CV_DEFAULT(0.), + int norm_type CV_DEFAULT(CV_L2), + const CvArr* mask CV_DEFAULT(NULL) ); + + +#define CV_REDUCE_SUM 0 +#define CV_REDUCE_AVG 1 +#define CV_REDUCE_MAX 2 +#define CV_REDUCE_MIN 3 + +CVAPI(void) cvReduce( const CvArr* src, CvArr* dst, int dim CV_DEFAULT(-1), + int op CV_DEFAULT(CV_REDUCE_SUM) ); + +/****************************************************************************************\ +* Discrete Linear Transforms and Related Functions * +\****************************************************************************************/ + +#define CV_DXT_FORWARD 0 +#define CV_DXT_INVERSE 1 +#define CV_DXT_SCALE 2 /* divide result by size of array */ +#define CV_DXT_INV_SCALE (CV_DXT_INVERSE + CV_DXT_SCALE) +#define CV_DXT_INVERSE_SCALE CV_DXT_INV_SCALE +#define CV_DXT_ROWS 4 /* transform each row individually */ +#define CV_DXT_MUL_CONJ 8 /* conjugate the second argument of cvMulSpectrums */ + +/* Discrete Fourier Transform: + complex->complex, + real->ccs (forward), + ccs->real (inverse) */ +CVAPI(void) cvDFT( const CvArr* src, CvArr* dst, int flags, + int nonzero_rows CV_DEFAULT(0) ); +#define cvFFT cvDFT + +/* 
Multiply results of DFTs: DFT(X)*DFT(Y) or DFT(X)*conj(DFT(Y)) */ +CVAPI(void) cvMulSpectrums( const CvArr* src1, const CvArr* src2, + CvArr* dst, int flags ); + +/* Finds optimal DFT vector size >= size0 */ +CVAPI(int) cvGetOptimalDFTSize( int size0 ); + +/* Discrete Cosine Transform */ +CVAPI(void) cvDCT( const CvArr* src, CvArr* dst, int flags ); + +/****************************************************************************************\ +* Dynamic data structures * +\****************************************************************************************/ + +/* Calculates length of sequence slice (with support of negative indices). */ +CVAPI(int) cvSliceLength( CvSlice slice, const CvSeq* seq ); + + +/* Creates new memory storage. + block_size == 0 means that default, + somewhat optimal size, is used (currently, it is 64K) */ +CVAPI(CvMemStorage*) cvCreateMemStorage( int block_size CV_DEFAULT(0)); + + +/* Creates a memory storage that will borrow memory blocks from parent storage */ +CVAPI(CvMemStorage*) cvCreateChildMemStorage( CvMemStorage* parent ); + + +/* Releases memory storage. All the children of a parent must be released before + the parent. A child storage returns all the blocks to parent when it is released */ +CVAPI(void) cvReleaseMemStorage( CvMemStorage** storage ); + + +/* Clears memory storage. This is the only way(!!!) (besides cvRestoreMemStoragePos) + to reuse memory allocated for the storage - cvClearSeq,cvClearSet ... + do not free any memory. 
+ A child storage returns all the blocks to the parent when it is cleared */ +CVAPI(void) cvClearMemStorage( CvMemStorage* storage ); + +/* Remember a storage "free memory" position */ +CVAPI(void) cvSaveMemStoragePos( const CvMemStorage* storage, CvMemStoragePos* pos ); + +/* Restore a storage "free memory" position */ +CVAPI(void) cvRestoreMemStoragePos( CvMemStorage* storage, CvMemStoragePos* pos ); + +/* Allocates continuous buffer of the specified size in the storage */ +CVAPI(void*) cvMemStorageAlloc( CvMemStorage* storage, size_t size ); + +/* Allocates string in memory storage */ +CVAPI(CvString) cvMemStorageAllocString( CvMemStorage* storage, const char* ptr, + int len CV_DEFAULT(-1) ); + +/* Creates new empty sequence that will reside in the specified storage */ +CVAPI(CvSeq*) cvCreateSeq( int seq_flags, int header_size, + int elem_size, CvMemStorage* storage ); + +/* Changes default size (granularity) of sequence blocks. + The default size is ~1Kbyte */ +CVAPI(void) cvSetSeqBlockSize( CvSeq* seq, int delta_elems ); + + +/* Adds new element to the end of sequence. Returns pointer to the element */ +CVAPI(schar*) cvSeqPush( CvSeq* seq, const void* element CV_DEFAULT(NULL)); + + +/* Adds new element to the beginning of sequence. 
Returns pointer to it */ +CVAPI(schar*) cvSeqPushFront( CvSeq* seq, const void* element CV_DEFAULT(NULL)); + + +/* Removes the last element from sequence and optionally saves it */ +CVAPI(void) cvSeqPop( CvSeq* seq, void* element CV_DEFAULT(NULL)); + + +/* Removes the first element from sequence and optioanally saves it */ +CVAPI(void) cvSeqPopFront( CvSeq* seq, void* element CV_DEFAULT(NULL)); + + +#define CV_FRONT 1 +#define CV_BACK 0 +/* Adds several new elements to the end of sequence */ +CVAPI(void) cvSeqPushMulti( CvSeq* seq, const void* elements, + int count, int in_front CV_DEFAULT(0) ); + +/* Removes several elements from the end of sequence and optionally saves them */ +CVAPI(void) cvSeqPopMulti( CvSeq* seq, void* elements, + int count, int in_front CV_DEFAULT(0) ); + +/* Inserts a new element in the middle of sequence. + cvSeqInsert(seq,0,elem) == cvSeqPushFront(seq,elem) */ +CVAPI(schar*) cvSeqInsert( CvSeq* seq, int before_index, + const void* element CV_DEFAULT(NULL)); + +/* Removes specified sequence element */ +CVAPI(void) cvSeqRemove( CvSeq* seq, int index ); + + +/* Removes all the elements from the sequence. The freed memory + can be reused later only by the same sequence unless cvClearMemStorage + or cvRestoreMemStoragePos is called */ +CVAPI(void) cvClearSeq( CvSeq* seq ); + + +/* Retrieves pointer to specified sequence element. + Negative indices are supported and mean counting from the end + (e.g -1 means the last sequence element) */ +CVAPI(schar*) cvGetSeqElem( const CvSeq* seq, int index ); + +/* Calculates index of the specified sequence element. + Returns -1 if element does not belong to the sequence */ +CVAPI(int) cvSeqElemIdx( const CvSeq* seq, const void* element, + CvSeqBlock** block CV_DEFAULT(NULL) ); + +/* Initializes sequence writer. 
The new elements will be added to the end of sequence */ +CVAPI(void) cvStartAppendToSeq( CvSeq* seq, CvSeqWriter* writer ); + + +/* Combination of cvCreateSeq and cvStartAppendToSeq */ +CVAPI(void) cvStartWriteSeq( int seq_flags, int header_size, + int elem_size, CvMemStorage* storage, + CvSeqWriter* writer ); + +/* Closes sequence writer, updates sequence header and returns pointer + to the resultant sequence + (which may be useful if the sequence was created using cvStartWriteSeq)) +*/ +CVAPI(CvSeq*) cvEndWriteSeq( CvSeqWriter* writer ); + + +/* Updates sequence header. May be useful to get access to some of previously + written elements via cvGetSeqElem or sequence reader */ +CVAPI(void) cvFlushSeqWriter( CvSeqWriter* writer ); + + +/* Initializes sequence reader. + The sequence can be read in forward or backward direction */ +CVAPI(void) cvStartReadSeq( const CvSeq* seq, CvSeqReader* reader, + int reverse CV_DEFAULT(0) ); + + +/* Returns current sequence reader position (currently observed sequence element) */ +CVAPI(int) cvGetSeqReaderPos( CvSeqReader* reader ); + + +/* Changes sequence reader position. It may seek to an absolute or + to relative to the current position */ +CVAPI(void) cvSetSeqReaderPos( CvSeqReader* reader, int index, + int is_relative CV_DEFAULT(0)); + +/* Copies sequence content to a continuous piece of memory */ +CVAPI(void*) cvCvtSeqToArray( const CvSeq* seq, void* elements, + CvSlice slice CV_DEFAULT(CV_WHOLE_SEQ) ); + +/* Creates sequence header for array. 
+ After that all the operations on sequences that do not alter the content + can be applied to the resultant sequence */ +CVAPI(CvSeq*) cvMakeSeqHeaderForArray( int seq_type, int header_size, + int elem_size, void* elements, int total, + CvSeq* seq, CvSeqBlock* block ); + +/* Extracts sequence slice (with or without copying sequence elements) */ +CVAPI(CvSeq*) cvSeqSlice( const CvSeq* seq, CvSlice slice, + CvMemStorage* storage CV_DEFAULT(NULL), + int copy_data CV_DEFAULT(0)); + +CV_INLINE CvSeq* cvCloneSeq( const CvSeq* seq, CvMemStorage* storage CV_DEFAULT(NULL)) +{ + return cvSeqSlice( seq, CV_WHOLE_SEQ, storage, 1 ); +} + +/* Removes sequence slice */ +CVAPI(void) cvSeqRemoveSlice( CvSeq* seq, CvSlice slice ); + +/* Inserts a sequence or array into another sequence */ +CVAPI(void) cvSeqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr ); + +/* a < b ? -1 : a > b ? 1 : 0 */ +typedef int (CV_CDECL* CvCmpFunc)(const void* a, const void* b, void* userdata ); + +/* Sorts sequence in-place given element comparison function */ +CVAPI(void) cvSeqSort( CvSeq* seq, CvCmpFunc func, void* userdata CV_DEFAULT(NULL) ); + +/* Finds element in a [sorted] sequence */ +CVAPI(schar*) cvSeqSearch( CvSeq* seq, const void* elem, CvCmpFunc func, + int is_sorted, int* elem_idx, + void* userdata CV_DEFAULT(NULL) ); + +/* Reverses order of sequence elements in-place */ +CVAPI(void) cvSeqInvert( CvSeq* seq ); + +/* Splits sequence into one or more equivalence classes using the specified criteria */ +CVAPI(int) cvSeqPartition( const CvSeq* seq, CvMemStorage* storage, + CvSeq** labels, CvCmpFunc is_equal, void* userdata ); + +/************ Internal sequence functions ************/ +CVAPI(void) cvChangeSeqBlock( void* reader, int direction ); +CVAPI(void) cvCreateSeqBlock( CvSeqWriter* writer ); + + +/* Creates a new set */ +CVAPI(CvSet*) cvCreateSet( int set_flags, int header_size, + int elem_size, CvMemStorage* storage ); + +/* Adds new element to the set and returns 
pointer to it */ +CVAPI(int) cvSetAdd( CvSet* set_header, CvSetElem* elem CV_DEFAULT(NULL), + CvSetElem** inserted_elem CV_DEFAULT(NULL) ); + +/* Fast variant of cvSetAdd */ +CV_INLINE CvSetElem* cvSetNew( CvSet* set_header ) +{ + CvSetElem* elem = set_header->free_elems; + if( elem ) + { + set_header->free_elems = elem->next_free; + elem->flags = elem->flags & CV_SET_ELEM_IDX_MASK; + set_header->active_count++; + } + else + cvSetAdd( set_header, NULL, (CvSetElem**)&elem ); + return elem; +} + +/* Removes set element given its pointer */ +CV_INLINE void cvSetRemoveByPtr( CvSet* set_header, void* elem ) +{ + CvSetElem* _elem = (CvSetElem*)elem; + assert( _elem->flags >= 0 /*&& (elem->flags & CV_SET_ELEM_IDX_MASK) < set_header->total*/ ); + _elem->next_free = set_header->free_elems; + _elem->flags = (_elem->flags & CV_SET_ELEM_IDX_MASK) | CV_SET_ELEM_FREE_FLAG; + set_header->free_elems = _elem; + set_header->active_count--; +} + +/* Removes element from the set by its index */ +CVAPI(void) cvSetRemove( CvSet* set_header, int index ); + +/* Returns a set element by index. If the element doesn't belong to the set, + NULL is returned */ +CV_INLINE CvSetElem* cvGetSetElem( const CvSet* set_header, int index ) +{ + CvSetElem* elem = (CvSetElem*)cvGetSeqElem( (CvSeq*)set_header, index ); + return elem && CV_IS_SET_ELEM( elem ) ? 
elem : 0; +} + +/* Removes all the elements from the set */ +CVAPI(void) cvClearSet( CvSet* set_header ); + +/* Creates new graph */ +CVAPI(CvGraph*) cvCreateGraph( int graph_flags, int header_size, + int vtx_size, int edge_size, + CvMemStorage* storage ); + +/* Adds new vertex to the graph */ +CVAPI(int) cvGraphAddVtx( CvGraph* graph, const CvGraphVtx* vtx CV_DEFAULT(NULL), + CvGraphVtx** inserted_vtx CV_DEFAULT(NULL) ); + + +/* Removes vertex from the graph together with all incident edges */ +CVAPI(int) cvGraphRemoveVtx( CvGraph* graph, int index ); +CVAPI(int) cvGraphRemoveVtxByPtr( CvGraph* graph, CvGraphVtx* vtx ); + + +/* Link two vertices specifed by indices or pointers if they + are not connected or return pointer to already existing edge + connecting the vertices. + Functions return 1 if a new edge was created, 0 otherwise */ +CVAPI(int) cvGraphAddEdge( CvGraph* graph, + int start_idx, int end_idx, + const CvGraphEdge* edge CV_DEFAULT(NULL), + CvGraphEdge** inserted_edge CV_DEFAULT(NULL) ); + +CVAPI(int) cvGraphAddEdgeByPtr( CvGraph* graph, + CvGraphVtx* start_vtx, CvGraphVtx* end_vtx, + const CvGraphEdge* edge CV_DEFAULT(NULL), + CvGraphEdge** inserted_edge CV_DEFAULT(NULL) ); + +/* Remove edge connecting two vertices */ +CVAPI(void) cvGraphRemoveEdge( CvGraph* graph, int start_idx, int end_idx ); +CVAPI(void) cvGraphRemoveEdgeByPtr( CvGraph* graph, CvGraphVtx* start_vtx, + CvGraphVtx* end_vtx ); + +/* Find edge connecting two vertices */ +CVAPI(CvGraphEdge*) cvFindGraphEdge( const CvGraph* graph, int start_idx, int end_idx ); +CVAPI(CvGraphEdge*) cvFindGraphEdgeByPtr( const CvGraph* graph, + const CvGraphVtx* start_vtx, + const CvGraphVtx* end_vtx ); +#define cvGraphFindEdge cvFindGraphEdge +#define cvGraphFindEdgeByPtr cvFindGraphEdgeByPtr + +/* Remove all vertices and edges from the graph */ +CVAPI(void) cvClearGraph( CvGraph* graph ); + + +/* Count number of edges incident to the vertex */ +CVAPI(int) cvGraphVtxDegree( const CvGraph* graph, int 
vtx_idx ); +CVAPI(int) cvGraphVtxDegreeByPtr( const CvGraph* graph, const CvGraphVtx* vtx ); + + +/* Retrieves graph vertex by given index */ +#define cvGetGraphVtx( graph, idx ) (CvGraphVtx*)cvGetSetElem((CvSet*)(graph), (idx)) + +/* Retrieves index of a graph vertex given its pointer */ +#define cvGraphVtxIdx( graph, vtx ) ((vtx)->flags & CV_SET_ELEM_IDX_MASK) + +/* Retrieves index of a graph edge given its pointer */ +#define cvGraphEdgeIdx( graph, edge ) ((edge)->flags & CV_SET_ELEM_IDX_MASK) + +#define cvGraphGetVtxCount( graph ) ((graph)->active_count) +#define cvGraphGetEdgeCount( graph ) ((graph)->edges->active_count) + +#define CV_GRAPH_VERTEX 1 +#define CV_GRAPH_TREE_EDGE 2 +#define CV_GRAPH_BACK_EDGE 4 +#define CV_GRAPH_FORWARD_EDGE 8 +#define CV_GRAPH_CROSS_EDGE 16 +#define CV_GRAPH_ANY_EDGE 30 +#define CV_GRAPH_NEW_TREE 32 +#define CV_GRAPH_BACKTRACKING 64 +#define CV_GRAPH_OVER -1 + +#define CV_GRAPH_ALL_ITEMS -1 + +/* flags for graph vertices and edges */ +#define CV_GRAPH_ITEM_VISITED_FLAG (1 << 30) +#define CV_IS_GRAPH_VERTEX_VISITED(vtx) \ + (((CvGraphVtx*)(vtx))->flags & CV_GRAPH_ITEM_VISITED_FLAG) +#define CV_IS_GRAPH_EDGE_VISITED(edge) \ + (((CvGraphEdge*)(edge))->flags & CV_GRAPH_ITEM_VISITED_FLAG) +#define CV_GRAPH_SEARCH_TREE_NODE_FLAG (1 << 29) +#define CV_GRAPH_FORWARD_EDGE_FLAG (1 << 28) + +typedef struct CvGraphScanner +{ + CvGraphVtx* vtx; /* current graph vertex (or current edge origin) */ + CvGraphVtx* dst; /* current graph edge destination vertex */ + CvGraphEdge* edge; /* current edge */ + + CvGraph* graph; /* the graph */ + CvSeq* stack; /* the graph vertex stack */ + int index; /* the lower bound of certainly visited vertices */ + int mask; /* event mask */ +} +CvGraphScanner; + +/* Creates new graph scanner. */ +CVAPI(CvGraphScanner*) cvCreateGraphScanner( CvGraph* graph, + CvGraphVtx* vtx CV_DEFAULT(NULL), + int mask CV_DEFAULT(CV_GRAPH_ALL_ITEMS)); + +/* Releases graph scanner. 
*/ +CVAPI(void) cvReleaseGraphScanner( CvGraphScanner** scanner ); + +/* Get next graph element */ +CVAPI(int) cvNextGraphItem( CvGraphScanner* scanner ); + +/* Creates a copy of graph */ +CVAPI(CvGraph*) cvCloneGraph( const CvGraph* graph, CvMemStorage* storage ); + +/****************************************************************************************\ +* Drawing * +\****************************************************************************************/ + +/****************************************************************************************\ +* Drawing functions work with images/matrices of arbitrary type. * +* For color images the channel order is BGR[A] * +* Antialiasing is supported only for 8-bit image now. * +* All the functions include parameter color that means rgb value (that may be * +* constructed with CV_RGB macro) for color images and brightness * +* for grayscale images. * +* If a drawn figure is partially or completely outside of the image, it is clipped.* +\****************************************************************************************/ + +#define CV_RGB( r, g, b ) cvScalar( (b), (g), (r), 0 ) +#define CV_FILLED -1 + +#define CV_AA 16 + +/* Draws 4-connected, 8-connected or antialiased line segment connecting two points */ +CVAPI(void) cvLine( CvArr* img, CvPoint pt1, CvPoint pt2, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ); + +/* Draws a rectangle given two opposite corners of the rectangle (pt1 & pt2), + if thickness<0 (e.g. 
thickness == CV_FILLED), the filled box is drawn */ +CVAPI(void) cvRectangle( CvArr* img, CvPoint pt1, CvPoint pt2, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), + int shift CV_DEFAULT(0)); + +/* Draws a rectangle specified by a CvRect structure */ +CVAPI(void) cvRectangleR( CvArr* img, CvRect r, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), + int shift CV_DEFAULT(0)); + + +/* Draws a circle with specified center and radius. + Thickness works in the same way as with cvRectangle */ +CVAPI(void) cvCircle( CvArr* img, CvPoint center, int radius, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0)); + +/* Draws ellipse outline, filled ellipse, elliptic arc or filled elliptic sector, + depending on , and parameters. The resultant figure + is rotated by . All the angles are in degrees */ +CVAPI(void) cvEllipse( CvArr* img, CvPoint center, CvSize axes, + double angle, double start_angle, double end_angle, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0)); + +CV_INLINE void cvEllipseBox( CvArr* img, CvBox2D box, CvScalar color, + int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ) +{ + CvSize axes; + axes.width = cvRound(box.size.width*0.5); + axes.height = cvRound(box.size.height*0.5); + + cvEllipse( img, cvPointFrom32f( box.center ), axes, box.angle, + 0, 360, color, thickness, line_type, shift ); +} + +/* Fills convex or monotonous polygon. 
*/ +CVAPI(void) cvFillConvexPoly( CvArr* img, const CvPoint* pts, int npts, CvScalar color, + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0)); + +/* Fills an area bounded by one or more arbitrary polygons */ +CVAPI(void) cvFillPoly( CvArr* img, CvPoint** pts, const int* npts, + int contours, CvScalar color, + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ); + +/* Draws one or more polygonal curves */ +CVAPI(void) cvPolyLine( CvArr* img, CvPoint** pts, const int* npts, int contours, + int is_closed, CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ); + +#define cvDrawRect cvRectangle +#define cvDrawLine cvLine +#define cvDrawCircle cvCircle +#define cvDrawEllipse cvEllipse +#define cvDrawPolyLine cvPolyLine + +/* Clips the line segment connecting *pt1 and *pt2 + by the rectangular window + (0<=xptr will point + to pt1 (or pt2, see left_to_right description) location in the image. + Returns the number of pixels on the line between the ending points. */ +CVAPI(int) cvInitLineIterator( const CvArr* image, CvPoint pt1, CvPoint pt2, + CvLineIterator* line_iterator, + int connectivity CV_DEFAULT(8), + int left_to_right CV_DEFAULT(0)); + +/* Moves iterator to the next line point */ +#define CV_NEXT_LINE_POINT( line_iterator ) \ +{ \ + int _line_iterator_mask = (line_iterator).err < 0 ? 
-1 : 0; \ + (line_iterator).err += (line_iterator).minus_delta + \ + ((line_iterator).plus_delta & _line_iterator_mask); \ + (line_iterator).ptr += (line_iterator).minus_step + \ + ((line_iterator).plus_step & _line_iterator_mask); \ +} + + +/* basic font types */ +#define CV_FONT_HERSHEY_SIMPLEX 0 +#define CV_FONT_HERSHEY_PLAIN 1 +#define CV_FONT_HERSHEY_DUPLEX 2 +#define CV_FONT_HERSHEY_COMPLEX 3 +#define CV_FONT_HERSHEY_TRIPLEX 4 +#define CV_FONT_HERSHEY_COMPLEX_SMALL 5 +#define CV_FONT_HERSHEY_SCRIPT_SIMPLEX 6 +#define CV_FONT_HERSHEY_SCRIPT_COMPLEX 7 + +/* font flags */ +#define CV_FONT_ITALIC 16 + +#define CV_FONT_VECTOR0 CV_FONT_HERSHEY_SIMPLEX + + +/* Font structure */ +typedef struct CvFont +{ + const char* nameFont; //Qt:nameFont + CvScalar color; //Qt:ColorFont -> cvScalar(blue_component, green_component, red\_component[, alpha_component]) + int font_face; //Qt: bool italic /* =CV_FONT_* */ + const int* ascii; /* font data and metrics */ + const int* greek; + const int* cyrillic; + float hscale, vscale; + float shear; /* slope coefficient: 0 - normal, >0 - italic */ + int thickness; //Qt: weight /* letters thickness */ + float dx; /* horizontal interval between letters */ + int line_type; //Qt: PointSize +} +CvFont; + +/* Initializes font structure used further in cvPutText */ +CVAPI(void) cvInitFont( CvFont* font, int font_face, + double hscale, double vscale, + double shear CV_DEFAULT(0), + int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8)); + +CV_INLINE CvFont cvFont( double scale, int thickness CV_DEFAULT(1) ) +{ + CvFont font; + cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, scale, scale, 0, thickness, CV_AA ); + return font; +} + +/* Renders text stroke with specified font and color at specified location. 
+ CvFont should be initialized with cvInitFont */ +CVAPI(void) cvPutText( CvArr* img, const char* text, CvPoint org, + const CvFont* font, CvScalar color ); + +/* Calculates bounding box of text stroke (useful for alignment) */ +CVAPI(void) cvGetTextSize( const char* text_string, const CvFont* font, + CvSize* text_size, int* baseline ); + + + +/* Unpacks color value, if arrtype is CV_8UC?, is treated as + packed color value, otherwise the first channels (depending on arrtype) + of destination scalar are set to the same value = */ +CVAPI(CvScalar) cvColorToScalar( double packed_color, int arrtype ); + +/* Returns the polygon points which make up the given ellipse. The ellipse is define by + the box of size 'axes' rotated 'angle' around the 'center'. A partial sweep + of the ellipse arc can be done by spcifying arc_start and arc_end to be something + other than 0 and 360, respectively. The input array 'pts' must be large enough to + hold the result. The total number of points stored into 'pts' is returned by this + function. */ +CVAPI(int) cvEllipse2Poly( CvPoint center, CvSize axes, + int angle, int arc_start, int arc_end, CvPoint * pts, int delta ); + +/* Draws contour outlines or filled interiors on the image */ +CVAPI(void) cvDrawContours( CvArr *img, CvSeq* contour, + CvScalar external_color, CvScalar hole_color, + int max_level, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), + CvPoint offset CV_DEFAULT(cvPoint(0,0))); + +/* Does look-up transformation. 
Elements of the source array + (that should be 8uC1 or 8sC1) are used as indexes in lutarr 256-element table */ +CVAPI(void) cvLUT( const CvArr* src, CvArr* dst, const CvArr* lut ); + + +/******************* Iteration through the sequence tree *****************/ +typedef struct CvTreeNodeIterator +{ + const void* node; + int level; + int max_level; +} +CvTreeNodeIterator; + +CVAPI(void) cvInitTreeNodeIterator( CvTreeNodeIterator* tree_iterator, + const void* first, int max_level ); +CVAPI(void*) cvNextTreeNode( CvTreeNodeIterator* tree_iterator ); +CVAPI(void*) cvPrevTreeNode( CvTreeNodeIterator* tree_iterator ); + +/* Inserts sequence into tree with specified "parent" sequence. + If parent is equal to frame (e.g. the most external contour), + then added contour will have null pointer to parent. */ +CVAPI(void) cvInsertNodeIntoTree( void* node, void* parent, void* frame ); + +/* Removes contour from tree (together with the contour children). */ +CVAPI(void) cvRemoveNodeFromTree( void* node, void* frame ); + +/* Gathers pointers to all the sequences, + accessible from the , to the single sequence */ +CVAPI(CvSeq*) cvTreeToNodeSeq( const void* first, int header_size, + CvMemStorage* storage ); + +/* The function implements the K-means algorithm for clustering an array of sample + vectors in a specified number of classes */ +#define CV_KMEANS_USE_INITIAL_LABELS 1 +CVAPI(int) cvKMeans2( const CvArr* samples, int cluster_count, CvArr* labels, + CvTermCriteria termcrit, int attempts CV_DEFAULT(1), + CvRNG* rng CV_DEFAULT(0), int flags CV_DEFAULT(0), + CvArr* _centers CV_DEFAULT(0), double* compactness CV_DEFAULT(0) ); + +/****************************************************************************************\ +* System functions * +\****************************************************************************************/ + +/* Add the function pointers table with associated information to the IPP primitives list */ +CVAPI(int) cvRegisterModule( const CvModuleInfo* 
module_info ); + +/* Loads optimized functions from IPP, MKL etc. or switches back to pure C code */ +CVAPI(int) cvUseOptimized( int on_off ); + +/* Retrieves information about the registered modules and loaded optimized plugins */ +CVAPI(void) cvGetModuleInfo( const char* module_name, + const char** version, + const char** loaded_addon_plugins ); + +typedef void* (CV_CDECL *CvAllocFunc)(size_t size, void* userdata); +typedef int (CV_CDECL *CvFreeFunc)(void* pptr, void* userdata); + +/* Set user-defined memory managment functions (substitutors for malloc and free) that + will be called by cvAlloc, cvFree and higher-level functions (e.g. cvCreateImage) */ +CVAPI(void) cvSetMemoryManager( CvAllocFunc alloc_func CV_DEFAULT(NULL), + CvFreeFunc free_func CV_DEFAULT(NULL), + void* userdata CV_DEFAULT(NULL)); + + +typedef IplImage* (CV_STDCALL* Cv_iplCreateImageHeader) + (int,int,int,char*,char*,int,int,int,int,int, + IplROI*,IplImage*,void*,IplTileInfo*); +typedef void (CV_STDCALL* Cv_iplAllocateImageData)(IplImage*,int,int); +typedef void (CV_STDCALL* Cv_iplDeallocate)(IplImage*,int); +typedef IplROI* (CV_STDCALL* Cv_iplCreateROI)(int,int,int,int,int); +typedef IplImage* (CV_STDCALL* Cv_iplCloneImage)(const IplImage*); + +/* Makes OpenCV use IPL functions for IplImage allocation/deallocation */ +CVAPI(void) cvSetIPLAllocators( Cv_iplCreateImageHeader create_header, + Cv_iplAllocateImageData allocate_data, + Cv_iplDeallocate deallocate, + Cv_iplCreateROI create_roi, + Cv_iplCloneImage clone_image ); + +#define CV_TURN_ON_IPL_COMPATIBILITY() \ + cvSetIPLAllocators( iplCreateImageHeader, iplAllocateImage, \ + iplDeallocate, iplCreateROI, iplCloneImage ) + +/****************************************************************************************\ +* Data Persistence * +\****************************************************************************************/ + +/********************************** High-level functions ********************************/ + +/* opens existing or 
creates new file storage */ +CVAPI(CvFileStorage*) cvOpenFileStorage( const char* filename, CvMemStorage* memstorage, + int flags, const char* encoding CV_DEFAULT(NULL) ); + +/* closes file storage and deallocates buffers */ +CVAPI(void) cvReleaseFileStorage( CvFileStorage** fs ); + +/* returns attribute value or 0 (NULL) if there is no such attribute */ +CVAPI(const char*) cvAttrValue( const CvAttrList* attr, const char* attr_name ); + +/* starts writing compound structure (map or sequence) */ +CVAPI(void) cvStartWriteStruct( CvFileStorage* fs, const char* name, + int struct_flags, const char* type_name CV_DEFAULT(NULL), + CvAttrList attributes CV_DEFAULT(cvAttrList())); + +/* finishes writing compound structure */ +CVAPI(void) cvEndWriteStruct( CvFileStorage* fs ); + +/* writes an integer */ +CVAPI(void) cvWriteInt( CvFileStorage* fs, const char* name, int value ); + +/* writes a floating-point number */ +CVAPI(void) cvWriteReal( CvFileStorage* fs, const char* name, double value ); + +/* writes a string */ +CVAPI(void) cvWriteString( CvFileStorage* fs, const char* name, + const char* str, int quote CV_DEFAULT(0) ); + +/* writes a comment */ +CVAPI(void) cvWriteComment( CvFileStorage* fs, const char* comment, + int eol_comment ); + +/* writes instance of a standard type (matrix, image, sequence, graph etc.) 
+ or user-defined type */ +CVAPI(void) cvWrite( CvFileStorage* fs, const char* name, const void* ptr, + CvAttrList attributes CV_DEFAULT(cvAttrList())); + +/* starts the next stream */ +CVAPI(void) cvStartNextStream( CvFileStorage* fs ); + +/* helper function: writes multiple integer or floating-point numbers */ +CVAPI(void) cvWriteRawData( CvFileStorage* fs, const void* src, + int len, const char* dt ); + +/* returns the hash entry corresponding to the specified literal key string or 0 + if there is no such a key in the storage */ +CVAPI(CvStringHashNode*) cvGetHashedKey( CvFileStorage* fs, const char* name, + int len CV_DEFAULT(-1), + int create_missing CV_DEFAULT(0)); + +/* returns file node with the specified key within the specified map + (collection of named nodes) */ +CVAPI(CvFileNode*) cvGetRootFileNode( const CvFileStorage* fs, + int stream_index CV_DEFAULT(0) ); + +/* returns file node with the specified key within the specified map + (collection of named nodes) */ +CVAPI(CvFileNode*) cvGetFileNode( CvFileStorage* fs, CvFileNode* map, + const CvStringHashNode* key, + int create_missing CV_DEFAULT(0) ); + +/* this is a slower version of cvGetFileNode that takes the key as a literal string */ +CVAPI(CvFileNode*) cvGetFileNodeByName( const CvFileStorage* fs, + const CvFileNode* map, + const char* name ); + +CV_INLINE int cvReadInt( const CvFileNode* node, int default_value CV_DEFAULT(0) ) +{ + return !node ? default_value : + CV_NODE_IS_INT(node->tag) ? node->data.i : + CV_NODE_IS_REAL(node->tag) ? cvRound(node->data.f) : 0x7fffffff; +} + + +CV_INLINE int cvReadIntByName( const CvFileStorage* fs, const CvFileNode* map, + const char* name, int default_value CV_DEFAULT(0) ) +{ + return cvReadInt( cvGetFileNodeByName( fs, map, name ), default_value ); +} + + +CV_INLINE double cvReadReal( const CvFileNode* node, double default_value CV_DEFAULT(0.) ) +{ + return !node ? default_value : + CV_NODE_IS_INT(node->tag) ? 
(double)node->data.i : + CV_NODE_IS_REAL(node->tag) ? node->data.f : 1e300; +} + + +CV_INLINE double cvReadRealByName( const CvFileStorage* fs, const CvFileNode* map, + const char* name, double default_value CV_DEFAULT(0.) ) +{ + return cvReadReal( cvGetFileNodeByName( fs, map, name ), default_value ); +} + + +CV_INLINE const char* cvReadString( const CvFileNode* node, + const char* default_value CV_DEFAULT(NULL) ) +{ + return !node ? default_value : CV_NODE_IS_STRING(node->tag) ? node->data.str.ptr : 0; +} + + +CV_INLINE const char* cvReadStringByName( const CvFileStorage* fs, const CvFileNode* map, + const char* name, const char* default_value CV_DEFAULT(NULL) ) +{ + return cvReadString( cvGetFileNodeByName( fs, map, name ), default_value ); +} + + +/* decodes standard or user-defined object and returns it */ +CVAPI(void*) cvRead( CvFileStorage* fs, CvFileNode* node, + CvAttrList* attributes CV_DEFAULT(NULL)); + +/* decodes standard or user-defined object and returns it */ +CV_INLINE void* cvReadByName( CvFileStorage* fs, const CvFileNode* map, + const char* name, CvAttrList* attributes CV_DEFAULT(NULL) ) +{ + return cvRead( fs, cvGetFileNodeByName( fs, map, name ), attributes ); +} + + +/* starts reading data from sequence or scalar numeric node */ +CVAPI(void) cvStartReadRawData( const CvFileStorage* fs, const CvFileNode* src, + CvSeqReader* reader ); + +/* reads multiple numbers and stores them to array */ +CVAPI(void) cvReadRawDataSlice( const CvFileStorage* fs, CvSeqReader* reader, + int count, void* dst, const char* dt ); + +/* combination of two previous functions for easier reading of whole sequences */ +CVAPI(void) cvReadRawData( const CvFileStorage* fs, const CvFileNode* src, + void* dst, const char* dt ); + +/* writes a copy of file node to file storage */ +CVAPI(void) cvWriteFileNode( CvFileStorage* fs, const char* new_node_name, + const CvFileNode* node, int embed ); + +/* returns name of file node */ +CVAPI(const char*) cvGetFileNodeName( const 
CvFileNode* node ); + +/*********************************** Adding own types ***********************************/ + +CVAPI(void) cvRegisterType( const CvTypeInfo* info ); +CVAPI(void) cvUnregisterType( const char* type_name ); +CVAPI(CvTypeInfo*) cvFirstType(void); +CVAPI(CvTypeInfo*) cvFindType( const char* type_name ); +CVAPI(CvTypeInfo*) cvTypeOf( const void* struct_ptr ); + +/* universal functions */ +CVAPI(void) cvRelease( void** struct_ptr ); +CVAPI(void*) cvClone( const void* struct_ptr ); + +/* simple API for reading/writing data */ +CVAPI(void) cvSave( const char* filename, const void* struct_ptr, + const char* name CV_DEFAULT(NULL), + const char* comment CV_DEFAULT(NULL), + CvAttrList attributes CV_DEFAULT(cvAttrList())); +CVAPI(void*) cvLoad( const char* filename, + CvMemStorage* memstorage CV_DEFAULT(NULL), + const char* name CV_DEFAULT(NULL), + const char** real_name CV_DEFAULT(NULL) ); + +/*********************************** Measuring Execution Time ***************************/ + +/* helper functions for RNG initialization and accurate time measurement: + uses internal clock counter on x86 */ +CVAPI(int64) cvGetTickCount( void ); +CVAPI(double) cvGetTickFrequency( void ); + +/*********************************** CPU capabilities ***********************************/ + +#define CV_CPU_NONE 0 +#define CV_CPU_MMX 1 +#define CV_CPU_SSE 2 +#define CV_CPU_SSE2 3 +#define CV_CPU_SSE3 4 +#define CV_CPU_SSSE3 5 +#define CV_CPU_SSE4_1 6 +#define CV_CPU_SSE4_2 7 +#define CV_CPU_POPCNT 8 +#define CV_CPU_AVX 10 +#define CV_HARDWARE_MAX_FEATURE 255 + +CVAPI(int) cvCheckHardwareSupport(int feature); + +/*********************************** Multi-Threading ************************************/ + +/* retrieve/set the number of threads used in OpenMP implementations */ +CVAPI(int) cvGetNumThreads( void ); +CVAPI(void) cvSetNumThreads( int threads CV_DEFAULT(0) ); +/* get index of the thread being executed */ +CVAPI(int) cvGetThreadNum( void ); + + 
+/********************************** Error Handling **************************************/ + +/* Get current OpenCV error status */ +CVAPI(int) cvGetErrStatus( void ); + +/* Sets error status silently */ +CVAPI(void) cvSetErrStatus( int status ); + +#define CV_ErrModeLeaf 0 /* Print error and exit program */ +#define CV_ErrModeParent 1 /* Print error and continue */ +#define CV_ErrModeSilent 2 /* Don't print and continue */ + +/* Retrives current error processing mode */ +CVAPI(int) cvGetErrMode( void ); + +/* Sets error processing mode, returns previously used mode */ +CVAPI(int) cvSetErrMode( int mode ); + +/* Sets error status and performs some additonal actions (displaying message box, + writing message to stderr, terminating application etc.) + depending on the current error mode */ +CVAPI(void) cvError( int status, const char* func_name, + const char* err_msg, const char* file_name, int line ); + +/* Retrieves textual description of the error given its code */ +CVAPI(const char*) cvErrorStr( int status ); + +/* Retrieves detailed information about the last error occured */ +CVAPI(int) cvGetErrInfo( const char** errcode_desc, const char** description, + const char** filename, int* line ); + +/* Maps IPP error codes to the counterparts from OpenCV */ +CVAPI(int) cvErrorFromIppStatus( int ipp_status ); + +typedef int (CV_CDECL *CvErrorCallback)( int status, const char* func_name, + const char* err_msg, const char* file_name, int line, void* userdata ); + +/* Assigns a new error-handling function */ +CVAPI(CvErrorCallback) cvRedirectError( CvErrorCallback error_handler, + void* userdata CV_DEFAULT(NULL), + void** prev_userdata CV_DEFAULT(NULL) ); + +/* + Output to: + cvNulDevReport - nothing + cvStdErrReport - console(fprintf(stderr,...)) + cvGuiBoxReport - MessageBox(WIN32) + */ +CVAPI(int) cvNulDevReport( int status, const char* func_name, const char* err_msg, + const char* file_name, int line, void* userdata ); + +CVAPI(int) cvStdErrReport( int status, const 
char* func_name, const char* err_msg, + const char* file_name, int line, void* userdata ); + +CVAPI(int) cvGuiBoxReport( int status, const char* func_name, const char* err_msg, + const char* file_name, int line, void* userdata ); + +#define OPENCV_ERROR(status,func,context) \ +cvError((status),(func),(context),__FILE__,__LINE__) + +#define OPENCV_ERRCHK(func,context) \ +{if (cvGetErrStatus() >= 0) \ +{OPENCV_ERROR(CV_StsBackTrace,(func),(context));}} + +#define OPENCV_ASSERT(expr,func,context) \ +{if (! (expr)) \ +{OPENCV_ERROR(CV_StsInternal,(func),(context));}} + +#define OPENCV_RSTERR() (cvSetErrStatus(CV_StsOk)) + +#define OPENCV_CALL( Func ) \ +{ \ +Func; \ +} + + +/* CV_FUNCNAME macro defines icvFuncName constant which is used by CV_ERROR macro */ +#ifdef CV_NO_FUNC_NAMES +#define CV_FUNCNAME( Name ) +#define cvFuncName "" +#else +#define CV_FUNCNAME( Name ) \ +static char cvFuncName[] = Name +#endif + + +/* + CV_ERROR macro unconditionally raises error with passed code and message. + After raising error, control will be transferred to the exit label. + */ +#define CV_ERROR( Code, Msg ) \ +{ \ + cvError( (Code), cvFuncName, Msg, __FILE__, __LINE__ ); \ + __CV_EXIT__; \ +} + +/* Simplified form of CV_ERROR */ +#define CV_ERROR_FROM_CODE( code ) \ + CV_ERROR( code, "" ) + +/* + CV_CHECK macro checks error status after CV (or IPL) + function call. If error detected, control will be transferred to the exit + label. + */ +#define CV_CHECK() \ +{ \ + if( cvGetErrStatus() < 0 ) \ + CV_ERROR( CV_StsBackTrace, "Inner function failed." ); \ +} + + +/* + CV_CALL macro calls CV (or IPL) function, checks error status and + signals a error if the function failed. 
Useful in "parent node" + error procesing mode + */ +#define CV_CALL( Func ) \ +{ \ + Func; \ + CV_CHECK(); \ +} + + +/* Runtime assertion macro */ +#define CV_ASSERT( Condition ) \ +{ \ + if( !(Condition) ) \ + CV_ERROR( CV_StsInternal, "Assertion: " #Condition " failed" ); \ +} + +#define __CV_BEGIN__ { +#define __CV_END__ goto exit; exit: ; } +#define __CV_EXIT__ goto exit + +#ifdef __cplusplus +} + +// classes for automatic module/RTTI data registration/unregistration +struct CV_EXPORTS CvModule +{ + CvModule( CvModuleInfo* _info ); + ~CvModule(); + CvModuleInfo* info; + + static CvModuleInfo* first; + static CvModuleInfo* last; +}; + +struct CV_EXPORTS CvType +{ + CvType( const char* type_name, + CvIsInstanceFunc is_instance, CvReleaseFunc release=0, + CvReadFunc read=0, CvWriteFunc write=0, CvCloneFunc clone=0 ); + ~CvType(); + CvTypeInfo* info; + + static CvTypeInfo* first; + static CvTypeInfo* last; +}; + +#endif + +#endif diff --git a/include/opencv2/core/eigen.hpp b/include/opencv2/core/eigen.hpp new file mode 100644 index 0000000..505652f --- /dev/null +++ b/include/opencv2/core/eigen.hpp @@ -0,0 +1,186 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_CORE_EIGEN_HPP__ +#define __OPENCV_CORE_EIGEN_HPP__ + +#ifdef __cplusplus + +#include "cxcore.h" + +namespace cv +{ + +template +void eigen2cv( const Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& src, Mat& dst ) +{ + if( !(src.Flags & Eigen::RowMajorBit) ) + { + Mat _src(src.cols(), src.rows(), DataType<_Tp>::type, + (void*)src.data(), src.stride()*sizeof(_Tp)); + transpose(_src, dst); + } + else + { + Mat _src(src.rows(), src.cols(), DataType<_Tp>::type, + (void*)src.data(), src.stride()*sizeof(_Tp)); + _src.copyTo(dst); + } +} + +template +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& dst ) +{ + CV_DbgAssert(src.rows == _rows && src.cols == _cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(src.cols, src.rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else if( src.cols == src.rows ) + { + src.convertTo(_dst, _dst.type()); + transpose(_dst, _dst); + } + else + Mat(src.t()).convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(src.rows, src.cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + +template +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, Eigen::Dynamic, Eigen::Dynamic>& dst ) +{ + dst.resize(src.rows, src.cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(src.cols, src.rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else if( src.cols == src.rows ) + { + src.convertTo(_dst, _dst.type()); + transpose(_dst, _dst); + } + else + Mat(src.t()).convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(src.rows, src.cols, DataType<_Tp>::type, 
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + + +template +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, Eigen::Dynamic, 1>& dst ) +{ + CV_Assert(src.cols == 1); + dst.resize(src.rows); + + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(src.cols, src.rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else + Mat(src.t()).convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(src.rows, src.cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + + +template +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, 1, Eigen::Dynamic>& dst ) +{ + CV_Assert(src.rows == 1); + dst.resize(src.cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(src.cols, src.rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else + Mat(src.t()).convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(src.rows, src.cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + +} + +#endif + +#endif + diff --git a/include/opencv2/core/internal.hpp b/include/opencv2/core/internal.hpp new file mode 100644 index 0000000..c147762 --- /dev/null +++ b/include/opencv2/core/internal.hpp @@ -0,0 +1,707 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* The header is for internal use and it is likely to change. 
+ It contains some macro definitions that are used in cxcore, cv, cvaux + and, probably, other libraries. If you need some of this functionality, + the safe way is to copy it into your code and rename the macros. +*/ +#ifndef __OPENCV_CORE_INTERNAL_HPP__ +#define __OPENCV_CORE_INTERNAL_HPP__ + +#include + +#if defined WIN32 || defined _WIN32 +# ifndef WIN32 +# define WIN32 +# endif +# ifndef _WIN32 +# define _WIN32 +# endif +#endif + +#if defined WIN32 || defined WINCE +#ifndef _WIN32_WINNT // This is needed for the declaration of TryEnterCriticalSection in winbase.h with Visual Studio 2005 (and older?) +#define _WIN32_WINNT 0x0400 // http://msdn.microsoft.com/en-us/library/ms686857(VS.85).aspx +#endif +#include +#undef small +#undef min +#undef max +#else +#include +#include +#endif + +#ifdef __BORLANDC__ +#ifndef WIN32 + #define WIN32 +#endif +#ifndef _WIN32 + #define _WIN32 +#endif + #define CV_DLL + #undef _CV_ALWAYS_PROFILE_ + #define _CV_ALWAYS_NO_PROFILE_ +#endif + +#ifndef FALSE +#define FALSE 0 +#endif +#ifndef TRUE +#define TRUE 1 +#endif + +#define __BEGIN__ __CV_BEGIN__ +#define __END__ __CV_END__ +#define EXIT __CV_EXIT__ + +#ifdef HAVE_IPP +#include "ipp.h" + +CV_INLINE IppiSize ippiSize(int width, int height) +{ + IppiSize size = { width, height }; + return size; +} +#endif + +#if defined __SSE2a__ || _MSC_VERa >= 1300 +#include "emmintrin.h" +#define CV_SSE 1 +#define CV_SSE2 1 +#if defined __SSE3__ || _MSC_VER >= 1500 +#include "pmmintrin.h" +#define CV_SSE3 1 +#endif +#else +#define CV_SSE 0 +#define CV_SSE2 0 +#define CV_SSE3 0 +#endif + +#if defined ANDROID && defined __ARM_NEON__ +#include "arm_neon.h" +#define CV_NEON 1 + +#define CPU_HAS_NEON_FEATURE (true) +//TODO: make real check using stuff from "cpu-features.h" +//((bool)android_getCpuFeatures() & ANDROID_CPU_ARM_FEATURE_NEON) +#else +#define CV_NEON 0 +#define CPU_HAS_NEON_FEATURE (false) +#endif + +#ifndef IPPI_CALL +#define IPPI_CALL(func) CV_Assert((func) >= 0) +#endif + +#ifdef 
HAVE_TBB + #include "tbb/tbb_stddef.h" + #if TBB_VERSION_MAJOR*100 + TBB_VERSION_MINOR >= 202 + #include "tbb/tbb.h" + #include "tbb/task.h" + #undef min + #undef max + #else + #undef HAVE_TBB + #endif +#endif + +#ifdef HAVE_EIGEN + #include + #include "opencv2/core/eigen.hpp" +#endif + +#ifdef __cplusplus + +#ifdef HAVE_TBB + namespace cv + { + typedef tbb::blocked_range BlockedRange; + + template static inline + void parallel_for( const BlockedRange& range, const Body& body ) + { + tbb::parallel_for(range, body); + } + + template static inline + void parallel_do( Iterator first, Iterator last, const Body& body ) + { + tbb::parallel_do(first, last, body); + } + + typedef tbb::split Split; + + template static inline + void parallel_reduce( const BlockedRange& range, Body& body ) + { + tbb::parallel_reduce(range, body); + } + + typedef tbb::concurrent_vector ConcurrentRectVector; + } +#else + namespace cv + { + class BlockedRange + { + public: + BlockedRange() : _begin(0), _end(0), _grainsize(0) {} + BlockedRange(int b, int e, int g=1) : _begin(b), _end(e), _grainsize(g) {} + int begin() const { return _begin; } + int end() const { return _end; } + int grainsize() const { return _grainsize; } + + protected: + int _begin, _end, _grainsize; + }; + + +#ifdef HAVE_THREADING_FRAMEWORK +#include "threading_framework.hpp" + + template + static void parallel_for( const BlockedRange& range, const Body& body ) + { + tf::parallel_for(range, body); + } + typedef tf::ConcurrentVector ConcurrentRectVector; +#else + template static inline + void parallel_for( const BlockedRange& range, const Body& body ) + { + body(range); + } + typedef std::vector ConcurrentRectVector; +#endif + + template static inline + void parallel_do( Iterator first, Iterator last, const Body& body ) + { + for( ; first != last; ++first ) + body(*first); + } + + class Split {}; + + template static inline + void parallel_reduce( const BlockedRange& range, Body& body ) + { + body(range); + } + + } +#endif 
+#endif + +/* maximal size of vector to run matrix operations on it inline (i.e. w/o ipp calls) */ +#define CV_MAX_INLINE_MAT_OP_SIZE 10 + +/* maximal linear size of matrix to allocate it on stack. */ +#define CV_MAX_LOCAL_MAT_SIZE 32 + +/* maximal size of local memory storage */ +#define CV_MAX_LOCAL_SIZE \ + (CV_MAX_LOCAL_MAT_SIZE*CV_MAX_LOCAL_MAT_SIZE*(int)sizeof(double)) + +/* default image row align (in bytes) */ +#define CV_DEFAULT_IMAGE_ROW_ALIGN 4 + +/* matrices are continuous by default */ +#define CV_DEFAULT_MAT_ROW_ALIGN 1 + +/* maximum size of dynamic memory buffer. + cvAlloc reports an error if a larger block is requested. */ +#define CV_MAX_ALLOC_SIZE (((size_t)1 << (sizeof(size_t)*8-2))) + +/* the alignment of all the allocated buffers */ +#define CV_MALLOC_ALIGN 16 + +/* default alignment for dynamic data strucutures, resided in storages. */ +#define CV_STRUCT_ALIGN ((int)sizeof(double)) + +/* default storage block size */ +#define CV_STORAGE_BLOCK_SIZE ((1<<16) - 128) + +/* default memory block for sparse array elements */ +#define CV_SPARSE_MAT_BLOCK (1<<12) + +/* initial hash table size */ +#define CV_SPARSE_HASH_SIZE0 (1<<10) + +/* maximal average node_count/hash_size ratio beyond which hash table is resized */ +#define CV_SPARSE_HASH_RATIO 3 + +/* max length of strings */ +#define CV_MAX_STRLEN 1024 + +#if 0 /*def CV_CHECK_FOR_NANS*/ + #define CV_CHECK_NANS( arr ) cvCheckArray((arr)) +#else + #define CV_CHECK_NANS( arr ) +#endif + +/****************************************************************************************\ +* Common declarations * +\****************************************************************************************/ + +/* get alloca declaration */ +#ifdef __GNUC__ + #undef alloca + #define alloca __builtin_alloca + #define CV_HAVE_ALLOCA 1 +#elif defined WIN32 || defined _WIN32 || \ + defined WINCE || defined _MSC_VER || defined __BORLANDC__ + #include + #define CV_HAVE_ALLOCA 1 +#elif defined HAVE_ALLOCA_H + #include + 
#define CV_HAVE_ALLOCA 1 +#elif defined HAVE_ALLOCA + #include + #define CV_HAVE_ALLOCA 1 +#else + #undef CV_HAVE_ALLOCA +#endif + +#ifdef __GNUC__ +#define CV_DECL_ALIGNED(x) __attribute__ ((aligned (x))) +#elif defined _MSC_VER +#define CV_DECL_ALIGNED(x) __declspec(align(x)) +#else +#define CV_DECL_ALIGNED(x) +#endif + +#if CV_HAVE_ALLOCA +/* ! DO NOT make it an inline function */ +#define cvStackAlloc(size) cvAlignPtr( alloca((size) + CV_MALLOC_ALIGN), CV_MALLOC_ALIGN ) +#endif + +#ifndef CV_IMPL +#define CV_IMPL CV_EXTERN_C +#endif + +#define CV_DBG_BREAK() { volatile int* crashMe = 0; *crashMe = 0; } + +/* default step, set in case of continuous data + to work around checks for valid step in some ipp functions */ +#define CV_STUB_STEP (1 << 30) + +#define CV_SIZEOF_FLOAT ((int)sizeof(float)) +#define CV_SIZEOF_SHORT ((int)sizeof(short)) + +#define CV_ORIGIN_TL 0 +#define CV_ORIGIN_BL 1 + +/* IEEE754 constants and macros */ +#define CV_POS_INF 0x7f800000 +#define CV_NEG_INF 0x807fffff /* CV_TOGGLE_FLT(0xff800000) */ +#define CV_1F 0x3f800000 +#define CV_TOGGLE_FLT(x) ((x)^((int)(x) < 0 ? 0x7fffffff : 0)) +#define CV_TOGGLE_DBL(x) \ + ((x)^((int64)(x) < 0 ? CV_BIG_INT(0x7fffffffffffffff) : 0)) + +#define CV_NOP(a) (a) +#define CV_ADD(a, b) ((a) + (b)) +#define CV_SUB(a, b) ((a) - (b)) +#define CV_MUL(a, b) ((a) * (b)) +#define CV_AND(a, b) ((a) & (b)) +#define CV_OR(a, b) ((a) | (b)) +#define CV_XOR(a, b) ((a) ^ (b)) +#define CV_ANDN(a, b) (~(a) & (b)) +#define CV_ORN(a, b) (~(a) | (b)) +#define CV_SQR(a) ((a) * (a)) + +#define CV_LT(a, b) ((a) < (b)) +#define CV_LE(a, b) ((a) <= (b)) +#define CV_EQ(a, b) ((a) == (b)) +#define CV_NE(a, b) ((a) != (b)) +#define CV_GT(a, b) ((a) > (b)) +#define CV_GE(a, b) ((a) >= (b)) + +#define CV_NONZERO(a) ((a) != 0) +#define CV_NONZERO_FLT(a) (((a)+(a)) != 0) + +/* general-purpose saturation macros */ +#define CV_CAST_8U(t) (uchar)(!((t) & ~255) ? (t) : (t) > 0 ? 255 : 0) +#define CV_CAST_8S(t) (schar)(!(((t)+128) & ~255) ? 
(t) : (t) > 0 ? 127 : -128) +#define CV_CAST_16U(t) (ushort)(!((t) & ~65535) ? (t) : (t) > 0 ? 65535 : 0) +#define CV_CAST_16S(t) (short)(!(((t)+32768) & ~65535) ? (t) : (t) > 0 ? 32767 : -32768) +#define CV_CAST_32S(t) (int)(t) +#define CV_CAST_64S(t) (int64)(t) +#define CV_CAST_32F(t) (float)(t) +#define CV_CAST_64F(t) (double)(t) + +#define CV_PASTE2(a,b) a##b +#define CV_PASTE(a,b) CV_PASTE2(a,b) + +#define CV_EMPTY +#define CV_MAKE_STR(a) #a + +#define CV_ZERO_OBJ(x) memset((x), 0, sizeof(*(x))) + +#define CV_DIM(static_array) ((int)(sizeof(static_array)/sizeof((static_array)[0]))) + +#define cvUnsupportedFormat "Unsupported format" + +CV_INLINE void* cvAlignPtr( const void* ptr, int align CV_DEFAULT(32) ) +{ + assert( (align & (align-1)) == 0 ); + return (void*)( ((size_t)ptr + align - 1) & ~(size_t)(align-1) ); +} + +CV_INLINE int cvAlign( int size, int align ) +{ + assert( (align & (align-1)) == 0 && size < INT_MAX ); + return (size + align - 1) & -align; +} + +CV_INLINE CvSize cvGetMatSize( const CvMat* mat ) +{ + CvSize size; + size.width = mat->cols; + size.height = mat->rows; + return size; +} + +#define CV_DESCALE(x,n) (((x) + (1 << ((n)-1))) >> (n)) +#define CV_FLT_TO_FIX(x,n) cvRound((x)*(1<<(n))) + +/****************************************************************************************\ + + Generic implementation of QuickSort algorithm. + ---------------------------------------------- + Using this macro user can declare customized sort function that can be much faster + than built-in qsort function because of lower overhead on elements + comparison and exchange. The macro takes less_than (or LT) argument - a macro or function + that takes 2 arguments returns non-zero if the first argument should be before the second + one in the sorted sequence and zero otherwise. + + Example: + + Suppose that the task is to sort points by ascending of y coordinates and if + y's are equal x's should ascend. 
+ + The code is: + ------------------------------------------------------------------------------ + #define cmp_pts( pt1, pt2 ) \ + ((pt1).y < (pt2).y || ((pt1).y < (pt2).y && (pt1).x < (pt2).x)) + + [static] CV_IMPLEMENT_QSORT( icvSortPoints, CvPoint, cmp_pts ) + ------------------------------------------------------------------------------ + + After that the function "void icvSortPoints( CvPoint* array, size_t total, int aux );" + is available to user. + + aux is an additional parameter, which can be used when comparing elements. + The current implementation was derived from *BSD system qsort(): + + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + +\****************************************************************************************/ + +#define CV_IMPLEMENT_QSORT_EX( func_name, T, LT, user_data_type ) \ +void func_name( T *array, size_t total, user_data_type aux ) \ +{ \ + int isort_thresh = 7; \ + T t; \ + int sp = 0; \ + \ + struct \ + { \ + T *lb; \ + T *ub; \ + } \ + stack[48]; \ + \ + aux = aux; \ + \ + if( total <= 1 ) \ + return; \ + \ + stack[0].lb = array; \ + stack[0].ub = array + (total - 1); \ + \ + while( sp >= 0 ) \ + { \ + T* left = stack[sp].lb; \ + T* right = stack[sp--].ub; \ + \ + for(;;) \ + { \ + int i, n = (int)(right - left) + 1, m; \ + T* ptr; \ + T* ptr2; \ + \ + if( n <= isort_thresh ) \ + { \ + insert_sort: \ + for( ptr = left + 1; ptr <= right; ptr++ ) \ + { \ + for( ptr2 = ptr; ptr2 > left && LT(ptr2[0],ptr2[-1]); ptr2--) \ + CV_SWAP( ptr2[0], ptr2[-1], t ); \ + } \ + break; \ + } \ + else \ + { \ + T* left0; \ + T* left1; \ + T* right0; \ + T* right1; \ + T* pivot; \ + T* a; \ + T* b; \ + T* c; \ + int swap_cnt = 0; \ + \ + left0 = left; \ + right0 = right; \ + pivot = left + (n/2); \ + \ + if( n > 40 ) \ + { \ + int d = n / 8; \ + a = left, b = left + d, c = left + 2*d; \ + left = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \ + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \ + \ + a = pivot - d, b = pivot, c = pivot + d; \ + pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \ + : (LT(*c, *b) ? b : (LT(*a, *c) ? 
a : c)); \ + \ + a = right - 2*d, b = right - d, c = right; \ + right = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \ + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \ + } \ + \ + a = left, b = pivot, c = right; \ + pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \ + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \ + if( pivot != left0 ) \ + { \ + CV_SWAP( *pivot, *left0, t ); \ + pivot = left0; \ + } \ + left = left1 = left0 + 1; \ + right = right1 = right0; \ + \ + for(;;) \ + { \ + while( left <= right && !LT(*pivot, *left) ) \ + { \ + if( !LT(*left, *pivot) ) \ + { \ + if( left > left1 ) \ + CV_SWAP( *left1, *left, t ); \ + swap_cnt = 1; \ + left1++; \ + } \ + left++; \ + } \ + \ + while( left <= right && !LT(*right, *pivot) ) \ + { \ + if( !LT(*pivot, *right) ) \ + { \ + if( right < right1 ) \ + CV_SWAP( *right1, *right, t ); \ + swap_cnt = 1; \ + right1--; \ + } \ + right--; \ + } \ + \ + if( left > right ) \ + break; \ + CV_SWAP( *left, *right, t ); \ + swap_cnt = 1; \ + left++; \ + right--; \ + } \ + \ + if( swap_cnt == 0 ) \ + { \ + left = left0, right = right0; \ + goto insert_sort; \ + } \ + \ + n = MIN( (int)(left1 - left0), (int)(left - left1) ); \ + for( i = 0; i < n; i++ ) \ + CV_SWAP( left0[i], left[i-n], t ); \ + \ + n = MIN( (int)(right0 - right1), (int)(right1 - right) ); \ + for( i = 0; i < n; i++ ) \ + CV_SWAP( left[i], right0[i-n+1], t ); \ + n = (int)(left - left1); \ + m = (int)(right1 - right); \ + if( n > 1 ) \ + { \ + if( m > 1 ) \ + { \ + if( n > m ) \ + { \ + stack[++sp].lb = left0; \ + stack[sp].ub = left0 + n - 1; \ + left = right0 - m + 1, right = right0; \ + } \ + else \ + { \ + stack[++sp].lb = right0 - m + 1; \ + stack[sp].ub = right0; \ + left = left0, right = left0 + n - 1; \ + } \ + } \ + else \ + left = left0, right = left0 + n - 1; \ + } \ + else if( m > 1 ) \ + left = right0 - m + 1, right = right0; \ + else \ + break; \ + } \ + } \ + } \ +} + +#define CV_IMPLEMENT_QSORT( func_name, T, cmp ) \ + 
CV_IMPLEMENT_QSORT_EX( func_name, T, cmp, int ) + +/****************************************************************************************\ +* Structures and macros for integration with IPP * +\****************************************************************************************/ + +/* IPP-compatible return codes */ +typedef enum CvStatus +{ + CV_BADMEMBLOCK_ERR = -113, + CV_INPLACE_NOT_SUPPORTED_ERR= -112, + CV_UNMATCHED_ROI_ERR = -111, + CV_NOTFOUND_ERR = -110, + CV_BADCONVERGENCE_ERR = -109, + + CV_BADDEPTH_ERR = -107, + CV_BADROI_ERR = -106, + CV_BADHEADER_ERR = -105, + CV_UNMATCHED_FORMATS_ERR = -104, + CV_UNSUPPORTED_COI_ERR = -103, + CV_UNSUPPORTED_CHANNELS_ERR = -102, + CV_UNSUPPORTED_DEPTH_ERR = -101, + CV_UNSUPPORTED_FORMAT_ERR = -100, + + CV_BADARG_ERR = -49, //ipp comp + CV_NOTDEFINED_ERR = -48, //ipp comp + + CV_BADCHANNELS_ERR = -47, //ipp comp + CV_BADRANGE_ERR = -44, //ipp comp + CV_BADSTEP_ERR = -29, //ipp comp + + CV_BADFLAG_ERR = -12, + CV_DIV_BY_ZERO_ERR = -11, //ipp comp + CV_BADCOEF_ERR = -10, + + CV_BADFACTOR_ERR = -7, + CV_BADPOINT_ERR = -6, + CV_BADSCALE_ERR = -4, + CV_OUTOFMEM_ERR = -3, + CV_NULLPTR_ERR = -2, + CV_BADSIZE_ERR = -1, + CV_NO_ERR = 0, + CV_OK = CV_NO_ERR +} +CvStatus; + +#define CV_NOTHROW throw() + +typedef struct CvFuncTable +{ + void* fn_2d[CV_DEPTH_MAX]; +} +CvFuncTable; + +typedef struct CvBigFuncTable +{ + void* fn_2d[CV_DEPTH_MAX*4]; +} +CvBigFuncTable; + +#define CV_INIT_FUNC_TAB( tab, FUNCNAME, FLAG ) \ + (tab).fn_2d[CV_8U] = (void*)FUNCNAME##_8u##FLAG; \ + (tab).fn_2d[CV_8S] = 0; \ + (tab).fn_2d[CV_16U] = (void*)FUNCNAME##_16u##FLAG; \ + (tab).fn_2d[CV_16S] = (void*)FUNCNAME##_16s##FLAG; \ + (tab).fn_2d[CV_32S] = (void*)FUNCNAME##_32s##FLAG; \ + (tab).fn_2d[CV_32F] = (void*)FUNCNAME##_32f##FLAG; \ + (tab).fn_2d[CV_64F] = (void*)FUNCNAME##_64f##FLAG + +#endif diff --git a/include/opencv2/core/mat.hpp b/include/opencv2/core/mat.hpp new file mode 100644 index 0000000..6f444e4 --- /dev/null +++ 
b/include/opencv2/core/mat.hpp @@ -0,0 +1,2557 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CORE_MATRIX_OPERATIONS_HPP__ +#define __OPENCV_CORE_MATRIX_OPERATIONS_HPP__ + +#ifndef SKIP_INCLUDES +#include +#include +#endif // SKIP_INCLUDES + +#ifdef __cplusplus + +namespace cv +{ + +//////////////////////////////// Mat //////////////////////////////// + +inline Mat::Mat() + : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows) +{ +} + +inline Mat::Mat(int _rows, int _cols, int _type) + : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows) +{ + create(_rows, _cols, _type); +} + +inline Mat::Mat(int _rows, int _cols, int _type, const Scalar& _s) + : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows) +{ + create(_rows, _cols, _type); + *this = _s; +} + +inline Mat::Mat(Size _sz, int _type) + : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows) +{ + create( _sz.height, _sz.width, _type ); +} + +inline Mat::Mat(Size _sz, int _type, const Scalar& _s) + : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows) +{ + create(_sz.height, _sz.width, _type); + *this = _s; +} + +inline Mat::Mat(int _dims, const int* _sz, int _type) + : 
flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows) +{ + create(_dims, _sz, _type); +} + +inline Mat::Mat(int _dims, const int* _sz, int _type, const Scalar& _s) + : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows) +{ + create(_dims, _sz, _type); + *this = _s; +} + +inline Mat::Mat(const Mat& m) + : flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), data(m.data), + refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), + datalimit(m.datalimit), allocator(m.allocator), size(&rows) +{ + if( refcount ) + CV_XADD(refcount, 1); + if( m.dims <= 2 ) + { + step[0] = m.step[0]; step[1] = m.step[1]; + } + else + { + dims = 0; + copySize(m); + } +} + +inline Mat::Mat(int _rows, int _cols, int _type, void* _data, size_t _step) + : flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_rows), cols(_cols), + data((uchar*)_data), refcount(0), datastart((uchar*)_data), dataend(0), + datalimit(0), allocator(0), size(&rows) +{ + size_t esz = CV_ELEM_SIZE(_type), minstep = cols*esz; + if( _step == AUTO_STEP ) + { + _step = minstep; + flags |= CONTINUOUS_FLAG; + } + else + { + if( rows == 1 ) _step = minstep; + CV_DbgAssert( _step >= minstep ); + flags |= _step == minstep ? 
CONTINUOUS_FLAG : 0; + } + step[0] = _step; step[1] = esz; + datalimit = datastart + _step*rows; + dataend = datalimit - _step + minstep; +} + +inline Mat::Mat(Size _sz, int _type, void* _data, size_t _step) + : flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_sz.height), cols(_sz.width), + data((uchar*)_data), refcount(0), datastart((uchar*)_data), dataend(0), + datalimit(0), allocator(0), size(&rows) +{ + size_t esz = CV_ELEM_SIZE(_type), minstep = cols*esz; + if( _step == AUTO_STEP ) + { + _step = minstep; + flags |= CONTINUOUS_FLAG; + } + else + { + if( rows == 1 ) _step = minstep; + CV_DbgAssert( _step >= minstep ); + flags |= _step == minstep ? CONTINUOUS_FLAG : 0; + } + step[0] = _step; step[1] = esz; + datalimit = datastart + _step*rows; + dataend = datalimit - _step + minstep; +} + + +inline Mat::Mat(const CvMat* m, bool copyData) + : flags(MAGIC_VAL + (m->type & (CV_MAT_TYPE_MASK|CV_MAT_CONT_FLAG))), + dims(2), rows(m->rows), cols(m->cols), data(m->data.ptr), refcount(0), + datastart(m->data.ptr), allocator(0), size(&rows) +{ + if( !copyData ) + { + size_t esz = CV_ELEM_SIZE(m->type), minstep = cols*esz, _step = m->step; + if( _step == 0 ) + _step = minstep; + datalimit = datastart + _step*rows; + dataend = datalimit - _step + minstep; + step[0] = _step; step[1] = esz; + } + else + { + data = datastart = dataend = 0; + Mat(m->rows, m->cols, m->type, m->data.ptr, m->step).copyTo(*this); + } +} + +template inline Mat::Mat(const vector<_Tp>& vec, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows((int)vec.size()), cols(1), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if(vec.empty()) + return; + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + data = datastart = (uchar*)&vec[0]; + datalimit = dataend = datastart + rows*step[0]; + } + else + Mat((int)vec.size(), 1, DataType<_Tp>::type, (uchar*)&vec[0]).copyTo(*this); +} + + +template inline Mat::Mat(const Vec<_Tp, n>& 
vec, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows(n), cols(1), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + data = datastart = (uchar*)vec.val; + datalimit = dataend = datastart + rows*step[0]; + } + else + Mat(n, 1, DataType<_Tp>::type, (void*)vec.val).copyTo(*this); +} + + +template inline Mat::Mat(const Matx<_Tp,m,n>& M, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows(m), cols(n), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if( !copyData ) + { + step[0] = cols*sizeof(_Tp); + step[1] = sizeof(_Tp); + data = datastart = (uchar*)M.val; + datalimit = dataend = datastart + rows*step[0]; + } + else + Mat(m, n, DataType<_Tp>::type, (uchar*)M.val).copyTo(*this); +} + + +template inline Mat::Mat(const Point_<_Tp>& pt, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows(2), cols(1), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + data = datastart = (uchar*)&pt.x; + datalimit = dataend = datastart + rows*step[0]; + } + else + { + create(2, 1, DataType<_Tp>::type); + ((_Tp*)data)[0] = pt.x; + ((_Tp*)data)[1] = pt.y; + } +} + + +template inline Mat::Mat(const Point3_<_Tp>& pt, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows(3), cols(1), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + data = datastart = (uchar*)&pt.x; + datalimit = dataend = datastart + rows*step[0]; + } + else + { + create(3, 1, DataType<_Tp>::type); + ((_Tp*)data)[0] = pt.x; + ((_Tp*)data)[1] = pt.y; + ((_Tp*)data)[2] = pt.z; + } +} + + +template inline Mat::Mat(const MatCommaInitializer_<_Tp>& commaInitializer) + : 
flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + *this = *commaInitializer; +} + +inline Mat::~Mat() +{ + release(); + if( step.p != step.buf ) + fastFree(step.p); +} + +inline Mat& Mat::operator = (const Mat& m) +{ + if( this != &m ) + { + if( m.refcount ) + CV_XADD(m.refcount, 1); + release(); + flags = m.flags; + if( dims <= 2 && m.dims <= 2 ) + { + dims = m.dims; + rows = m.rows; + cols = m.cols; + step[0] = m.step[0]; + step[1] = m.step[1]; + } + else + copySize(m); + data = m.data; + datastart = m.datastart; + dataend = m.dataend; + datalimit = m.datalimit; + refcount = m.refcount; + allocator = m.allocator; + } + return *this; +} + +inline Mat Mat::row(int y) const { return Mat(*this, Range(y, y+1), Range::all()); } +inline Mat Mat::col(int x) const { return Mat(*this, Range::all(), Range(x, x+1)); } +inline Mat Mat::rowRange(int startrow, int endrow) const + { return Mat(*this, Range(startrow, endrow), Range::all()); } +inline Mat Mat::rowRange(const Range& r) const + { return Mat(*this, r, Range::all()); } +inline Mat Mat::colRange(int startcol, int endcol) const + { return Mat(*this, Range::all(), Range(startcol, endcol)); } +inline Mat Mat::colRange(const Range& r) const + { return Mat(*this, Range::all(), r); } + +inline Mat Mat::diag(const Mat& d) +{ + CV_Assert( d.cols == 1 ); + Mat m(d.rows, d.rows, d.type(), Scalar(0)), md = m.diag(); + d.copyTo(md); + return m; +} + +inline Mat Mat::clone() const +{ + Mat m; + copyTo(m); + return m; +} + +inline void Mat::assignTo( Mat& m, int type ) const +{ + if( type < 0 ) + m = *this; + else + convertTo(m, type); +} + +inline void Mat::create(int _rows, int _cols, int _type) +{ + _type &= TYPE_MASK; + if( dims <= 2 && rows == _rows && cols == _cols && type() == _type && data ) + return; + int sz[] = {_rows, _cols}; + create(2, sz, _type); +} + +inline void Mat::create(Size _sz, int _type) +{ + 
create(_sz.height, _sz.width, _type); +} + +inline void Mat::addref() +{ if( refcount ) CV_XADD(refcount, 1); } + +inline void Mat::release() +{ + if( refcount && CV_XADD(refcount, -1) == 1 ) + deallocate(); + data = datastart = dataend = datalimit = 0; + size.p[0] = 0; + refcount = 0; +} + +inline Mat Mat::operator()( Range rowRange, Range colRange ) const +{ + return Mat(*this, rowRange, colRange); +} + +inline Mat Mat::operator()( const Rect& roi ) const +{ return Mat(*this, roi); } + +inline Mat Mat::operator()(const Range* ranges) const +{ + return Mat(*this, ranges); +} + +inline Mat::operator CvMat() const +{ + CV_DbgAssert(dims <= 2); + CvMat m = cvMat(rows, dims == 1 ? 1 : cols, type(), data); + m.step = (int)step[0]; + m.type = (m.type & ~CONTINUOUS_FLAG) | (flags & CONTINUOUS_FLAG); + return m; +} + +inline bool Mat::isContinuous() const { return (flags & CONTINUOUS_FLAG) != 0; } +inline bool Mat::isSubmatrix() const { return (flags & SUBMATRIX_FLAG) != 0; } +inline size_t Mat::elemSize() const { return dims > 0 ? 
step.p[dims-1] : 0; } +inline size_t Mat::elemSize1() const { return CV_ELEM_SIZE1(flags); } +inline int Mat::type() const { return CV_MAT_TYPE(flags); } +inline int Mat::depth() const { return CV_MAT_DEPTH(flags); } +inline int Mat::channels() const { return CV_MAT_CN(flags); } +inline size_t Mat::step1(int i) const { return step.p[i]/elemSize1(); } +inline bool Mat::empty() const { return data == 0 || total() == 0; } +inline size_t Mat::total() const +{ + if( dims <= 2 ) + return rows*cols; + size_t p = 1; + for( int i = 0; i < dims; i++ ) + p *= size[i]; + return p; +} + +inline uchar* Mat::ptr(int y) +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) ); + return data + step.p[0]*y; +} + +inline const uchar* Mat::ptr(int y) const +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) ); + return data + step.p[0]*y; +} + +template inline _Tp* Mat::ptr(int y) +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) ); + return (_Tp*)(data + step.p[0]*y); +} + +template inline const _Tp* Mat::ptr(int y) const +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && data && (unsigned)y < (unsigned)size.p[0]) ); + return (const _Tp*)(data + step.p[0]*y); +} + + +inline uchar* Mat::ptr(int i0, int i1) +{ + CV_DbgAssert( dims >= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] ); + return data + i0*step.p[0] + i1*step.p[1]; +} + +inline const uchar* Mat::ptr(int i0, int i1) const +{ + CV_DbgAssert( dims >= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] ); + return data + i0*step.p[0] + i1*step.p[1]; +} + +template inline _Tp* Mat::ptr(int i0, int i1) +{ + CV_DbgAssert( dims >= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] ); + return (_Tp*)(data + i0*step.p[0] + i1*step.p[1]); +} + +template inline const _Tp* Mat::ptr(int i0, int i1) const +{ + 
CV_DbgAssert( dims >= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] ); + return (const _Tp*)(data + i0*step.p[0] + i1*step.p[1]); +} + +inline uchar* Mat::ptr(int i0, int i1, int i2) +{ + CV_DbgAssert( dims >= 3 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + (unsigned)i2 < (unsigned)size.p[2] ); + return data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]; +} + +inline const uchar* Mat::ptr(int i0, int i1, int i2) const +{ + CV_DbgAssert( dims >= 3 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + (unsigned)i2 < (unsigned)size.p[2] ); + return data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]; +} + +template inline _Tp* Mat::ptr(int i0, int i1, int i2) +{ + CV_DbgAssert( dims >= 3 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + (unsigned)i2 < (unsigned)size.p[2] ); + return (_Tp*)(data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]); +} + +template inline const _Tp* Mat::ptr(int i0, int i1, int i2) const +{ + CV_DbgAssert( dims >= 3 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + (unsigned)i2 < (unsigned)size.p[2] ); + return (const _Tp*)(data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]); +} + +inline uchar* Mat::ptr(const int* idx) +{ + int i, d = dims; + uchar* p = data; + CV_DbgAssert( d >= 1 && p ); + for( i = 0; i < d; i++ ) + { + CV_DbgAssert( (unsigned)idx[i] < (unsigned)size.p[i] ); + p += idx[i]*step.p[i]; + } + return p; +} + +inline const uchar* Mat::ptr(const int* idx) const +{ + int i, d = dims; + uchar* p = data; + CV_DbgAssert( d >= 1 && p ); + for( i = 0; i < d; i++ ) + { + CV_DbgAssert( (unsigned)idx[i] < (unsigned)size.p[i] ); + p += idx[i]*step.p[i]; + } + return p; +} + +template inline _Tp& Mat::at(int i0, int i1) +{ + CV_DbgAssert( dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] && + 
(unsigned)(i1*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && + CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1()); + return ((_Tp*)(data + step.p[0]*i0))[i1]; +} + +template inline const _Tp& Mat::at(int i0, int i1) const +{ + CV_DbgAssert( dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)(i1*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && + CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1()); + return ((const _Tp*)(data + step.p[0]*i0))[i1]; +} + +template inline _Tp& Mat::at(Point pt) +{ + CV_DbgAssert( dims <= 2 && data && (unsigned)pt.y < (unsigned)size.p[0] && + (unsigned)(pt.x*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && + CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1()); + return ((_Tp*)(data + step.p[0]*pt.y))[pt.x]; +} + +template inline const _Tp& Mat::at(Point pt) const +{ + CV_DbgAssert( dims <= 2 && data && (unsigned)pt.y < (unsigned)size.p[0] && + (unsigned)(pt.x*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && + CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1()); + return ((const _Tp*)(data + step.p[0]*pt.y))[pt.x]; +} + +template inline _Tp& Mat::at(int i0) +{ + CV_DbgAssert( dims <= 2 && data && (size.p[0] == 1 || size.p[1] == 1) && + (unsigned)i0 < (unsigned)(size.p[0] + size.p[1] - 1) && + elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(_Tp*)(data + step.p[size.p[0]==1]*i0); +} + +template inline const _Tp& Mat::at(int i0) const +{ + CV_DbgAssert( dims <= 2 && data && (size.p[0] == 1 || size.p[1] == 1) && + (unsigned)i0 < (unsigned)(size.p[0] + size.p[1] - 1) && + elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(_Tp*)(data + step.p[size.p[0]==1]*i0); +} + +template inline _Tp& Mat::at(int i0, int i1, int i2) +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(_Tp*)ptr(i0, i1, i2); +} +template inline const _Tp& Mat::at(int i0, int i1, int i2) const +{ + CV_DbgAssert( elemSize() == 
CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(const _Tp*)ptr(i0, i1, i2); +} +template inline _Tp& Mat::at(const int* idx) +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(_Tp*)ptr(idx); +} +template inline const _Tp& Mat::at(const int* idx) const +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(const _Tp*)ptr(idx); +} + + +template inline MatConstIterator_<_Tp> Mat::begin() const +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + return MatConstIterator_<_Tp>((const Mat_<_Tp>*)this); +} + +template inline MatConstIterator_<_Tp> Mat::end() const +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + MatConstIterator_<_Tp> it((const Mat_<_Tp>*)this); + it += total(); + return it; +} + +template inline MatIterator_<_Tp> Mat::begin() +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + return MatIterator_<_Tp>((Mat_<_Tp>*)this); +} + +template inline MatIterator_<_Tp> Mat::end() +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + MatIterator_<_Tp> it((Mat_<_Tp>*)this); + it += total(); + return it; +} + +template inline Mat::operator vector<_Tp>() const +{ + vector<_Tp> v; + copyTo(v); + return v; +} + +template inline Mat::operator Vec<_Tp, n>() const +{ + CV_Assert( data && dims <= 2 && (rows == 1 || cols == 1) && + rows + cols - 1 == n && channels() == 1 ); + + if( isContinuous() && type() == DataType<_Tp>::type ) + return Vec<_Tp, n>((_Tp*)data); + Vec<_Tp, n> v; Mat tmp(rows, cols, DataType<_Tp>::type, v.val); + convertTo(tmp, tmp.type()); + return v; +} + +template inline Mat::operator Matx<_Tp, m, n>() const +{ + CV_Assert( data && dims <= 2 && rows == m && cols == n && channels() == 1 ); + + if( isContinuous() && type() == DataType<_Tp>::type ) + return Matx<_Tp, m, n>((_Tp*)data); + Matx<_Tp, m, n> mtx; Mat tmp(rows, cols, DataType<_Tp>::type, mtx.val); + convertTo(tmp, tmp.type()); + return mtx; +} + + +template inline void Mat::push_back(const _Tp& elem) +{ + if( !data ) + { + *this = Mat(1, 1, 
DataType<_Tp>::type, (void*)&elem).clone(); + return; + } + CV_Assert(DataType<_Tp>::type == type() && cols == 1 + /* && dims == 2 (cols == 1 implies dims == 2) */); + uchar* tmp = dataend + step[0]; + if( !isSubmatrix() && isContinuous() && tmp <= datalimit ) + { + *(_Tp*)(data + (size.p[0]++)*step.p[0]) = elem; + dataend = tmp; + } + else + push_back_(&elem); +} + +template inline void Mat::push_back(const Mat_<_Tp>& m) +{ + push_back((const Mat&)m); +} + +inline Mat::MSize::MSize(int* _p) : p(_p) {} +inline Size Mat::MSize::operator()() const +{ + CV_DbgAssert(p[-1] <= 2); + return Size(p[1], p[0]); +} +inline const int& Mat::MSize::operator[](int i) const { return p[i]; } +inline int& Mat::MSize::operator[](int i) { return p[i]; } +inline Mat::MSize::operator const int*() const { return p; } + +inline bool Mat::MSize::operator == (const MSize& sz) const +{ + int d = p[-1], dsz = sz.p[-1]; + if( d != dsz ) + return false; + if( d == 2 ) + return p[0] == sz.p[0] && p[1] == sz.p[1]; + + for( int i = 0; i < d; i++ ) + if( p[i] != sz.p[i] ) + return false; + return true; +} + +inline bool Mat::MSize::operator != (const MSize& sz) const +{ + return !(*this == sz); +} + +inline Mat::MStep::MStep() { p = buf; p[0] = p[1] = 0; } +inline Mat::MStep::MStep(size_t s) { p = buf; p[0] = s; p[1] = 0; } +inline const size_t& Mat::MStep::operator[](int i) const { return p[i]; } +inline size_t& Mat::MStep::operator[](int i) { return p[i]; } +inline Mat::MStep::operator size_t() const +{ + CV_DbgAssert( p == buf ); + return buf[0]; +} +inline Mat::MStep& Mat::MStep::operator = (size_t s) +{ + CV_DbgAssert( p == buf ); + buf[0] = s; + return *this; +} + +static inline Mat cvarrToMatND(const CvArr* arr, bool copyData=false, int coiMode=0) +{ + return cvarrToMat(arr, copyData, true, coiMode); +} + +///////////////////////////////////////////// SVD ////////////////////////////////////////////////////// + +inline SVD::SVD() {} +inline SVD::SVD( InputArray m, int flags ) { operator 
()(m, flags); } +inline void SVD::solveZ( InputArray m, OutputArray _dst ) +{ + SVD svd(m); + _dst.create(svd.vt.cols, 1, svd.vt.type()); + Mat dst = _dst.getMat(); + svd.vt.row(svd.vt.rows-1).reshape(1,svd.vt.cols).copyTo(dst); +} + +template inline void + SVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w, Matx<_Tp, m, nm>& u, Matx<_Tp, n, nm>& vt ) +{ + assert( nm == MIN(m, n)); + Mat _a(a, false), _u(u, false), _w(w, false), _vt(vt, false); + SVD::compute(_a, _w, _u, _vt); + CV_Assert(_w.data == (uchar*)&w.val[0] && _u.data == (uchar*)&u.val[0] && _vt.data == (uchar*)&vt.val[0]); +} + +template inline void +SVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w ) +{ + assert( nm == MIN(m, n)); + Mat _a(a, false), _w(w, false); + SVD::compute(_a, _w); + CV_Assert(_w.data == (uchar*)&w.val[0]); +} + +template inline void +SVD::backSubst( const Matx<_Tp, nm, 1>& w, const Matx<_Tp, m, nm>& u, + const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs, + Matx<_Tp, n, nb>& dst ) +{ + assert( nm == MIN(m, n)); + Mat _u(u, false), _w(w, false), _vt(vt, false), _rhs(rhs, false), _dst(dst, false); + SVD::backSubst(_w, _u, _vt, _rhs, _dst); + CV_Assert(_dst.data == (uchar*)&dst.val[0]); +} + +///////////////////////////////// Mat_<_Tp> //////////////////////////////////// + +template inline Mat_<_Tp>::Mat_() + : Mat() { flags = (flags & ~CV_MAT_TYPE_MASK) | DataType<_Tp>::type; } + +template inline Mat_<_Tp>::Mat_(int _rows, int _cols) + : Mat(_rows, _cols, DataType<_Tp>::type) {} + +template inline Mat_<_Tp>::Mat_(int _rows, int _cols, const _Tp& value) + : Mat(_rows, _cols, DataType<_Tp>::type) { *this = value; } + +template inline Mat_<_Tp>::Mat_(Size _sz) + : Mat(_sz.height, _sz.width, DataType<_Tp>::type) {} + +template inline Mat_<_Tp>::Mat_(Size _sz, const _Tp& value) + : Mat(_sz.height, _sz.width, DataType<_Tp>::type) { *this = value; } + +template inline Mat_<_Tp>::Mat_(int _dims, const int* _sz) + : Mat(_dims, _sz, DataType<_Tp>::type) {} + 
+template inline Mat_<_Tp>::Mat_(int _dims, const int* _sz, const _Tp& _s) + : Mat(_dims, _sz, DataType<_Tp>::type, Scalar(_s)) {} + +template inline Mat_<_Tp>::Mat_(const Mat_<_Tp>& m, const Range* ranges) + : Mat(m, ranges) {} + +template inline Mat_<_Tp>::Mat_(const Mat& m) + : Mat() { flags = (flags & ~CV_MAT_TYPE_MASK) | DataType<_Tp>::type; *this = m; } + +template inline Mat_<_Tp>::Mat_(const Mat_& m) + : Mat(m) {} + +template inline Mat_<_Tp>::Mat_(int _rows, int _cols, _Tp* _data, size_t steps) + : Mat(_rows, _cols, DataType<_Tp>::type, _data, steps) {} + +template inline Mat_<_Tp>::Mat_(const Mat_& m, const Range& rowRange, const Range& colRange) + : Mat(m, rowRange, colRange) {} + +template inline Mat_<_Tp>::Mat_(const Mat_& m, const Rect& roi) + : Mat(m, roi) {} + +template template inline + Mat_<_Tp>::Mat_(const Vec::channel_type, n>& vec, bool copyData) + : Mat(n/DataType<_Tp>::channels, 1, DataType<_Tp>::type, (void*)&vec) +{ + CV_Assert(n%DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template template inline + Mat_<_Tp>::Mat_(const Matx::channel_type,m,n>& M, bool copyData) + : Mat(m, n/DataType<_Tp>::channels, DataType<_Tp>::type, (void*)&M) +{ + CV_Assert(n % DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template inline Mat_<_Tp>::Mat_(const Point_::channel_type>& pt, bool copyData) + : Mat(2/DataType<_Tp>::channels, 1, DataType<_Tp>::type, (void*)&pt) +{ + CV_Assert(2 % DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template inline Mat_<_Tp>::Mat_(const Point3_::channel_type>& pt, bool copyData) + : Mat(3/DataType<_Tp>::channels, 1, DataType<_Tp>::type, (void*)&pt) +{ + CV_Assert(3 % DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template inline Mat_<_Tp>::Mat_(const MatCommaInitializer_<_Tp>& commaInitializer) + : Mat(commaInitializer) {} + +template inline Mat_<_Tp>::Mat_(const vector<_Tp>& vec, bool copyData) + : Mat(vec, copyData) {} + 
+template inline Mat_<_Tp>& Mat_<_Tp>::operator = (const Mat& m) +{ + if( DataType<_Tp>::type == m.type() ) + { + Mat::operator = (m); + return *this; + } + if( DataType<_Tp>::depth == m.depth() ) + { + return (*this = m.reshape(DataType<_Tp>::channels, m.dims, 0)); + } + CV_DbgAssert(DataType<_Tp>::channels == m.channels()); + m.convertTo(*this, type()); + return *this; +} + +template inline Mat_<_Tp>& Mat_<_Tp>::operator = (const Mat_& m) +{ + Mat::operator=(m); + return *this; +} + +template inline Mat_<_Tp>& Mat_<_Tp>::operator = (const _Tp& s) +{ + typedef typename DataType<_Tp>::vec_type VT; + Mat::operator=(Scalar((const VT&)s)); + return *this; +} + +template inline void Mat_<_Tp>::create(int _rows, int _cols) +{ + Mat::create(_rows, _cols, DataType<_Tp>::type); +} + +template inline void Mat_<_Tp>::create(Size _sz) +{ + Mat::create(_sz, DataType<_Tp>::type); +} + +template inline void Mat_<_Tp>::create(int _dims, const int* _sz) +{ + Mat::create(_dims, _sz, DataType<_Tp>::type); +} + + +template inline Mat_<_Tp> Mat_<_Tp>::cross(const Mat_& m) const +{ return Mat_<_Tp>(Mat::cross(m)); } + +template template inline Mat_<_Tp>::operator Mat_() const +{ return Mat_(*this); } + +template inline Mat_<_Tp> Mat_<_Tp>::row(int y) const +{ return Mat_(*this, Range(y, y+1), Range::all()); } +template inline Mat_<_Tp> Mat_<_Tp>::col(int x) const +{ return Mat_(*this, Range::all(), Range(x, x+1)); } +template inline Mat_<_Tp> Mat_<_Tp>::diag(int d) const +{ return Mat_(Mat::diag(d)); } +template inline Mat_<_Tp> Mat_<_Tp>::clone() const +{ return Mat_(Mat::clone()); } + +template inline size_t Mat_<_Tp>::elemSize() const +{ + CV_DbgAssert( Mat::elemSize() == sizeof(_Tp) ); + return sizeof(_Tp); +} + +template inline size_t Mat_<_Tp>::elemSize1() const +{ + CV_DbgAssert( Mat::elemSize1() == sizeof(_Tp)/DataType<_Tp>::channels ); + return sizeof(_Tp)/DataType<_Tp>::channels; +} +template inline int Mat_<_Tp>::type() const +{ + CV_DbgAssert( Mat::type() == 
DataType<_Tp>::type ); + return DataType<_Tp>::type; +} +template inline int Mat_<_Tp>::depth() const +{ + CV_DbgAssert( Mat::depth() == DataType<_Tp>::depth ); + return DataType<_Tp>::depth; +} +template inline int Mat_<_Tp>::channels() const +{ + CV_DbgAssert( Mat::channels() == DataType<_Tp>::channels ); + return DataType<_Tp>::channels; +} +template inline size_t Mat_<_Tp>::stepT(int i) const { return step.p[i]/elemSize(); } +template inline size_t Mat_<_Tp>::step1(int i) const { return step.p[i]/elemSize1(); } + +template inline Mat_<_Tp> Mat_<_Tp>::reshape(int _rows) const +{ return Mat_<_Tp>(Mat::reshape(0,_rows)); } + +template inline Mat_<_Tp>& Mat_<_Tp>::adjustROI( int dtop, int dbottom, int dleft, int dright ) +{ return (Mat_<_Tp>&)(Mat::adjustROI(dtop, dbottom, dleft, dright)); } + +template inline Mat_<_Tp> Mat_<_Tp>::operator()( const Range& rowRange, const Range& colRange ) const +{ return Mat_<_Tp>(*this, rowRange, colRange); } + +template inline Mat_<_Tp> Mat_<_Tp>::operator()( const Rect& roi ) const +{ return Mat_<_Tp>(*this, roi); } + +template inline Mat_<_Tp> Mat_<_Tp>::operator()( const Range* ranges ) const +{ return Mat_<_Tp>(*this, ranges); } + +template inline _Tp* Mat_<_Tp>::operator [](int y) +{ return (_Tp*)ptr(y); } +template inline const _Tp* Mat_<_Tp>::operator [](int y) const +{ return (const _Tp*)ptr(y); } + +template inline _Tp& Mat_<_Tp>::operator ()(int i0, int i1) +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + type() == DataType<_Tp>::type ); + return ((_Tp*)(data + step.p[0]*i0))[i1]; +} + +template inline const _Tp& Mat_<_Tp>::operator ()(int i0, int i1) const +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + type() == DataType<_Tp>::type ); + return ((const _Tp*)(data + step.p[0]*i0))[i1]; +} + +template inline _Tp& Mat_<_Tp>::operator ()(Point pt) +{ + CV_DbgAssert( dims <= 2 && 
data && + (unsigned)pt.y < (unsigned)size.p[0] && + (unsigned)pt.x < (unsigned)size.p[1] && + type() == DataType<_Tp>::type ); + return ((_Tp*)(data + step.p[0]*pt.y))[pt.x]; +} + +template inline const _Tp& Mat_<_Tp>::operator ()(Point pt) const +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)pt.y < (unsigned)size.p[0] && + (unsigned)pt.x < (unsigned)size.p[1] && + type() == DataType<_Tp>::type ); + return ((const _Tp*)(data + step.p[0]*pt.y))[pt.x]; +} + +template inline _Tp& Mat_<_Tp>::operator ()(const int* idx) +{ + return Mat::at<_Tp>(idx); +} + +template inline const _Tp& Mat_<_Tp>::operator ()(const int* idx) const +{ + return Mat::at<_Tp>(idx); +} + +template inline _Tp& Mat_<_Tp>::operator ()(int i0) +{ + return this->at<_Tp>(i0); +} + +template inline const _Tp& Mat_<_Tp>::operator ()(int i0) const +{ + return this->at<_Tp>(i0); +} + +template inline _Tp& Mat_<_Tp>::operator ()(int i0, int i1, int i2) +{ + return this->at<_Tp>(i0, i1, i2); +} + +template inline const _Tp& Mat_<_Tp>::operator ()(int i0, int i1, int i2) const +{ + return this->at<_Tp>(i0, i1, i2); +} + + +template inline Mat_<_Tp>::operator vector<_Tp>() const +{ + vector<_Tp> v; + copyTo(v); + return v; +} + +template template inline Mat_<_Tp>::operator Vec::channel_type, n>() const +{ + CV_Assert(n % DataType<_Tp>::channels == 0); + return this->Mat::operator Vec::channel_type, n>(); +} + +template template inline Mat_<_Tp>::operator Matx::channel_type, m, n>() const +{ + CV_Assert(n % DataType<_Tp>::channels == 0); + return this->Mat::operator Matx::channel_type, m, n>(); +} + +template inline void +process( const Mat_& m1, Mat_& m2, Op op ) +{ + int y, x, rows = m1.rows, cols = m1.cols; + int c1 = m1.channels(), c2 = m2.channels(); + + CV_DbgAssert( m1.size() == m2.size() ); + + for( y = 0; y < rows; y++ ) + { + const T1* src = m1[y]; + T2* dst = m2[y]; + + for( x = 0; x < cols; x++ ) + dst[x] = op(src[x]); + } +} + +template inline void +process( const Mat_& m1, const Mat_& m2, 
Mat_& m3, Op op ) +{ + int y, x, rows = m1.rows, cols = m1.cols; + + CV_DbgAssert( m1.size() == m2.size() ); + + for( y = 0; y < rows; y++ ) + { + const T1* src1 = m1[y]; + const T2* src2 = m2[y]; + T3* dst = m3[y]; + + for( x = 0; x < cols; x++ ) + dst[x] = op( src1[x], src2[x] ); + } +} + + +/////////////////////////////// Input/Output Arrays ///////////////////////////////// + +template inline _InputArray::_InputArray(const vector<_Tp>& vec) + : flags(STD_VECTOR + DataType<_Tp>::type), obj((void*)&vec) {} + +template inline _InputArray::_InputArray(const vector >& vec) + : flags(STD_VECTOR_VECTOR + DataType<_Tp>::type), obj((void*)&vec) {} + +template inline _InputArray::_InputArray(const Matx<_Tp, m, n>& mtx) + : flags(MATX + DataType<_Tp>::type), obj((void*)&mtx), sz(n, m) {} + +inline _InputArray::_InputArray(const Scalar& s) + : flags(MATX + CV_64F), obj((void*)&s), sz(1, 4) {} + +template inline _OutputArray::_OutputArray(vector<_Tp>& vec) : _InputArray(vec) {} +template inline _OutputArray::_OutputArray(vector >& vec) : _InputArray(vec) {} +template inline _OutputArray::_OutputArray(Matx<_Tp, m, n>& mtx) : _InputArray(mtx) {} + +//////////////////////////////////// Matrix Expressions ///////////////////////////////////////// + +class CV_EXPORTS MatOp +{ +public: + MatOp() {}; + virtual ~MatOp() {}; + + virtual bool elementWise(const MatExpr& expr) const; + virtual void assign(const MatExpr& expr, Mat& m, int type=-1) const = 0; + virtual void roi(const MatExpr& expr, const Range& rowRange, + const Range& colRange, MatExpr& res) const; + virtual void diag(const MatExpr& expr, int d, MatExpr& res) const; + virtual void augAssignAdd(const MatExpr& expr, Mat& m) const; + virtual void augAssignSubtract(const MatExpr& expr, Mat& m) const; + virtual void augAssignMultiply(const MatExpr& expr, Mat& m) const; + virtual void augAssignDivide(const MatExpr& expr, Mat& m) const; + virtual void augAssignAnd(const MatExpr& expr, Mat& m) const; + virtual void 
augAssignOr(const MatExpr& expr, Mat& m) const; + virtual void augAssignXor(const MatExpr& expr, Mat& m) const; + + virtual void add(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const; + virtual void add(const MatExpr& expr1, const Scalar& s, MatExpr& res) const; + + virtual void subtract(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const; + virtual void subtract(const Scalar& s, const MatExpr& expr, MatExpr& res) const; + + virtual void multiply(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res, double scale=1) const; + virtual void multiply(const MatExpr& expr1, double s, MatExpr& res) const; + + virtual void divide(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res, double scale=1) const; + virtual void divide(double s, const MatExpr& expr, MatExpr& res) const; + + virtual void abs(const MatExpr& expr, MatExpr& res) const; + + virtual void transpose(const MatExpr& expr, MatExpr& res) const; + virtual void matmul(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const; + virtual void invert(const MatExpr& expr, int method, MatExpr& res) const; + + virtual Size size(const MatExpr& expr) const; + virtual int type(const MatExpr& expr) const; +}; + + +class CV_EXPORTS MatExpr +{ +public: + MatExpr() : op(0), flags(0), a(Mat()), b(Mat()), c(Mat()), alpha(0), beta(0), s(Scalar()) {} + MatExpr(const MatOp* _op, int _flags, const Mat& _a=Mat(), const Mat& _b=Mat(), + const Mat& _c=Mat(), double _alpha=1, double _beta=1, const Scalar& _s=Scalar()) + : op(_op), flags(_flags), a(_a), b(_b), c(_c), alpha(_alpha), beta(_beta), s(_s) {} + explicit MatExpr(const Mat& m); + operator Mat() const + { + Mat m; + op->assign(*this, m); + return m; + } + + template operator Mat_<_Tp>() const + { + Mat_<_Tp> m; + op->assign(*this, m, DataType<_Tp>::type); + return m; + } + + MatExpr row(int y) const; + MatExpr col(int x) const; + MatExpr diag(int d=0) const; + MatExpr operator()( const Range& rowRange, const Range& colRange ) const; + 
MatExpr operator()( const Rect& roi ) const; + + Mat cross(const Mat& m) const; + double dot(const Mat& m) const; + + MatExpr t() const; + MatExpr inv(int method = DECOMP_LU) const; + MatExpr mul(const MatExpr& e, double scale=1) const; + MatExpr mul(const Mat& m, double scale=1) const; + + Size size() const; + int type() const; + + const MatOp* op; + int flags; + + Mat a, b, c; + double alpha, beta; + Scalar s; +}; + + +CV_EXPORTS MatExpr operator + (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator + (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator + (const Scalar& s, const Mat& a); +CV_EXPORTS MatExpr operator + (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator + (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator + (const MatExpr& e, const Scalar& s); +CV_EXPORTS MatExpr operator + (const Scalar& s, const MatExpr& e); +CV_EXPORTS MatExpr operator + (const MatExpr& e1, const MatExpr& e2); + +CV_EXPORTS MatExpr operator - (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator - (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator - (const Scalar& s, const Mat& a); +CV_EXPORTS MatExpr operator - (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator - (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator - (const MatExpr& e, const Scalar& s); +CV_EXPORTS MatExpr operator - (const Scalar& s, const MatExpr& e); +CV_EXPORTS MatExpr operator - (const MatExpr& e1, const MatExpr& e2); + +CV_EXPORTS MatExpr operator - (const Mat& m); +CV_EXPORTS MatExpr operator - (const MatExpr& e); + +CV_EXPORTS MatExpr operator * (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator * (const Mat& a, double s); +CV_EXPORTS MatExpr operator * (double s, const Mat& a); +CV_EXPORTS MatExpr operator * (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator * (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator * (const MatExpr& e, double s); +CV_EXPORTS MatExpr operator * (double s, const 
MatExpr& e); +CV_EXPORTS MatExpr operator * (const MatExpr& e1, const MatExpr& e2); + +CV_EXPORTS MatExpr operator / (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator / (const Mat& a, double s); +CV_EXPORTS MatExpr operator / (double s, const Mat& a); +CV_EXPORTS MatExpr operator / (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator / (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator / (const MatExpr& e, double s); +CV_EXPORTS MatExpr operator / (double s, const MatExpr& e); +CV_EXPORTS MatExpr operator / (const MatExpr& e1, const MatExpr& e2); + +CV_EXPORTS MatExpr operator < (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator < (const Mat& a, double s); +CV_EXPORTS MatExpr operator < (double s, const Mat& a); + +CV_EXPORTS MatExpr operator <= (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator <= (const Mat& a, double s); +CV_EXPORTS MatExpr operator <= (double s, const Mat& a); + +CV_EXPORTS MatExpr operator == (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator == (const Mat& a, double s); +CV_EXPORTS MatExpr operator == (double s, const Mat& a); + +CV_EXPORTS MatExpr operator != (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator != (const Mat& a, double s); +CV_EXPORTS MatExpr operator != (double s, const Mat& a); + +CV_EXPORTS MatExpr operator >= (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator >= (const Mat& a, double s); +CV_EXPORTS MatExpr operator >= (double s, const Mat& a); + +CV_EXPORTS MatExpr operator > (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator > (const Mat& a, double s); +CV_EXPORTS MatExpr operator > (double s, const Mat& a); + +CV_EXPORTS MatExpr min(const Mat& a, const Mat& b); +CV_EXPORTS MatExpr min(const Mat& a, double s); +CV_EXPORTS MatExpr min(double s, const Mat& a); + +CV_EXPORTS MatExpr max(const Mat& a, const Mat& b); +CV_EXPORTS MatExpr max(const Mat& a, double s); +CV_EXPORTS MatExpr max(double s, const Mat& a); + +template static inline MatExpr 
min(const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + return cv::min((const Mat&)a, (const Mat&)b); +} + +template static inline MatExpr min(const Mat_<_Tp>& a, double s) +{ + return cv::min((const Mat&)a, s); +} + +template static inline MatExpr min(double s, const Mat_<_Tp>& a) +{ + return cv::min((const Mat&)a, s); +} + +template static inline MatExpr max(const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + return cv::max((const Mat&)a, (const Mat&)b); +} + +template static inline MatExpr max(const Mat_<_Tp>& a, double s) +{ + return cv::max((const Mat&)a, s); +} + +template static inline MatExpr max(double s, const Mat_<_Tp>& a) +{ + return cv::max((const Mat&)a, s); +} + +template static inline void min(const Mat_<_Tp>& a, const Mat_<_Tp>& b, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, (const Mat&)b, (Mat&)c); +} + +template static inline void min(const Mat_<_Tp>& a, double s, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, s, (Mat&)c); +} + +template static inline void min(double s, const Mat_<_Tp>& a, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, s, (Mat&)c); +} + +template static inline void max(const Mat_<_Tp>& a, const Mat_<_Tp>& b, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, (const Mat&)b, (Mat&)c); +} + +template static inline void max(const Mat_<_Tp>& a, double s, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, s, (Mat&)c); +} + +template static inline void max(double s, const Mat_<_Tp>& a, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, s, (Mat&)c); +} + + +CV_EXPORTS MatExpr operator & (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator & (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator & (const Scalar& s, const Mat& a); + +CV_EXPORTS MatExpr operator | (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator | (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator | (const Scalar& s, const Mat& a); + +CV_EXPORTS MatExpr operator ^ (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator ^ (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator ^ 
(const Scalar& s, const Mat& a); + +CV_EXPORTS MatExpr operator ~(const Mat& m); + +CV_EXPORTS MatExpr abs(const Mat& m); +CV_EXPORTS MatExpr abs(const MatExpr& e); + +template static inline MatExpr abs(const Mat_<_Tp>& m) +{ + return cv::abs((const Mat&)m); +} + +////////////////////////////// Augmenting algebraic operations ////////////////////////////////// + +inline Mat& Mat::operator = (const MatExpr& e) +{ + e.op->assign(e, *this); + return *this; +} + +template inline Mat_<_Tp>::Mat_(const MatExpr& e) +{ + e.op->assign(e, *this, DataType<_Tp>::type); +} + +template Mat_<_Tp>& Mat_<_Tp>::operator = (const MatExpr& e) +{ + e.op->assign(e, *this, DataType<_Tp>::type); + return *this; +} + +static inline Mat& operator += (const Mat& a, const Mat& b) +{ + add(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator += (const Mat& a, const Scalar& s) +{ + add(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator += (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + add(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline +Mat_<_Tp>& operator += (const Mat_<_Tp>& a, const Scalar& s) +{ + add(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator += (const Mat& a, const MatExpr& b) +{ + b.op->augAssignAdd(b, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator += (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignAdd(b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator -= (const Mat& a, const Mat& b) +{ + subtract(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator -= (const Mat& a, const Scalar& s) +{ + subtract(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator -= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + subtract(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline +Mat_<_Tp>& operator -= (const Mat_<_Tp>& a, const Scalar& s) +{ + subtract(a, s, (Mat&)a); + return 
(Mat_<_Tp>&)a; +} + +static inline Mat& operator -= (const Mat& a, const MatExpr& b) +{ + b.op->augAssignSubtract(b, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator -= (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignSubtract(b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator *= (const Mat& a, const Mat& b) +{ + gemm(a, b, 1, Mat(), 0, (Mat&)a, 0); + return (Mat&)a; +} + +static inline Mat& operator *= (const Mat& a, double s) +{ + a.convertTo((Mat&)a, -1, s); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator *= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + gemm(a, b, 1, Mat(), 0, (Mat&)a, 0); + return (Mat_<_Tp>&)a; +} + +template static inline +Mat_<_Tp>& operator *= (const Mat_<_Tp>& a, double s) +{ + a.convertTo((Mat&)a, -1, s); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator *= (const Mat& a, const MatExpr& b) +{ + b.op->augAssignMultiply(b, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator *= (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignMultiply(b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator /= (const Mat& a, const Mat& b) +{ + divide(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator /= (const Mat& a, double s) +{ + a.convertTo((Mat&)a, -1, 1./s); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + divide(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline +Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, double s) +{ + a.convertTo((Mat&)a, -1, 1./s); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator /= (const Mat& a, const MatExpr& b) +{ + b.op->augAssignDivide(b, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignDivide(b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +////////////////////////////// Logical operations 
/////////////////////////////// + +static inline Mat& operator &= (const Mat& a, const Mat& b) +{ + bitwise_and(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator &= (const Mat& a, const Scalar& s) +{ + bitwise_and(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline Mat_<_Tp>& +operator &= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + bitwise_and(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline Mat_<_Tp>& +operator &= (const Mat_<_Tp>& a, const Scalar& s) +{ + bitwise_and(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator |= (const Mat& a, const Mat& b) +{ + bitwise_or(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator |= (const Mat& a, const Scalar& s) +{ + bitwise_or(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline Mat_<_Tp>& +operator |= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + bitwise_or(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline Mat_<_Tp>& +operator |= (const Mat_<_Tp>& a, const Scalar& s) +{ + bitwise_or(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator ^= (const Mat& a, const Mat& b) +{ + bitwise_xor(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator ^= (const Mat& a, const Scalar& s) +{ + bitwise_xor(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline Mat_<_Tp>& +operator ^= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + bitwise_xor(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline Mat_<_Tp>& +operator ^= (const Mat_<_Tp>& a, const Scalar& s) +{ + bitwise_xor(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +/////////////////////////////// Miscellaneous operations ////////////////////////////// + +template void split(const Mat& src, vector >& mv) +{ split(src, (vector&)mv ); } + +////////////////////////////////////////////////////////////// + +template inline MatExpr Mat_<_Tp>::zeros(int rows, int cols) +{ + return Mat::zeros(rows, cols, DataType<_Tp>::type); 
+} + +template inline MatExpr Mat_<_Tp>::zeros(Size sz) +{ + return Mat::zeros(sz, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::ones(int rows, int cols) +{ + return Mat::ones(rows, cols, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::ones(Size sz) +{ + return Mat::ones(sz, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::eye(int rows, int cols) +{ + return Mat::eye(rows, cols, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::eye(Size sz) +{ + return Mat::eye(sz, DataType<_Tp>::type); +} + +//////////////////////////////// Iterators & Comma initializers ////////////////////////////////// + +inline MatConstIterator::MatConstIterator() + : m(0), elemSize(0), ptr(0), sliceStart(0), sliceEnd(0) {} + +inline MatConstIterator::MatConstIterator(const Mat* _m) + : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0) +{ + if( m && m->isContinuous() ) + { + sliceStart = m->data; + sliceEnd = sliceStart + m->total()*elemSize; + } + seek((const int*)0); +} + +inline MatConstIterator::MatConstIterator(const Mat* _m, int _row, int _col) + : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0) +{ + CV_Assert(m && m->dims <= 2); + if( m->isContinuous() ) + { + sliceStart = m->data; + sliceEnd = sliceStart + m->total()*elemSize; + } + int idx[]={_row, _col}; + seek(idx); +} + +inline MatConstIterator::MatConstIterator(const Mat* _m, Point _pt) + : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0) +{ + CV_Assert(m && m->dims <= 2); + if( m->isContinuous() ) + { + sliceStart = m->data; + sliceEnd = sliceStart + m->total()*elemSize; + } + int idx[]={_pt.y, _pt.x}; + seek(idx); +} + +inline MatConstIterator::MatConstIterator(const MatConstIterator& it) + : m(it.m), elemSize(it.elemSize), ptr(it.ptr), sliceStart(it.sliceStart), sliceEnd(it.sliceEnd) +{} + +inline MatConstIterator& MatConstIterator::operator = (const MatConstIterator& it ) +{ + m = it.m; elemSize = it.elemSize; 
ptr = it.ptr; + sliceStart = it.sliceStart; sliceEnd = it.sliceEnd; + return *this; +} + +inline uchar* MatConstIterator::operator *() const { return ptr; } + +inline MatConstIterator& MatConstIterator::operator += (ptrdiff_t ofs) +{ + if( !m || ofs == 0 ) + return *this; + ptrdiff_t ofsb = ofs*elemSize; + ptr += ofsb; + if( ptr < sliceStart || sliceEnd <= ptr ) + { + ptr -= ofsb; + seek(ofs, true); + } + return *this; +} + +inline MatConstIterator& MatConstIterator::operator -= (ptrdiff_t ofs) +{ return (*this += -ofs); } + +inline MatConstIterator& MatConstIterator::operator --() +{ + if( m && (ptr -= elemSize) < sliceStart ) + { + ptr += elemSize; + seek(-1, true); + } + return *this; +} + +inline MatConstIterator MatConstIterator::operator --(int) +{ + MatConstIterator b = *this; + *this += -1; + return b; +} + +inline MatConstIterator& MatConstIterator::operator ++() +{ + if( m && (ptr += elemSize) >= sliceEnd ) + { + ptr -= elemSize; + seek(1, true); + } + return *this; +} + +inline MatConstIterator MatConstIterator::operator ++(int) +{ + MatConstIterator b = *this; + *this += 1; + return b; +} + +template inline MatConstIterator_<_Tp>::MatConstIterator_() {} + +template inline MatConstIterator_<_Tp>::MatConstIterator_(const Mat_<_Tp>* _m) + : MatConstIterator(_m) {} + +template inline MatConstIterator_<_Tp>:: + MatConstIterator_(const Mat_<_Tp>* _m, int _row, int _col) + : MatConstIterator(_m, _row, _col) {} + +template inline MatConstIterator_<_Tp>:: + MatConstIterator_(const Mat_<_Tp>* _m, Point _pt) + : MatConstIterator(_m, _pt) {} + +template inline MatConstIterator_<_Tp>:: + MatConstIterator_(const MatConstIterator_& it) + : MatConstIterator(it) {} + +template inline MatConstIterator_<_Tp>& + MatConstIterator_<_Tp>::operator = (const MatConstIterator_& it ) +{ + MatConstIterator::operator = (it); + return *this; +} + +template inline _Tp MatConstIterator_<_Tp>::operator *() const { return *(_Tp*)(this->ptr); } + +template inline MatConstIterator_<_Tp>& 
MatConstIterator_<_Tp>::operator += (ptrdiff_t ofs) +{ + MatConstIterator::operator += (ofs); + return *this; +} + +template inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator -= (ptrdiff_t ofs) +{ return (*this += -ofs); } + +template inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator --() +{ + MatConstIterator::operator --(); + return *this; +} + +template inline MatConstIterator_<_Tp> MatConstIterator_<_Tp>::operator --(int) +{ + MatConstIterator_ b = *this; + MatConstIterator::operator --(); + return b; +} + +template inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator ++() +{ + MatConstIterator::operator ++(); + return *this; +} + +template inline MatConstIterator_<_Tp> MatConstIterator_<_Tp>::operator ++(int) +{ + MatConstIterator_ b = *this; + MatConstIterator::operator ++(); + return b; +} + +template inline MatIterator_<_Tp>::MatIterator_() : MatConstIterator_<_Tp>() {} + +template inline MatIterator_<_Tp>::MatIterator_(Mat_<_Tp>* _m) + : MatConstIterator_<_Tp>(_m) {} + +template inline MatIterator_<_Tp>::MatIterator_(Mat_<_Tp>* _m, int _row, int _col) + : MatConstIterator_<_Tp>(_m, _row, _col) {} + +template inline MatIterator_<_Tp>::MatIterator_(const Mat_<_Tp>* _m, Point _pt) + : MatConstIterator_<_Tp>(_m, _pt) {} + +template inline MatIterator_<_Tp>::MatIterator_(const Mat_<_Tp>* _m, const int* _idx) + : MatConstIterator_<_Tp>(_m, _idx) {} + +template inline MatIterator_<_Tp>::MatIterator_(const MatIterator_& it) + : MatConstIterator_<_Tp>(it) {} + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator = (const MatIterator_<_Tp>& it ) +{ + MatConstIterator::operator = (it); + return *this; +} + +template inline _Tp& MatIterator_<_Tp>::operator *() const { return *(_Tp*)(this->ptr); } + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator += (ptrdiff_t ofs) +{ + MatConstIterator::operator += (ofs); + return *this; +} + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator -= (ptrdiff_t 
ofs) +{ + MatConstIterator::operator += (-ofs); + return *this; +} + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator --() +{ + MatConstIterator::operator --(); + return *this; +} + +template inline MatIterator_<_Tp> MatIterator_<_Tp>::operator --(int) +{ + MatIterator_ b = *this; + MatConstIterator::operator --(); + return b; +} + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator ++() +{ + MatConstIterator::operator ++(); + return *this; +} + +template inline MatIterator_<_Tp> MatIterator_<_Tp>::operator ++(int) +{ + MatIterator_ b = *this; + MatConstIterator::operator ++(); + return b; +} + +template inline Point MatConstIterator_<_Tp>::pos() const +{ + if( !m ) + return Point(); + CV_DbgAssert( m->dims <= 2 ); + if( m->isContinuous() ) + { + ptrdiff_t ofs = (const _Tp*)ptr - (const _Tp*)m->data; + int y = (int)(ofs / m->cols), x = (int)(ofs - (ptrdiff_t)y*m->cols); + return Point(x, y); + } + else + { + ptrdiff_t ofs = (uchar*)ptr - m->data; + int y = (int)(ofs / m->step), x = (int)((ofs - y*m->step)/sizeof(_Tp)); + return Point(x, y); + } +} + +static inline bool +operator == (const MatConstIterator& a, const MatConstIterator& b) +{ return a.m == b.m && a.ptr == b.ptr; } + +template static inline bool +operator != (const MatConstIterator& a, const MatConstIterator& b) +{ return !(a == b); } + +template static inline bool +operator == (const MatConstIterator_<_Tp>& a, const MatConstIterator_<_Tp>& b) +{ return a.m == b.m && a.ptr == b.ptr; } + +template static inline bool +operator != (const MatConstIterator_<_Tp>& a, const MatConstIterator_<_Tp>& b) +{ return a.m != b.m || a.ptr != b.ptr; } + +template static inline bool +operator == (const MatIterator_<_Tp>& a, const MatIterator_<_Tp>& b) +{ return a.m == b.m && a.ptr == b.ptr; } + +template static inline bool +operator != (const MatIterator_<_Tp>& a, const MatIterator_<_Tp>& b) +{ return a.m != b.m || a.ptr != b.ptr; } + +static inline bool +operator < (const MatConstIterator& a, 
const MatConstIterator& b) +{ return a.ptr < b.ptr; } + +static inline bool +operator > (const MatConstIterator& a, const MatConstIterator& b) +{ return a.ptr > b.ptr; } + +static inline bool +operator <= (const MatConstIterator& a, const MatConstIterator& b) +{ return a.ptr <= b.ptr; } + +static inline bool +operator >= (const MatConstIterator& a, const MatConstIterator& b) +{ return a.ptr >= b.ptr; } + +CV_EXPORTS ptrdiff_t operator - (const MatConstIterator& b, const MatConstIterator& a); + +static inline MatConstIterator operator + (const MatConstIterator& a, ptrdiff_t ofs) +{ MatConstIterator b = a; return b += ofs; } + +static inline MatConstIterator operator + (ptrdiff_t ofs, const MatConstIterator& a) +{ MatConstIterator b = a; return b += ofs; } + +static inline MatConstIterator operator - (const MatConstIterator& a, ptrdiff_t ofs) +{ MatConstIterator b = a; return b += -ofs; } + +template static inline MatConstIterator_<_Tp> +operator + (const MatConstIterator_<_Tp>& a, ptrdiff_t ofs) +{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatConstIterator_<_Tp>&)t; } + +template static inline MatConstIterator_<_Tp> +operator + (ptrdiff_t ofs, const MatConstIterator_<_Tp>& a) +{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatConstIterator_<_Tp>&)t; } + +template static inline MatConstIterator_<_Tp> +operator - (const MatConstIterator_<_Tp>& a, ptrdiff_t ofs) +{ MatConstIterator t = (const MatConstIterator&)a - ofs; return (MatConstIterator_<_Tp>&)t; } + +inline uchar* MatConstIterator::operator [](ptrdiff_t i) const +{ return *(*this + i); } + +template inline _Tp MatConstIterator_<_Tp>::operator [](ptrdiff_t i) const +{ return *(_Tp*)MatConstIterator::operator [](i); } + +template static inline MatIterator_<_Tp> +operator + (const MatIterator_<_Tp>& a, ptrdiff_t ofs) +{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatIterator_<_Tp>&)t; } + +template static inline MatIterator_<_Tp> +operator + (ptrdiff_t ofs, 
const MatIterator_<_Tp>& a) +{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatIterator_<_Tp>&)t; } + +template static inline MatIterator_<_Tp> +operator - (const MatIterator_<_Tp>& a, ptrdiff_t ofs) +{ MatConstIterator t = (const MatConstIterator&)a - ofs; return (MatIterator_<_Tp>&)t; } + +template inline _Tp& MatIterator_<_Tp>::operator [](ptrdiff_t i) const +{ return *(*this + i); } + +template inline MatConstIterator_<_Tp> Mat_<_Tp>::begin() const +{ return Mat::begin<_Tp>(); } + +template inline MatConstIterator_<_Tp> Mat_<_Tp>::end() const +{ return Mat::end<_Tp>(); } + +template inline MatIterator_<_Tp> Mat_<_Tp>::begin() +{ return Mat::begin<_Tp>(); } + +template inline MatIterator_<_Tp> Mat_<_Tp>::end() +{ return Mat::end<_Tp>(); } + +template inline MatCommaInitializer_<_Tp>::MatCommaInitializer_(Mat_<_Tp>* _m) : it(_m) {} + +template template inline MatCommaInitializer_<_Tp>& +MatCommaInitializer_<_Tp>::operator , (T2 v) +{ + CV_DbgAssert( this->it < ((const Mat_<_Tp>*)this->it.m)->end() ); + *this->it = _Tp(v); ++this->it; + return *this; +} + +template inline Mat_<_Tp> MatCommaInitializer_<_Tp>::operator *() const +{ + CV_DbgAssert( this->it == ((const Mat_<_Tp>*)this->it.m)->end() ); + return Mat_<_Tp>(*this->it.m); +} + +template inline MatCommaInitializer_<_Tp>::operator Mat_<_Tp>() const +{ + CV_DbgAssert( this->it == ((const Mat_<_Tp>*)this->it.m)->end() ); + return Mat_<_Tp>(*this->it.m); +} + +template static inline MatCommaInitializer_<_Tp> +operator << (const Mat_<_Tp>& m, T2 val) +{ + MatCommaInitializer_<_Tp> commaInitializer((Mat_<_Tp>*)&m); + return (commaInitializer, val); +} + +//////////////////////////////// SparseMat //////////////////////////////// + +inline SparseMat::SparseMat() +: flags(MAGIC_VAL), hdr(0) +{ +} + +inline SparseMat::SparseMat(int _dims, const int* _sizes, int _type) +: flags(MAGIC_VAL), hdr(0) +{ + create(_dims, _sizes, _type); +} + +inline SparseMat::SparseMat(const SparseMat& m) +: 
flags(m.flags), hdr(m.hdr) +{ + addref(); +} + +inline SparseMat::~SparseMat() +{ + release(); +} + +inline SparseMat& SparseMat::operator = (const SparseMat& m) +{ + if( this != &m ) + { + if( m.hdr ) + CV_XADD(&m.hdr->refcount, 1); + release(); + flags = m.flags; + hdr = m.hdr; + } + return *this; +} + +inline SparseMat& SparseMat::operator = (const Mat& m) +{ return (*this = SparseMat(m)); } + +inline SparseMat SparseMat::clone() const +{ + SparseMat temp; + this->copyTo(temp); + return temp; +} + + +inline void SparseMat::assignTo( SparseMat& m, int type ) const +{ + if( type < 0 ) + m = *this; + else + convertTo(m, type); +} + +inline void SparseMat::addref() +{ if( hdr ) CV_XADD(&hdr->refcount, 1); } + +inline void SparseMat::release() +{ + if( hdr && CV_XADD(&hdr->refcount, -1) == 1 ) + delete hdr; + hdr = 0; +} + +inline size_t SparseMat::elemSize() const +{ return CV_ELEM_SIZE(flags); } + +inline size_t SparseMat::elemSize1() const +{ return CV_ELEM_SIZE1(flags); } + +inline int SparseMat::type() const +{ return CV_MAT_TYPE(flags); } + +inline int SparseMat::depth() const +{ return CV_MAT_DEPTH(flags); } + +inline int SparseMat::channels() const +{ return CV_MAT_CN(flags); } + +inline const int* SparseMat::size() const +{ + return hdr ? hdr->size : 0; +} + +inline int SparseMat::size(int i) const +{ + if( hdr ) + { + CV_DbgAssert((unsigned)i < (unsigned)hdr->dims); + return hdr->size[i]; + } + return 0; +} + +inline int SparseMat::dims() const +{ + return hdr ? hdr->dims : 0; +} + +inline size_t SparseMat::nzcount() const +{ + return hdr ? 
hdr->nodeCount : 0; +} + +inline size_t SparseMat::hash(int i0) const +{ + return (size_t)i0; +} + +inline size_t SparseMat::hash(int i0, int i1) const +{ + return (size_t)(unsigned)i0*HASH_SCALE + (unsigned)i1; +} + +inline size_t SparseMat::hash(int i0, int i1, int i2) const +{ + return ((size_t)(unsigned)i0*HASH_SCALE + (unsigned)i1)*HASH_SCALE + (unsigned)i2; +} + +inline size_t SparseMat::hash(const int* idx) const +{ + size_t h = (unsigned)idx[0]; + if( !hdr ) + return 0; + int i, d = hdr->dims; + for( i = 1; i < d; i++ ) + h = h*HASH_SCALE + (unsigned)idx[i]; + return h; +} + +template inline _Tp& SparseMat::ref(int i0, size_t* hashval) +{ return *(_Tp*)((SparseMat*)this)->ptr(i0, true, hashval); } + +template inline _Tp& SparseMat::ref(int i0, int i1, size_t* hashval) +{ return *(_Tp*)((SparseMat*)this)->ptr(i0, i1, true, hashval); } + +template inline _Tp& SparseMat::ref(int i0, int i1, int i2, size_t* hashval) +{ return *(_Tp*)((SparseMat*)this)->ptr(i0, i1, i2, true, hashval); } + +template inline _Tp& SparseMat::ref(const int* idx, size_t* hashval) +{ return *(_Tp*)((SparseMat*)this)->ptr(idx, true, hashval); } + +template inline _Tp SparseMat::value(int i0, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, false, hashval); + return p ? *p : _Tp(); +} + +template inline _Tp SparseMat::value(int i0, int i1, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, i1, false, hashval); + return p ? *p : _Tp(); +} + +template inline _Tp SparseMat::value(int i0, int i1, int i2, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, i1, i2, false, hashval); + return p ? *p : _Tp(); +} + +template inline _Tp SparseMat::value(const int* idx, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(idx, false, hashval); + return p ? 
*p : _Tp(); +} + +template inline const _Tp* SparseMat::find(int i0, size_t* hashval) const +{ return (const _Tp*)((SparseMat*)this)->ptr(i0, false, hashval); } + +template inline const _Tp* SparseMat::find(int i0, int i1, size_t* hashval) const +{ return (const _Tp*)((SparseMat*)this)->ptr(i0, i1, false, hashval); } + +template inline const _Tp* SparseMat::find(int i0, int i1, int i2, size_t* hashval) const +{ return (const _Tp*)((SparseMat*)this)->ptr(i0, i1, i2, false, hashval); } + +template inline const _Tp* SparseMat::find(const int* idx, size_t* hashval) const +{ return (const _Tp*)((SparseMat*)this)->ptr(idx, false, hashval); } + +template inline _Tp& SparseMat::value(Node* n) +{ return *(_Tp*)((uchar*)n + hdr->valueOffset); } + +template inline const _Tp& SparseMat::value(const Node* n) const +{ return *(const _Tp*)((const uchar*)n + hdr->valueOffset); } + +inline SparseMat::Node* SparseMat::node(size_t nidx) +{ return (Node*)&hdr->pool[nidx]; } + +inline const SparseMat::Node* SparseMat::node(size_t nidx) const +{ return (const Node*)&hdr->pool[nidx]; } + +inline SparseMatIterator SparseMat::begin() +{ return SparseMatIterator(this); } + +inline SparseMatConstIterator SparseMat::begin() const +{ return SparseMatConstIterator(this); } + +inline SparseMatIterator SparseMat::end() +{ SparseMatIterator it(this); it.seekEnd(); return it; } + +inline SparseMatConstIterator SparseMat::end() const +{ SparseMatConstIterator it(this); it.seekEnd(); return it; } + +template inline SparseMatIterator_<_Tp> SparseMat::begin() +{ return SparseMatIterator_<_Tp>(this); } + +template inline SparseMatConstIterator_<_Tp> SparseMat::begin() const +{ return SparseMatConstIterator_<_Tp>(this); } + +template inline SparseMatIterator_<_Tp> SparseMat::end() +{ SparseMatIterator_<_Tp> it(this); it.seekEnd(); return it; } + +template inline SparseMatConstIterator_<_Tp> SparseMat::end() const +{ SparseMatConstIterator_<_Tp> it(this); it.seekEnd(); return it; } + + +inline 
SparseMatConstIterator::SparseMatConstIterator() +: m(0), hashidx(0), ptr(0) +{ +} + +inline SparseMatConstIterator::SparseMatConstIterator(const SparseMatConstIterator& it) +: m(it.m), hashidx(it.hashidx), ptr(it.ptr) +{ +} + +static inline bool operator == (const SparseMatConstIterator& it1, const SparseMatConstIterator& it2) +{ return it1.m == it2.m && it1.hashidx == it2.hashidx && it1.ptr == it2.ptr; } + +static inline bool operator != (const SparseMatConstIterator& it1, const SparseMatConstIterator& it2) +{ return !(it1 == it2); } + + +inline SparseMatConstIterator& SparseMatConstIterator::operator = (const SparseMatConstIterator& it) +{ + if( this != &it ) + { + m = it.m; + hashidx = it.hashidx; + ptr = it.ptr; + } + return *this; +} + +template inline const _Tp& SparseMatConstIterator::value() const +{ return *(_Tp*)ptr; } + +inline const SparseMat::Node* SparseMatConstIterator::node() const +{ + return ptr && m && m->hdr ? + (const SparseMat::Node*)(ptr - m->hdr->valueOffset) : 0; +} + +inline SparseMatConstIterator SparseMatConstIterator::operator ++(int) +{ + SparseMatConstIterator it = *this; + ++*this; + return it; +} + + +inline void SparseMatConstIterator::seekEnd() +{ + if( m && m->hdr ) + { + hashidx = m->hdr->hashtab.size(); + ptr = 0; + } +} + +inline SparseMatIterator::SparseMatIterator() +{} + +inline SparseMatIterator::SparseMatIterator(SparseMat* _m) +: SparseMatConstIterator(_m) +{} + +inline SparseMatIterator::SparseMatIterator(const SparseMatIterator& it) +: SparseMatConstIterator(it) +{ +} + +inline SparseMatIterator& SparseMatIterator::operator = (const SparseMatIterator& it) +{ + (SparseMatConstIterator&)*this = it; + return *this; +} + +template inline _Tp& SparseMatIterator::value() const +{ return *(_Tp*)ptr; } + +inline SparseMat::Node* SparseMatIterator::node() const +{ + return (SparseMat::Node*)SparseMatConstIterator::node(); +} + +inline SparseMatIterator& SparseMatIterator::operator ++() +{ + SparseMatConstIterator::operator 
++(); + return *this; +} + +inline SparseMatIterator SparseMatIterator::operator ++(int) +{ + SparseMatIterator it = *this; + ++*this; + return it; +} + + +template inline SparseMat_<_Tp>::SparseMat_() +{ flags = MAGIC_VAL | DataType<_Tp>::type; } + +template inline SparseMat_<_Tp>::SparseMat_(int _dims, const int* _sizes) +: SparseMat(_dims, _sizes, DataType<_Tp>::type) +{} + +template inline SparseMat_<_Tp>::SparseMat_(const SparseMat& m) +{ + if( m.type() == DataType<_Tp>::type ) + *this = (const SparseMat_<_Tp>&)m; + else + m.convertTo(this, DataType<_Tp>::type); +} + +template inline SparseMat_<_Tp>::SparseMat_(const SparseMat_<_Tp>& m) +{ + this->flags = m.flags; + this->hdr = m.hdr; + if( this->hdr ) + CV_XADD(&this->hdr->refcount, 1); +} + +template inline SparseMat_<_Tp>::SparseMat_(const Mat& m) +{ + SparseMat sm(m); + *this = sm; +} + +template inline SparseMat_<_Tp>::SparseMat_(const CvSparseMat* m) +{ + SparseMat sm(m); + *this = sm; +} + +template inline SparseMat_<_Tp>& +SparseMat_<_Tp>::operator = (const SparseMat_<_Tp>& m) +{ + if( this != &m ) + { + if( m.hdr ) CV_XADD(&m.hdr->refcount, 1); + release(); + flags = m.flags; + hdr = m.hdr; + } + return *this; +} + +template inline SparseMat_<_Tp>& +SparseMat_<_Tp>::operator = (const SparseMat& m) +{ + if( m.type() == DataType<_Tp>::type ) + return (*this = (const SparseMat_<_Tp>&)m); + m.convertTo(*this, DataType<_Tp>::type); + return *this; +} + +template inline SparseMat_<_Tp>& +SparseMat_<_Tp>::operator = (const Mat& m) +{ return (*this = SparseMat(m)); } + +template inline SparseMat_<_Tp> +SparseMat_<_Tp>::clone() const +{ + SparseMat_<_Tp> m; + this->copyTo(m); + return m; +} + +template inline void +SparseMat_<_Tp>::create(int _dims, const int* _sizes) +{ + SparseMat::create(_dims, _sizes, DataType<_Tp>::type); +} + +template inline +SparseMat_<_Tp>::operator CvSparseMat*() const +{ + return SparseMat::operator CvSparseMat*(); +} + +template inline int SparseMat_<_Tp>::type() const +{ return 
DataType<_Tp>::type; } + +template inline int SparseMat_<_Tp>::depth() const +{ return DataType<_Tp>::depth; } + +template inline int SparseMat_<_Tp>::channels() const +{ return DataType<_Tp>::channels; } + +template inline _Tp& +SparseMat_<_Tp>::ref(int i0, size_t* hashval) +{ return SparseMat::ref<_Tp>(i0, hashval); } + +template inline _Tp +SparseMat_<_Tp>::operator()(int i0, size_t* hashval) const +{ return SparseMat::value<_Tp>(i0, hashval); } + +template inline _Tp& +SparseMat_<_Tp>::ref(int i0, int i1, size_t* hashval) +{ return SparseMat::ref<_Tp>(i0, i1, hashval); } + +template inline _Tp +SparseMat_<_Tp>::operator()(int i0, int i1, size_t* hashval) const +{ return SparseMat::value<_Tp>(i0, i1, hashval); } + +template inline _Tp& +SparseMat_<_Tp>::ref(int i0, int i1, int i2, size_t* hashval) +{ return SparseMat::ref<_Tp>(i0, i1, i2, hashval); } + +template inline _Tp +SparseMat_<_Tp>::operator()(int i0, int i1, int i2, size_t* hashval) const +{ return SparseMat::value<_Tp>(i0, i1, i2, hashval); } + +template inline _Tp& +SparseMat_<_Tp>::ref(const int* idx, size_t* hashval) +{ return SparseMat::ref<_Tp>(idx, hashval); } + +template inline _Tp +SparseMat_<_Tp>::operator()(const int* idx, size_t* hashval) const +{ return SparseMat::value<_Tp>(idx, hashval); } + +template inline SparseMatIterator_<_Tp> SparseMat_<_Tp>::begin() +{ return SparseMatIterator_<_Tp>(this); } + +template inline SparseMatConstIterator_<_Tp> SparseMat_<_Tp>::begin() const +{ return SparseMatConstIterator_<_Tp>(this); } + +template inline SparseMatIterator_<_Tp> SparseMat_<_Tp>::end() +{ SparseMatIterator_<_Tp> it(this); it.seekEnd(); return it; } + +template inline SparseMatConstIterator_<_Tp> SparseMat_<_Tp>::end() const +{ SparseMatConstIterator_<_Tp> it(this); it.seekEnd(); return it; } + +template inline +SparseMatConstIterator_<_Tp>::SparseMatConstIterator_() +{} + +template inline +SparseMatConstIterator_<_Tp>::SparseMatConstIterator_(const SparseMat_<_Tp>* _m) +: 
SparseMatConstIterator(_m) +{} + +template inline +SparseMatConstIterator_<_Tp>::SparseMatConstIterator_(const SparseMatConstIterator_<_Tp>& it) +: SparseMatConstIterator(it) +{} + +template inline SparseMatConstIterator_<_Tp>& +SparseMatConstIterator_<_Tp>::operator = (const SparseMatConstIterator_<_Tp>& it) +{ return ((SparseMatConstIterator&)*this = it); } + +template inline const _Tp& +SparseMatConstIterator_<_Tp>::operator *() const +{ return *(const _Tp*)this->ptr; } + +template inline SparseMatConstIterator_<_Tp>& +SparseMatConstIterator_<_Tp>::operator ++() +{ + SparseMatConstIterator::operator ++(); + return *this; +} + +template inline SparseMatConstIterator_<_Tp> +SparseMatConstIterator_<_Tp>::operator ++(int) +{ + SparseMatConstIterator it = *this; + SparseMatConstIterator::operator ++(); + return it; +} + +template inline +SparseMatIterator_<_Tp>::SparseMatIterator_() +{} + +template inline +SparseMatIterator_<_Tp>::SparseMatIterator_(SparseMat_<_Tp>* _m) +: SparseMatConstIterator_<_Tp>(_m) +{} + +template inline +SparseMatIterator_<_Tp>::SparseMatIterator_(const SparseMatIterator_<_Tp>& it) +: SparseMatConstIterator_<_Tp>(it) +{} + +template inline SparseMatIterator_<_Tp>& +SparseMatIterator_<_Tp>::operator = (const SparseMatIterator_<_Tp>& it) +{ return ((SparseMatIterator&)*this = it); } + +template inline _Tp& +SparseMatIterator_<_Tp>::operator *() const +{ return *(_Tp*)this->ptr; } + +template inline SparseMatIterator_<_Tp>& +SparseMatIterator_<_Tp>::operator ++() +{ + SparseMatConstIterator::operator ++(); + return *this; +} + +template inline SparseMatIterator_<_Tp> +SparseMatIterator_<_Tp>::operator ++(int) +{ + SparseMatIterator it = *this; + SparseMatConstIterator::operator ++(); + return it; +} + +} + +#endif +#endif diff --git a/include/opencv2/core/operations.hpp b/include/opencv2/core/operations.hpp new file mode 100644 index 0000000..c3af7b2 --- /dev/null +++ b/include/opencv2/core/operations.hpp @@ -0,0 +1,3628 @@ 
+/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CORE_OPERATIONS_HPP__ +#define __OPENCV_CORE_OPERATIONS_HPP__ + +#ifndef SKIP_INCLUDES + #include + #include +#endif // SKIP_INCLUDES + +#ifdef __cplusplus + +/////// exchange-add operation for atomic operations on reference counters /////// +#ifdef __GNUC_Lior_ + + #if __GNUC__*10 + __GNUC_MINOR__ >= 42 + + #if !defined WIN32 && (defined __i486__ || defined __i586__ || \ + defined __i686__ || defined __MMX__ || defined __SSE__ || defined __ppc__) + #define CV_XADD __sync_fetch_and_add + #else + #include + #define CV_XADD __gnu_cxx::__exchange_and_add + #endif + + #else + #include + #if __GNUC__*10 + __GNUC_MINOR__ >= 34 + #define CV_XADD __gnu_cxx::__exchange_and_add + #else + #define CV_XADD __exchange_and_add + #endif + #endif + +#elif defined WIN32 || defined _WIN32 + #include + #define CV_XADD(addr,delta) _InterlockedExchangeAdd((long volatile*)(addr), (delta)) +#else + + template static inline _Tp CV_XADD(_Tp* addr, _Tp delta) + { int tmp = *addr; *addr += delta; return tmp; } + +#endif + +#include + +namespace cv +{ + +using std::cos; +using std::sin; +using std::max; +using std::min; +using std::exp; +using std::log; +using std::pow; +using std::sqrt; + + +/////////////// saturate_cast (used in image & signal processing) /////////////////// + +template static inline _Tp saturate_cast(uchar v) { return _Tp(v); } +template static inline _Tp saturate_cast(schar v) { return _Tp(v); } +template 
static inline _Tp saturate_cast(ushort v) { return _Tp(v); } +template static inline _Tp saturate_cast(short v) { return _Tp(v); } +template static inline _Tp saturate_cast(unsigned v) { return _Tp(v); } +template static inline _Tp saturate_cast(int v) { return _Tp(v); } +template static inline _Tp saturate_cast(float v) { return _Tp(v); } +template static inline _Tp saturate_cast(double v) { return _Tp(v); } + +template<> inline uchar saturate_cast(schar v) +{ return (uchar)std::max((int)v, 0); } +template<> inline uchar saturate_cast(ushort v) +{ return (uchar)std::min((unsigned)v, (unsigned)UCHAR_MAX); } +template<> inline uchar saturate_cast(int v) +{ return (uchar)((unsigned)v <= UCHAR_MAX ? v : v > 0 ? UCHAR_MAX : 0); } +template<> inline uchar saturate_cast(short v) +{ return saturate_cast((int)v); } +template<> inline uchar saturate_cast(unsigned v) +{ return (uchar)std::min(v, (unsigned)UCHAR_MAX); } +template<> inline uchar saturate_cast(float v) +{ int iv = cvRound(v); return saturate_cast(iv); } +template<> inline uchar saturate_cast(double v) +{ int iv = cvRound(v); return saturate_cast(iv); } + +template<> inline schar saturate_cast(uchar v) +{ return (schar)std::min((int)v, SCHAR_MAX); } +template<> inline schar saturate_cast(ushort v) +{ return (schar)std::min((unsigned)v, (unsigned)SCHAR_MAX); } +template<> inline schar saturate_cast(int v) +{ + return (schar)((unsigned)(v-SCHAR_MIN) <= (unsigned)UCHAR_MAX ? + v : v > 0 ? 
SCHAR_MAX : SCHAR_MIN); +} +template<> inline schar saturate_cast(short v) +{ return saturate_cast((int)v); } +template<> inline schar saturate_cast(unsigned v) +{ return (schar)std::min(v, (unsigned)SCHAR_MAX); } + +template<> inline schar saturate_cast(float v) +{ int iv = cvRound(v); return saturate_cast(iv); } +template<> inline schar saturate_cast(double v) +{ int iv = cvRound(v); return saturate_cast(iv); } + +template<> inline ushort saturate_cast(schar v) +{ return (ushort)std::max((int)v, 0); } +template<> inline ushort saturate_cast(short v) +{ return (ushort)std::max((int)v, 0); } +template<> inline ushort saturate_cast(int v) +{ return (ushort)((unsigned)v <= (unsigned)USHRT_MAX ? v : v > 0 ? USHRT_MAX : 0); } +template<> inline ushort saturate_cast(unsigned v) +{ return (ushort)std::min(v, (unsigned)USHRT_MAX); } +template<> inline ushort saturate_cast(float v) +{ int iv = cvRound(v); return saturate_cast(iv); } +template<> inline ushort saturate_cast(double v) +{ int iv = cvRound(v); return saturate_cast(iv); } + +template<> inline short saturate_cast(ushort v) +{ return (short)std::min((int)v, SHRT_MAX); } +template<> inline short saturate_cast(int v) +{ + return (short)((unsigned)(v - SHRT_MIN) <= (unsigned)USHRT_MAX ? + v : v > 0 ? SHRT_MAX : SHRT_MIN); +} +template<> inline short saturate_cast(unsigned v) +{ return (short)std::min(v, (unsigned)SHRT_MAX); } +template<> inline short saturate_cast(float v) +{ int iv = cvRound(v); return saturate_cast(iv); } +template<> inline short saturate_cast(double v) +{ int iv = cvRound(v); return saturate_cast(iv); } + +template<> inline int saturate_cast(float v) { return cvRound(v); } +template<> inline int saturate_cast(double v) { return cvRound(v); } + +// we intentionally do not clip negative numbers, to make -1 become 0xffffffff etc. 
+template<> inline unsigned saturate_cast(float v){ return cvRound(v); } +template<> inline unsigned saturate_cast(double v) { return cvRound(v); } + + +//////////////////////////////// Matx ///////////////////////////////// + + +template inline Matx<_Tp, m, n>::Matx() +{ + for(int i = 0; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0) +{ + val[0] = v0; + for(int i = 1; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1) +{ + assert(channels >= 2); + val[0] = v0; val[1] = v1; + for(int i = 2; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2) +{ + assert(channels >= 3); + val[0] = v0; val[1] = v1; val[2] = v2; + for(int i = 3; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3) +{ + assert(channels >= 4); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + for(int i = 4; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4) +{ + assert(channels >= 5); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; val[4] = v4; + for(int i = 5; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5) +{ + assert(channels >= 6); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; + for(int i = 6; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6) +{ + assert(channels >= 7); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; + for(int i = 7; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7) +{ + assert(channels >= 8); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] 
= v5; val[6] = v6; val[7] = v7; + for(int i = 8; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8) +{ + assert(channels >= 9); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; + for(int i = 9; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9) +{ + assert(channels >= 10); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; val[9] = v9; + for(int i = 10; i < channels; i++) val[i] = _Tp(0); +} + + +template +inline Matx<_Tp,m,n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11) +{ + assert(channels == 12); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; val[9] = v9; val[10] = v10; val[11] = v11; +} + +template +inline Matx<_Tp,m,n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11, + _Tp v12, _Tp v13, _Tp v14, _Tp v15) +{ + assert(channels == 16); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; val[9] = v9; val[10] = v10; val[11] = v11; + val[12] = v12; val[13] = v13; val[14] = v14; val[15] = v15; +} + +template inline Matx<_Tp, m, n>::Matx(const _Tp* values) +{ + for( int i = 0; i < channels; i++ ) val[i] = values[i]; +} + +template inline Matx<_Tp, m, n> Matx<_Tp, m, n>::all(_Tp alpha) +{ + Matx<_Tp, m, n> M; + for( int i = 0; i < m*n; i++ ) M.val[i] = alpha; + return M; +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::zeros() +{ + return all(0); +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::ones() +{ + return all(1); +} + +template inline 
+Matx<_Tp,m,n> Matx<_Tp,m,n>::eye() +{ + Matx<_Tp,m,n> M; + for(int i = 0; i < MIN(m,n); i++) + M(i,i) = 1; + return M; +} + +template inline _Tp Matx<_Tp, m, n>::dot(const Matx<_Tp, m, n>& M) const +{ + _Tp s = 0; + for( int i = 0; i < m*n; i++ ) s += val[i]*M.val[i]; + return s; +} + + +template inline double Matx<_Tp, m, n>::ddot(const Matx<_Tp, m, n>& M) const +{ + double s = 0; + for( int i = 0; i < m*n; i++ ) s += (double)val[i]*M.val[i]; + return s; +} + + + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::diag(const Matx<_Tp,MIN(m,n),1>& d) +{ + Matx<_Tp,m,n> M; + for(int i = 0; i < MIN(m,n); i++) + M(i,i) = d(i, 0); + return M; +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::randu(_Tp a, _Tp b) +{ + Matx<_Tp,m,n> M; + Mat matM(M, false); + cv::randu(matM, Scalar(a), Scalar(b)); + return M; +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::randn(_Tp a, _Tp b) +{ + Matx<_Tp,m,n> M; + Mat matM(M, false); + cv::randn(matM, Scalar(a), Scalar(b)); + return M; +} + +template template +inline Matx<_Tp, m, n>::operator Matx() const +{ + Matx M; + for( int i = 0; i < m*n; i++ ) M.val[i] = saturate_cast(val[i]); + return M; +} + + +template template inline +Matx<_Tp, m1, n1> Matx<_Tp, m, n>::reshape() const +{ + CV_DbgAssert(m1*n1 == m*n); + return (const Matx<_Tp, m1, n1>&)*this; +} + + +template +template inline +Matx<_Tp, m1, n1> Matx<_Tp, m, n>::get_minor(int i, int j) const +{ + CV_DbgAssert(0 <= i && i+m1 <= m && 0 <= j && j+n1 <= n); + Matx<_Tp, m1, n1> s; + for( int di = 0; di < m1; di++ ) + for( int dj = 0; dj < n1; dj++ ) + s(di, dj) = (*this)(i+di, j+dj); + return s; +} + + +template inline +Matx<_Tp, 1, n> Matx<_Tp, m, n>::row(int i) const +{ + CV_DbgAssert((unsigned)i < (unsigned)m); + return Matx<_Tp, 1, n>(&val[i*n]); +} + + +template inline +Matx<_Tp, m, 1> Matx<_Tp, m, n>::col(int j) const +{ + CV_DbgAssert((unsigned)j < (unsigned)n); + Matx<_Tp, m, 1> v; + for( int i = 0; i < m; i++ ) + v[i] = val[i*n + j]; + return v; +} + + +template inline 
+Matx<_Tp, MIN(m,n), 1> Matx<_Tp, m, n>::diag() const +{ + diag_type d; + for( int i = 0; i < MIN(m, n); i++ ) + d.val[i] = val[i*n + i]; + return d; +} + + +template inline +const _Tp& Matx<_Tp, m, n>::operator ()(int i, int j) const +{ + CV_DbgAssert( (unsigned)i < (unsigned)m && (unsigned)j < (unsigned)n ); + return this->val[i*n + j]; +} + + +template inline +_Tp& Matx<_Tp, m, n>::operator ()(int i, int j) +{ + CV_DbgAssert( (unsigned)i < (unsigned)m && (unsigned)j < (unsigned)n ); + return val[i*n + j]; +} + + +template inline +const _Tp& Matx<_Tp, m, n>::operator ()(int i) const +{ + CV_DbgAssert( (m == 1 || n == 1) && (unsigned)i < (unsigned)(m+n-1) ); + return val[i]; +} + + +template inline +_Tp& Matx<_Tp, m, n>::operator ()(int i) +{ + CV_DbgAssert( (m == 1 || n == 1) && (unsigned)i < (unsigned)(m+n-1) ); + return val[i]; +} + + +template static inline +Matx<_Tp1, m, n>& operator += (Matx<_Tp1, m, n>& a, const Matx<_Tp2, m, n>& b) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] + b.val[i]); + return a; +} + + +template static inline +Matx<_Tp1, m, n>& operator -= (Matx<_Tp1, m, n>& a, const Matx<_Tp2, m, n>& b) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] - b.val[i]); + return a; +} + + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_AddOp) +{ + for( int i = 0; i < m*n; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] + b.val[i]); +} + + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_SubOp) +{ + for( int i = 0; i < m*n; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] - b.val[i]); +} + + +template template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, _T2 alpha, Matx_ScaleOp) +{ + for( int i = 0; i < m*n; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] * alpha); +} + + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_MulOp) +{ + for( int i = 0; i < m*n; i++ 
) + val[i] = saturate_cast<_Tp>(a.val[i] * b.val[i]); +} + + +template template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b, Matx_MatMulOp) +{ + for( int i = 0; i < m; i++ ) + for( int j = 0; j < n; j++ ) + { + _Tp s = 0; + for( int k = 0; k < l; k++ ) + s += a(i, k) * b(k, j); + val[i*n + j] = s; + } +} + + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, n, m>& a, Matx_TOp) +{ + for( int i = 0; i < m; i++ ) + for( int j = 0; j < n; j++ ) + val[i*n + j] = a(j, i); +} + + +template static inline +Matx<_Tp, m, n> operator + (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + return Matx<_Tp, m, n>(a, b, Matx_AddOp()); +} + + +template static inline +Matx<_Tp, m, n> operator - (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + return Matx<_Tp, m, n>(a, b, Matx_SubOp()); +} + + +template static inline +Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, int alpha) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha); + return a; +} + +template static inline +Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, float alpha) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha); + return a; +} + +template static inline +Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, double alpha) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha); + return a; +} + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, int alpha) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, float alpha) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, double alpha) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (int alpha, const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, alpha, 
Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (float alpha, const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (double alpha, const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator - (const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, -1, Matx_ScaleOp()); +} + + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b) +{ + return Matx<_Tp, m, n>(a, b, Matx_MatMulOp()); +} + + +template static inline +Point_<_Tp> operator * (const Matx<_Tp, 2, 2>& a, const Point_<_Tp>& b) +{ + return Point_<_Tp>(a*Vec<_Tp,2>(b.x,b.y)); +} + + +template static inline +Point3_<_Tp> operator * (const Matx<_Tp, 3, 3>& a, const Point3_<_Tp>& b) +{ + return Point3_<_Tp>(a*Vec<_Tp,3>(b.x,b.y,b.z)); +} + + +template static inline +Point3_<_Tp> operator * (const Matx<_Tp, 3, 3>& a, const Point_<_Tp>& b) +{ + return Point3_<_Tp>(a*Vec<_Tp,3>(b.x, b.y, 1)); +} + + +template static inline +Matx<_Tp, 4, 1> operator * (const Matx<_Tp, 4, 4>& a, const Point3_<_Tp>& b) +{ + return a*Matx<_Tp, 4, 1>(b.x, b.y, b.z, 1); +} + + +template static inline +Scalar operator * (const Matx<_Tp, 4, 4>& a, const Scalar& b) +{ + return Scalar(a*Matx<_Tp, 4, 1>(b[0],b[1],b[2],b[3])); +} + + +template inline +Matx<_Tp, m, n> Matx<_Tp, m, n>::mul(const Matx<_Tp, m, n>& a) const +{ + return Matx<_Tp, m, n>(*this, a, Matx_MulOp()); +} + + +CV_EXPORTS int LU(float* A, size_t astep, int m, float* b, size_t bstep, int n); +CV_EXPORTS int LU(double* A, size_t astep, int m, double* b, size_t bstep, int n); +CV_EXPORTS bool Cholesky(float* A, size_t astep, int m, float* b, size_t bstep, int n); +CV_EXPORTS bool Cholesky(double* A, size_t astep, int m, double* b, size_t bstep, int n); + + +template struct CV_EXPORTS Matx_DetOp +{ + double operator ()(const Matx<_Tp, m, m>& a) 
const + { + Matx<_Tp, m, m> temp = a; + double p = LU(temp.val, m, m, 0, 0, 0); + if( p == 0 ) + return p; + for( int i = 0; i < m; i++ ) + p *= temp(i, i); + return p; + } +}; + + +template struct CV_EXPORTS Matx_DetOp<_Tp, 1> +{ + double operator ()(const Matx<_Tp, 1, 1>& a) const + { + return a(0,0); + } +}; + + +template struct CV_EXPORTS Matx_DetOp<_Tp, 2> +{ + double operator ()(const Matx<_Tp, 2, 2>& a) const + { + return a(0,0)*a(1,1) - a(0,1)*a(1,0); + } +}; + + +template struct CV_EXPORTS Matx_DetOp<_Tp, 3> +{ + double operator ()(const Matx<_Tp, 3, 3>& a) const + { + return a(0,0)*(a(1,1)*a(2,2) - a(2,1)*a(1,2)) - + a(0,1)*(a(1,0)*a(2,2) - a(2,0)*a(1,2)) + + a(0,2)*(a(1,0)*a(2,1) - a(2,0)*a(1,1)); + } +}; + +template static inline +double determinant(const Matx<_Tp, m, m>& a) +{ + return Matx_DetOp<_Tp, m>()(a); +} + + +template static inline +double trace(const Matx<_Tp, m, n>& a) +{ + _Tp s = 0; + for( int i = 0; i < std::min(m, n); i++ ) + s += a(i,i); + return s; +} + + +template inline +Matx<_Tp, n, m> Matx<_Tp, m, n>::t() const +{ + return Matx<_Tp, n, m>(*this, Matx_TOp()); +} + + +template struct CV_EXPORTS Matx_FastInvOp +{ + bool operator()(const Matx<_Tp, m, m>& a, Matx<_Tp, m, m>& b, int method) const + { + Matx<_Tp, m, m> temp = a; + + // assume that b is all 0's on input => make it a unity matrix + for( int i = 0; i < m; i++ ) + b(i, i) = (_Tp)1; + + if( method == DECOMP_CHOLESKY ) + return Cholesky(temp.val, m*sizeof(_Tp), m, b.val, m*sizeof(_Tp), m); + + return LU(temp.val, m*sizeof(_Tp), m, b.val, m*sizeof(_Tp), m) != 0; + } +}; + + +template struct CV_EXPORTS Matx_FastInvOp<_Tp, 2> +{ + bool operator()(const Matx<_Tp, 2, 2>& a, Matx<_Tp, 2, 2>& b, int) const + { + _Tp d = determinant(a); + if( d == 0 ) + return false; + d = 1/d; + b(1,1) = a(0,0)*d; + b(0,0) = a(1,1)*d; + b(0,1) = -a(0,1)*d; + b(1,0) = -a(1,0)*d; + return true; + } +}; + + +template struct CV_EXPORTS Matx_FastInvOp<_Tp, 3> +{ + bool operator()(const Matx<_Tp, 3, 3>& a, 
Matx<_Tp, 3, 3>& b, int) const + { + _Tp d = determinant(a); + if( d == 0 ) + return false; + d = 1/d; + b(0,0) = (a(1,1) * a(2,2) - a(1,2) * a(2,1)) * d; + b(0,1) = (a(0,2) * a(2,1) - a(0,1) * a(2,2)) * d; + b(0,2) = (a(0,1) * a(1,2) - a(0,2) * a(1,1)) * d; + + b(1,0) = (a(1,2) * a(2,0) - a(1,0) * a(2,2)) * d; + b(1,1) = (a(0,0) * a(2,2) - a(0,2) * a(2,0)) * d; + b(1,2) = (a(0,2) * a(1,0) - a(0,0) * a(1,2)) * d; + + b(2,0) = (a(1,0) * a(2,1) - a(1,1) * a(2,0)) * d; + b(2,1) = (a(0,1) * a(2,0) - a(0,0) * a(2,1)) * d; + b(2,2) = (a(0,0) * a(1,1) - a(0,1) * a(1,0)) * d; + return true; + } +}; + + +template inline +Matx<_Tp, n, m> Matx<_Tp, m, n>::inv(int method) const +{ + Matx<_Tp, n, m> b; + bool ok; + if( method == DECOMP_LU || method == DECOMP_CHOLESKY ) + ok = Matx_FastInvOp<_Tp, m>()(*this, b, method); + else + { + Mat A(*this, false), B(b, false); + ok = invert(A, B, method); + } + return ok ? b : Matx<_Tp, n, m>::zeros(); +} + + +template struct CV_EXPORTS Matx_FastSolveOp +{ + bool operator()(const Matx<_Tp, m, m>& a, const Matx<_Tp, m, n>& b, + Matx<_Tp, m, n>& x, int method) const + { + Matx<_Tp, m, m> temp = a; + x = b; + if( method == DECOMP_CHOLESKY ) + return Cholesky(temp.val, m*sizeof(_Tp), m, x.val, n*sizeof(_Tp), n); + + return LU(temp.val, m*sizeof(_Tp), m, x.val, n*sizeof(_Tp), n) != 0; + } +}; + + +template struct CV_EXPORTS Matx_FastSolveOp<_Tp, 2, 1> +{ + bool operator()(const Matx<_Tp, 2, 2>& a, const Matx<_Tp, 2, 1>& b, + Matx<_Tp, 2, 1>& x, int method) const + { + _Tp d = determinant(a); + if( d == 0 ) + return false; + d = 1/d; + x(0) = (b(0)*a(1,1) - b(1)*a(0,1))*d; + x(1) = (b(1)*a(0,0) - b(0)*a(1,0))*d; + return true; + } +}; + + +template struct CV_EXPORTS Matx_FastSolveOp<_Tp, 3, 1> +{ + bool operator()(const Matx<_Tp, 3, 3>& a, const Matx<_Tp, 3, 1>& b, + Matx<_Tp, 3, 1>& x, int method) const + { + _Tp d = determinant(a); + if( d == 0 ) + return false; + d = 1/d; + x(0) = d*(b(0)*(a(1,1)*a(2,2) - a(1,2)*a(2,1)) - + 
a(0,1)*(b(1)*a(2,2) - a(1,2)*b(2)) + + a(0,2)*(b(1)*a(2,1) - a(1,1)*b(2))); + + x(1) = d*(a(0,0)*(b(1)*a(2,2) - a(1,2)*b(2)) - + b(0)*(a(1,0)*a(2,2) - a(1,2)*a(2,0)) + + a(0,2)*(a(1,0)*b(2) - b(1)*a(2,0))); + + x(2) = d*(a(0,0)*(a(1,1)*b(2) - b(1)*a(2,1)) - + a(0,1)*(a(1,0)*b(2) - b(1)*a(2,0)) + + b(0)*(a(1,0)*a(2,1) - a(1,1)*a(2,0))); + return true; + } +}; + + +template template inline +Matx<_Tp, n, l> Matx<_Tp, m, n>::solve(const Matx<_Tp, m, l>& rhs, int method) const +{ + Matx<_Tp, n, l> x; + bool ok; + if( method == DECOMP_LU || method == DECOMP_CHOLESKY ) + ok = Matx_FastSolveOp<_Tp, m, l>()(*this, rhs, x, method); + else + { + Mat A(*this, false), B(rhs, false), X(x, false); + ok = cv::solve(A, B, X, method); + } + + return ok ? x : Matx<_Tp, n, l>::zeros(); +} + + +template static inline +double norm(const Matx<_Tp, m, n>& M) +{ + double s = 0; + for( int i = 0; i < m*n; i++ ) + s += (double)M.val[i]*M.val[i]; + return std::sqrt(s); +} + + +template static inline +double norm(const Matx<_Tp, m, n>& M, int normType) +{ + if( normType == NORM_INF ) + { + _Tp s = 0; + for( int i = 0; i < m*n; i++ ) + s = std::max(s, std::abs(M.val[i])); + return s; + } + + if( normType == NORM_L1 ) + { + _Tp s = 0; + for( int i = 0; i < m*n; i++ ) + s += std::abs(M.val[i]); + return s; + } + + CV_DbgAssert( normType == NORM_L2 ); + return norm(M); +} + + +template static inline +bool operator == (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + for( int i = 0; i < m*n; i++ ) + if( a.val[i] != b.val[i] ) return false; + return true; +} + +template static inline +bool operator != (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + return !(a == b); +} + + +template static inline +MatxCommaInitializer<_Tp, m, n> operator << (const Matx<_Tp, m, n>& mtx, _T2 val) +{ + MatxCommaInitializer<_Tp, m, n> commaInitializer((Matx<_Tp, m, n>*)&mtx); + return (commaInitializer, val); +} + +template inline +MatxCommaInitializer<_Tp, m, n>::MatxCommaInitializer(Matx<_Tp, m, n>* 
_mtx) + : dst(_mtx), idx(0) +{} + +template template inline +MatxCommaInitializer<_Tp, m, n>& MatxCommaInitializer<_Tp, m, n>::operator , (_T2 value) +{ + CV_DbgAssert( idx < m*n ); + dst->val[idx++] = saturate_cast<_Tp>(value); + return *this; +} + +template inline +Matx<_Tp, m, n> MatxCommaInitializer<_Tp, m, n>::operator *() const +{ + CV_DbgAssert( idx == n*m ); + return *dst; +} + +/////////////////////////// short vector (Vec) ///////////////////////////// + +template inline Vec<_Tp, cn>::Vec() +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0) + : Matx<_Tp, cn, 1>(v0) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1) + : Matx<_Tp, cn, 1>(v0, v1) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2) + : Matx<_Tp, cn, 1>(v0, v1, v2) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7, v8) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9) +{} + +template inline Vec<_Tp, cn>::Vec(const _Tp* values) + : Matx<_Tp, cn, 1>(values) +{} + + +template inline Vec<_Tp, cn>::Vec(const Vec<_Tp, cn>& m) + : Matx<_Tp, cn, 1>(m.val) +{} + +template 
inline +Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_AddOp op) +: Matx<_Tp, cn, 1>(a, b, op) +{} + +template inline +Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_SubOp op) +: Matx<_Tp, cn, 1>(a, b, op) +{} + +template template inline +Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, _T2 alpha, Matx_ScaleOp op) +: Matx<_Tp, cn, 1>(a, alpha, op) +{} + +template inline +Vec<_Tp, cn>& Vec<_Tp, cn>::operator = (const Matx<_Tp, cn, 1>& m) +{ + for( int i = 0; i < cn; i++ ) + this->val[i] = m.val[i]; + return *this; +} + +template inline Vec<_Tp, cn> Vec<_Tp, cn>::all(_Tp alpha) +{ + Vec v; + for( int i = 0; i < cn; i++ ) v.val[i] = alpha; + return v; +} + +template inline Vec<_Tp, cn> Vec<_Tp, cn>::mul(const Vec<_Tp, cn>& v) const +{ + Vec<_Tp, cn> w; + for( int i = 0; i < cn; i++ ) w.val[i] = saturate_cast<_Tp>(this->val[i]*v.val[i]); + return w; +} + +template inline Vec<_Tp, cn> Vec<_Tp, cn>::cross(const Vec<_Tp, cn>& v) const +{ + CV_Error(CV_StsError, "for arbitrary-size vector there is no cross-product defined"); + return Vec<_Tp, cn>(); +} + +template template +inline Vec<_Tp, cn>::operator Vec() const +{ + Vec v; + for( int i = 0; i < cn; i++ ) v.val[i] = saturate_cast(this->val[i]); + return v; +} + +template inline Vec<_Tp, cn>::operator CvScalar() const +{ + CvScalar s = {{0,0,0,0}}; + int i; + for( i = 0; i < std::min(cn, 4); i++ ) s.val[i] = this->val[i]; + for( ; i < 4; i++ ) s.val[i] = 0; + return s; +} + +template inline const _Tp& Vec<_Tp, cn>::operator [](int i) const +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template inline _Tp& Vec<_Tp, cn>::operator [](int i) +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template inline const _Tp& Vec<_Tp, cn>::operator ()(int i) const +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template inline _Tp& Vec<_Tp, cn>::operator ()(int i) +{ + CV_DbgAssert( 
(unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template static inline Vec<_Tp1, cn>& +operator += (Vec<_Tp1, cn>& a, const Vec<_Tp2, cn>& b) +{ + for( int i = 0; i < cn; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] + b.val[i]); + return a; +} + +template static inline Vec<_Tp1, cn>& +operator -= (Vec<_Tp1, cn>& a, const Vec<_Tp2, cn>& b) +{ + for( int i = 0; i < cn; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] - b.val[i]); + return a; +} + +template static inline Vec<_Tp, cn> +operator + (const Vec<_Tp, cn>& a, const Vec<_Tp, cn>& b) +{ + return Vec<_Tp, cn>(a, b, Matx_AddOp()); +} + +template static inline Vec<_Tp, cn> +operator - (const Vec<_Tp, cn>& a, const Vec<_Tp, cn>& b) +{ + return Vec<_Tp, cn>(a, b, Matx_SubOp()); +} + +template static inline +Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, int alpha) +{ + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*alpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, float alpha) +{ + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*alpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, double alpha) +{ + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*alpha); + return a; +} + + +template static inline Vec<_Tp, cn> +operator * (const Vec<_Tp, cn>& a, int alpha) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator * (int alpha, const Vec<_Tp, cn>& a) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator * (const Vec<_Tp, cn>& a, float alpha) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator * (float alpha, const Vec<_Tp, cn>& a) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator * (const Vec<_Tp, cn>& a, double alpha) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + 
+template static inline Vec<_Tp, cn> +operator * (double alpha, const Vec<_Tp, cn>& a) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator - (const Vec<_Tp, cn>& a) +{ + Vec<_Tp,cn> t; + for( int i = 0; i < cn; i++ ) t.val[i] = saturate_cast<_Tp>(-a.val[i]); + return t; +} + +template<> inline Vec Vec::cross(const Vec& v) const +{ + return Vec(val[1]*v.val[2] - val[2]*v.val[1], + val[2]*v.val[0] - val[0]*v.val[2], + val[0]*v.val[1] - val[1]*v.val[0]); +} + +template<> inline Vec Vec::cross(const Vec& v) const +{ + return Vec(val[1]*v.val[2] - val[2]*v.val[1], + val[2]*v.val[0] - val[0]*v.val[2], + val[0]*v.val[1] - val[1]*v.val[0]); +} + +template static inline +Vec& operator += (Vec& a, const Vec& b) +{ + a[0] = saturate_cast(a[0] + b[0]); + a[1] = saturate_cast(a[1] + b[1]); + return a; +} + +template static inline +Vec& operator += (Vec& a, const Vec& b) +{ + a[0] = saturate_cast(a[0] + b[0]); + a[1] = saturate_cast(a[1] + b[1]); + a[2] = saturate_cast(a[2] + b[2]); + return a; +} + + +template static inline +Vec& operator += (Vec& a, const Vec& b) +{ + a[0] = saturate_cast(a[0] + b[0]); + a[1] = saturate_cast(a[1] + b[1]); + a[2] = saturate_cast(a[2] + b[2]); + a[3] = saturate_cast(a[3] + b[3]); + return a; +} + + +template static inline +VecCommaInitializer<_Tp, cn> operator << (const Vec<_Tp, cn>& vec, _T2 val) +{ + VecCommaInitializer<_Tp, cn> commaInitializer((Vec<_Tp, cn>*)&vec); + return (commaInitializer, val); +} + +template inline +VecCommaInitializer<_Tp, cn>::VecCommaInitializer(Vec<_Tp, cn>* _vec) + : MatxCommaInitializer<_Tp, cn, 1>(_vec) +{} + +template template inline +VecCommaInitializer<_Tp, cn>& VecCommaInitializer<_Tp, cn>::operator , (_T2 value) +{ + CV_DbgAssert( this->idx < cn ); + this->dst->val[this->idx++] = saturate_cast<_Tp>(value); + return *this; +} + +template inline +Vec<_Tp, cn> VecCommaInitializer<_Tp, cn>::operator *() const +{ + CV_DbgAssert( this->idx == cn ); + return 
*this->dst; +} + +//////////////////////////////// Complex ////////////////////////////// + +template inline Complex<_Tp>::Complex() : re(0), im(0) {} +template inline Complex<_Tp>::Complex( _Tp _re, _Tp _im ) : re(_re), im(_im) {} +template template inline Complex<_Tp>::operator Complex() const +{ return Complex(saturate_cast(re), saturate_cast(im)); } +template inline Complex<_Tp> Complex<_Tp>::conj() const +{ return Complex<_Tp>(re, -im); } + +template static inline +bool operator == (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return a.re == b.re && a.im == b.im; } + +template static inline +bool operator != (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return a.re != b.re || a.im != b.im; } + +template static inline +Complex<_Tp> operator + (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return Complex<_Tp>( a.re + b.re, a.im + b.im ); } + +template static inline +Complex<_Tp>& operator += (Complex<_Tp>& a, const Complex<_Tp>& b) +{ a.re += b.re; a.im += b.im; return a; } + +template static inline +Complex<_Tp> operator - (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return Complex<_Tp>( a.re - b.re, a.im - b.im ); } + +template static inline +Complex<_Tp>& operator -= (Complex<_Tp>& a, const Complex<_Tp>& b) +{ a.re -= b.re; a.im -= b.im; return a; } + +template static inline +Complex<_Tp> operator - (const Complex<_Tp>& a) +{ return Complex<_Tp>(-a.re, -a.im); } + +template static inline +Complex<_Tp> operator * (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return Complex<_Tp>( a.re*b.re - a.im*b.im, a.re*b.im + a.im*b.re ); } + +template static inline +Complex<_Tp> operator * (const Complex<_Tp>& a, _Tp b) +{ return Complex<_Tp>( a.re*b, a.im*b ); } + +template static inline +Complex<_Tp> operator * (_Tp b, const Complex<_Tp>& a) +{ return Complex<_Tp>( a.re*b, a.im*b ); } + +template static inline +Complex<_Tp> operator + (const Complex<_Tp>& a, _Tp b) +{ return Complex<_Tp>( a.re + b, a.im ); } + +template static inline +Complex<_Tp> 
operator - (const Complex<_Tp>& a, _Tp b) +{ return Complex<_Tp>( a.re - b, a.im ); } + +template static inline +Complex<_Tp> operator + (_Tp b, const Complex<_Tp>& a) +{ return Complex<_Tp>( a.re + b, a.im ); } + +template static inline +Complex<_Tp> operator - (_Tp b, const Complex<_Tp>& a) +{ return Complex<_Tp>( b - a.re, -a.im ); } + +template static inline +Complex<_Tp>& operator += (Complex<_Tp>& a, _Tp b) +{ a.re += b; return a; } + +template static inline +Complex<_Tp>& operator -= (Complex<_Tp>& a, _Tp b) +{ a.re -= b; return a; } + +template static inline +Complex<_Tp>& operator *= (Complex<_Tp>& a, _Tp b) +{ a.re *= b; a.im *= b; return a; } + +template static inline +double abs(const Complex<_Tp>& a) +{ return std::sqrt( (double)a.re*a.re + (double)a.im*a.im); } + +template static inline +Complex<_Tp> operator / (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ + double t = 1./((double)b.re*b.re + (double)b.im*b.im); + return Complex<_Tp>( (_Tp)((a.re*b.re + a.im*b.im)*t), + (_Tp)((-a.re*b.im + a.im*b.re)*t) ); +} + +template static inline +Complex<_Tp>& operator /= (Complex<_Tp>& a, const Complex<_Tp>& b) +{ + return (a = a / b); +} + +template static inline +Complex<_Tp> operator / (const Complex<_Tp>& a, _Tp b) +{ + _Tp t = (_Tp)1/b; + return Complex<_Tp>( a.re*t, a.im*t ); +} + +template static inline +Complex<_Tp> operator / (_Tp b, const Complex<_Tp>& a) +{ + return Complex<_Tp>(b)/a; +} + +template static inline +Complex<_Tp> operator /= (const Complex<_Tp>& a, _Tp b) +{ + _Tp t = (_Tp)1/b; + a.re *= t; a.im *= t; return a; +} + +//////////////////////////////// 2D Point //////////////////////////////// + +template inline Point_<_Tp>::Point_() : x(0), y(0) {} +template inline Point_<_Tp>::Point_(_Tp _x, _Tp _y) : x(_x), y(_y) {} +template inline Point_<_Tp>::Point_(const Point_& pt) : x(pt.x), y(pt.y) {} +template inline Point_<_Tp>::Point_(const CvPoint& pt) : x((_Tp)pt.x), y((_Tp)pt.y) {} +template inline Point_<_Tp>::Point_(const 
CvPoint2D32f& pt) + : x(saturate_cast<_Tp>(pt.x)), y(saturate_cast<_Tp>(pt.y)) {} +template inline Point_<_Tp>::Point_(const Size_<_Tp>& sz) : x(sz.width), y(sz.height) {} +template inline Point_<_Tp>::Point_(const Vec<_Tp,2>& v) : x(v[0]), y(v[1]) {} +template inline Point_<_Tp>& Point_<_Tp>::operator = (const Point_& pt) +{ x = pt.x; y = pt.y; return *this; } + +template template inline Point_<_Tp>::operator Point_<_Tp2>() const +{ return Point_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y)); } +template inline Point_<_Tp>::operator CvPoint() const +{ return cvPoint(saturate_cast(x), saturate_cast(y)); } +template inline Point_<_Tp>::operator CvPoint2D32f() const +{ return cvPoint2D32f((float)x, (float)y); } +template inline Point_<_Tp>::operator Vec<_Tp, 2>() const +{ return Vec<_Tp, 2>(x, y); } + +template inline _Tp Point_<_Tp>::dot(const Point_& pt) const +{ return saturate_cast<_Tp>(x*pt.x + y*pt.y); } +template inline double Point_<_Tp>::ddot(const Point_& pt) const +{ return (double)x*pt.x + (double)y*pt.y; } + +template static inline Point_<_Tp>& +operator += (Point_<_Tp>& a, const Point_<_Tp>& b) +{ + a.x = saturate_cast<_Tp>(a.x + b.x); + a.y = saturate_cast<_Tp>(a.y + b.y); + return a; +} + +template static inline Point_<_Tp>& +operator -= (Point_<_Tp>& a, const Point_<_Tp>& b) +{ + a.x = saturate_cast<_Tp>(a.x - b.x); + a.y = saturate_cast<_Tp>(a.y - b.y); + return a; +} + +template static inline Point_<_Tp>& +operator *= (Point_<_Tp>& a, int b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + return a; +} + +template static inline Point_<_Tp>& +operator *= (Point_<_Tp>& a, float b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + return a; +} + +template static inline Point_<_Tp>& +operator *= (Point_<_Tp>& a, double b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + return a; +} + +template static inline double norm(const Point_<_Tp>& pt) +{ return 
std::sqrt((double)pt.x*pt.x + (double)pt.y*pt.y); } + +template static inline bool operator == (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ return a.x == b.x && a.y == b.y; } + +template static inline bool operator != (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ return a.x != b.x || a.y != b.y; } + +template static inline Point_<_Tp> operator + (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x + b.x), saturate_cast<_Tp>(a.y + b.y) ); } + +template static inline Point_<_Tp> operator - (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x - b.x), saturate_cast<_Tp>(a.y - b.y) ); } + +template static inline Point_<_Tp> operator - (const Point_<_Tp>& a) +{ return Point_<_Tp>( saturate_cast<_Tp>(-a.x), saturate_cast<_Tp>(-a.y) ); } + +template static inline Point_<_Tp> operator * (const Point_<_Tp>& a, int b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); } + +template static inline Point_<_Tp> operator * (int a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); } + +template static inline Point_<_Tp> operator * (const Point_<_Tp>& a, float b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); } + +template static inline Point_<_Tp> operator * (float a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); } + +template static inline Point_<_Tp> operator * (const Point_<_Tp>& a, double b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); } + +template static inline Point_<_Tp> operator * (double a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); } + +//////////////////////////////// 3D Point //////////////////////////////// + +template inline Point3_<_Tp>::Point3_() : x(0), y(0), z(0) {} +template inline Point3_<_Tp>::Point3_(_Tp _x, _Tp _y, _Tp _z) : 
x(_x), y(_y), z(_z) {} +template inline Point3_<_Tp>::Point3_(const Point3_& pt) : x(pt.x), y(pt.y), z(pt.z) {} +template inline Point3_<_Tp>::Point3_(const Point_<_Tp>& pt) : x(pt.x), y(pt.y), z(_Tp()) {} +template inline Point3_<_Tp>::Point3_(const CvPoint3D32f& pt) : + x(saturate_cast<_Tp>(pt.x)), y(saturate_cast<_Tp>(pt.y)), z(saturate_cast<_Tp>(pt.z)) {} +template inline Point3_<_Tp>::Point3_(const Vec<_Tp, 3>& v) : x(v[0]), y(v[1]), z(v[2]) {} + +template template inline Point3_<_Tp>::operator Point3_<_Tp2>() const +{ return Point3_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y), saturate_cast<_Tp2>(z)); } + +template inline Point3_<_Tp>::operator CvPoint3D32f() const +{ return cvPoint3D32f((float)x, (float)y, (float)z); } + +template inline Point3_<_Tp>::operator Vec<_Tp, 3>() const +{ return Vec<_Tp, 3>(x, y, z); } + +template inline Point3_<_Tp>& Point3_<_Tp>::operator = (const Point3_& pt) +{ x = pt.x; y = pt.y; z = pt.z; return *this; } + +template inline _Tp Point3_<_Tp>::dot(const Point3_& pt) const +{ return saturate_cast<_Tp>(x*pt.x + y*pt.y + z*pt.z); } +template inline double Point3_<_Tp>::ddot(const Point3_& pt) const +{ return (double)x*pt.x + (double)y*pt.y + (double)z*pt.z; } + +template inline Point3_<_Tp> Point3_<_Tp>::cross(const Point3_<_Tp>& pt) const +{ + return Point3_<_Tp>(y*pt.z - z*pt.y, z*pt.x - x*pt.z, x*pt.y - y*pt.x); +} + +template static inline Point3_<_Tp>& +operator += (Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ + a.x = saturate_cast<_Tp>(a.x + b.x); + a.y = saturate_cast<_Tp>(a.y + b.y); + a.z = saturate_cast<_Tp>(a.z + b.z); + return a; +} + +template static inline Point3_<_Tp>& +operator -= (Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ + a.x = saturate_cast<_Tp>(a.x - b.x); + a.y = saturate_cast<_Tp>(a.y - b.y); + a.z = saturate_cast<_Tp>(a.z - b.z); + return a; +} + +template static inline Point3_<_Tp>& +operator *= (Point3_<_Tp>& a, int b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + a.z 
= saturate_cast<_Tp>(a.z*b); + return a; +} + +template static inline Point3_<_Tp>& +operator *= (Point3_<_Tp>& a, float b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + a.z = saturate_cast<_Tp>(a.z*b); + return a; +} + +template static inline Point3_<_Tp>& +operator *= (Point3_<_Tp>& a, double b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + a.z = saturate_cast<_Tp>(a.z*b); + return a; +} + +template static inline double norm(const Point3_<_Tp>& pt) +{ return std::sqrt((double)pt.x*pt.x + (double)pt.y*pt.y + (double)pt.z*pt.z); } + +template static inline bool operator == (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ return a.x == b.x && a.y == b.y && a.z == b.z; } + +template static inline bool operator != (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ return a.x != b.x || a.y != b.y || a.z != b.z; } + +template static inline Point3_<_Tp> operator + (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x + b.x), + saturate_cast<_Tp>(a.y + b.y), + saturate_cast<_Tp>(a.z + b.z)); } + +template static inline Point3_<_Tp> operator - (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x - b.x), + saturate_cast<_Tp>(a.y - b.y), + saturate_cast<_Tp>(a.z - b.z)); } + +template static inline Point3_<_Tp> operator - (const Point3_<_Tp>& a) +{ return Point3_<_Tp>( saturate_cast<_Tp>(-a.x), + saturate_cast<_Tp>(-a.y), + saturate_cast<_Tp>(-a.z) ); } + +template static inline Point3_<_Tp> operator * (const Point3_<_Tp>& a, int b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x*b), + saturate_cast<_Tp>(a.y*b), + saturate_cast<_Tp>(a.z*b) ); } + +template static inline Point3_<_Tp> operator * (int a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(b.x*a), + saturate_cast<_Tp>(b.y*a), + saturate_cast<_Tp>(b.z*a) ); } + +template static inline Point3_<_Tp> operator * (const Point3_<_Tp>& a, float b) +{ return Point3_<_Tp>( 
saturate_cast<_Tp>(a.x*b), + saturate_cast<_Tp>(a.y*b), + saturate_cast<_Tp>(a.z*b) ); } + +template static inline Point3_<_Tp> operator * (float a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(b.x*a), + saturate_cast<_Tp>(b.y*a), + saturate_cast<_Tp>(b.z*a) ); } + +template static inline Point3_<_Tp> operator * (const Point3_<_Tp>& a, double b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x*b), + saturate_cast<_Tp>(a.y*b), + saturate_cast<_Tp>(a.z*b) ); } + +template static inline Point3_<_Tp> operator * (double a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(b.x*a), + saturate_cast<_Tp>(b.y*a), + saturate_cast<_Tp>(b.z*a) ); } + +//////////////////////////////// Size //////////////////////////////// + +template inline Size_<_Tp>::Size_() + : width(0), height(0) {} +template inline Size_<_Tp>::Size_(_Tp _width, _Tp _height) + : width(_width), height(_height) {} +template inline Size_<_Tp>::Size_(const Size_& sz) + : width(sz.width), height(sz.height) {} +template inline Size_<_Tp>::Size_(const CvSize& sz) + : width(saturate_cast<_Tp>(sz.width)), height(saturate_cast<_Tp>(sz.height)) {} +template inline Size_<_Tp>::Size_(const CvSize2D32f& sz) + : width(saturate_cast<_Tp>(sz.width)), height(saturate_cast<_Tp>(sz.height)) {} +template inline Size_<_Tp>::Size_(const Point_<_Tp>& pt) : width(pt.x), height(pt.y) {} + +template template inline Size_<_Tp>::operator Size_<_Tp2>() const +{ return Size_<_Tp2>(saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height)); } +template inline Size_<_Tp>::operator CvSize() const +{ return cvSize(saturate_cast(width), saturate_cast(height)); } +template inline Size_<_Tp>::operator CvSize2D32f() const +{ return cvSize2D32f((float)width, (float)height); } + +template inline Size_<_Tp>& Size_<_Tp>::operator = (const Size_<_Tp>& sz) +{ width = sz.width; height = sz.height; return *this; } +template static inline Size_<_Tp> operator * (const Size_<_Tp>& a, _Tp b) +{ return Size_<_Tp>(a.width * b, 
a.height * b); } +template static inline Size_<_Tp> operator + (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ return Size_<_Tp>(a.width + b.width, a.height + b.height); } +template static inline Size_<_Tp> operator - (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ return Size_<_Tp>(a.width - b.width, a.height - b.height); } +template inline _Tp Size_<_Tp>::area() const { return width*height; } + +template static inline Size_<_Tp>& operator += (Size_<_Tp>& a, const Size_<_Tp>& b) +{ a.width += b.width; a.height += b.height; return a; } +template static inline Size_<_Tp>& operator -= (Size_<_Tp>& a, const Size_<_Tp>& b) +{ a.width -= b.width; a.height -= b.height; return a; } + +template static inline bool operator == (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ return a.width == b.width && a.height == b.height; } +template static inline bool operator != (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ return a.width != b.width || a.height != b.height; } + +//////////////////////////////// Rect //////////////////////////////// + + +template inline Rect_<_Tp>::Rect_() : x(0), y(0), width(0), height(0) {} +template inline Rect_<_Tp>::Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height) : x(_x), y(_y), width(_width), height(_height) {} +template inline Rect_<_Tp>::Rect_(const Rect_<_Tp>& r) : x(r.x), y(r.y), width(r.width), height(r.height) {} +template inline Rect_<_Tp>::Rect_(const CvRect& r) : x((_Tp)r.x), y((_Tp)r.y), width((_Tp)r.width), height((_Tp)r.height) {} +template inline Rect_<_Tp>::Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz) : + x(org.x), y(org.y), width(sz.width), height(sz.height) {} +template inline Rect_<_Tp>::Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2) +{ + x = std::min(pt1.x, pt2.x); y = std::min(pt1.y, pt2.y); + width = std::max(pt1.x, pt2.x) - x; height = std::max(pt1.y, pt2.y) - y; +} +template inline Rect_<_Tp>& Rect_<_Tp>::operator = ( const Rect_<_Tp>& r ) +{ x = r.x; y = r.y; width = r.width; height = r.height; return *this; } + 
+template inline Point_<_Tp> Rect_<_Tp>::tl() const { return Point_<_Tp>(x,y); } +template inline Point_<_Tp> Rect_<_Tp>::br() const { return Point_<_Tp>(x+width, y+height); } + +template static inline Rect_<_Tp>& operator += ( Rect_<_Tp>& a, const Point_<_Tp>& b ) +{ a.x += b.x; a.y += b.y; return a; } +template static inline Rect_<_Tp>& operator -= ( Rect_<_Tp>& a, const Point_<_Tp>& b ) +{ a.x -= b.x; a.y -= b.y; return a; } + +template static inline Rect_<_Tp>& operator += ( Rect_<_Tp>& a, const Size_<_Tp>& b ) +{ a.width += b.width; a.height += b.height; return a; } + +template static inline Rect_<_Tp>& operator -= ( Rect_<_Tp>& a, const Size_<_Tp>& b ) +{ a.width -= b.width; a.height -= b.height; return a; } + +template static inline Rect_<_Tp>& operator &= ( Rect_<_Tp>& a, const Rect_<_Tp>& b ) +{ + _Tp x1 = std::max(a.x, b.x), y1 = std::max(a.y, b.y); + a.width = std::min(a.x + a.width, b.x + b.width) - x1; + a.height = std::min(a.y + a.height, b.y + b.height) - y1; + a.x = x1; a.y = y1; + if( a.width <= 0 || a.height <= 0 ) + a = Rect(); + return a; +} + +template static inline Rect_<_Tp>& operator |= ( Rect_<_Tp>& a, const Rect_<_Tp>& b ) +{ + _Tp x1 = std::min(a.x, b.x), y1 = std::min(a.y, b.y); + a.width = std::max(a.x + a.width, b.x + b.width) - x1; + a.height = std::max(a.y + a.height, b.y + b.height) - y1; + a.x = x1; a.y = y1; + return a; +} + +template inline Size_<_Tp> Rect_<_Tp>::size() const { return Size_<_Tp>(width, height); } +template inline _Tp Rect_<_Tp>::area() const { return width*height; } + +template template inline Rect_<_Tp>::operator Rect_<_Tp2>() const +{ return Rect_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y), + saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height)); } +template inline Rect_<_Tp>::operator CvRect() const +{ return cvRect(saturate_cast(x), saturate_cast(y), + saturate_cast(width), saturate_cast(height)); } + +template inline bool Rect_<_Tp>::contains(const Point_<_Tp>& pt) const +{ return x <= pt.x && 
pt.x < x + width && y <= pt.y && pt.y < y + height; } + +template static inline bool operator == (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + return a.x == b.x && a.y == b.y && a.width == b.width && a.height == b.height; +} + +template static inline bool operator != (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + return a.x != b.x || a.y != b.y || a.width != b.width || a.height != b.height; +} + +template static inline Rect_<_Tp> operator + (const Rect_<_Tp>& a, const Point_<_Tp>& b) +{ + return Rect_<_Tp>( a.x + b.x, a.y + b.y, a.width, a.height ); +} + +template static inline Rect_<_Tp> operator - (const Rect_<_Tp>& a, const Point_<_Tp>& b) +{ + return Rect_<_Tp>( a.x - b.x, a.y - b.y, a.width, a.height ); +} + +template static inline Rect_<_Tp> operator + (const Rect_<_Tp>& a, const Size_<_Tp>& b) +{ + return Rect_<_Tp>( a.x, a.y, a.width + b.width, a.height + b.height ); +} + +template static inline Rect_<_Tp> operator & (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + Rect_<_Tp> c = a; + return c &= b; +} + +template static inline Rect_<_Tp> operator | (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + Rect_<_Tp> c = a; + return c |= b; +} + +template inline bool Point_<_Tp>::inside( const Rect_<_Tp>& r ) const +{ + return r.contains(*this); +} + +inline RotatedRect::RotatedRect() { angle = 0; } +inline RotatedRect::RotatedRect(const Point2f& _center, const Size2f& _size, float _angle) + : center(_center), size(_size), angle(_angle) {} +inline RotatedRect::RotatedRect(const CvBox2D& box) + : center(box.center), size(box.size), angle(box.angle) {} +inline RotatedRect::operator CvBox2D() const +{ + CvBox2D box; box.center = center; box.size = size; box.angle = angle; + return box; +} + +//////////////////////////////// Scalar_ /////////////////////////////// + +template inline Scalar_<_Tp>::Scalar_() +{ this->val[0] = this->val[1] = this->val[2] = this->val[3] = 0; } + +template inline Scalar_<_Tp>::Scalar_(_Tp v0, _Tp v1, _Tp v2, _Tp v3) +{ this->val[0] = v0; 
this->val[1] = v1; this->val[2] = v2; this->val[3] = v3; } + +template inline Scalar_<_Tp>::Scalar_(const CvScalar& s) +{ + this->val[0] = saturate_cast<_Tp>(s.val[0]); + this->val[1] = saturate_cast<_Tp>(s.val[1]); + this->val[2] = saturate_cast<_Tp>(s.val[2]); + this->val[3] = saturate_cast<_Tp>(s.val[3]); +} + +template inline Scalar_<_Tp>::Scalar_(_Tp v0) +{ this->val[0] = v0; this->val[1] = this->val[2] = this->val[3] = 0; } + +template inline Scalar_<_Tp> Scalar_<_Tp>::all(_Tp v0) +{ return Scalar_<_Tp>(v0, v0, v0, v0); } +template inline Scalar_<_Tp>::operator CvScalar() const +{ return cvScalar(this->val[0], this->val[1], this->val[2], this->val[3]); } + +template template inline Scalar_<_Tp>::operator Scalar_() const +{ + return Scalar_(saturate_cast(this->val[0]), + saturate_cast(this->val[1]), + saturate_cast(this->val[2]), + saturate_cast(this->val[3])); +} + +template static inline Scalar_<_Tp>& operator += (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a.val[0] = saturate_cast<_Tp>(a.val[0] + b.val[0]); + a.val[1] = saturate_cast<_Tp>(a.val[1] + b.val[1]); + a.val[2] = saturate_cast<_Tp>(a.val[2] + b.val[2]); + a.val[3] = saturate_cast<_Tp>(a.val[3] + b.val[3]); + return a; +} + +template static inline Scalar_<_Tp>& operator -= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a.val[0] = saturate_cast<_Tp>(a.val[0] - b.val[0]); + a.val[1] = saturate_cast<_Tp>(a.val[1] - b.val[1]); + a.val[2] = saturate_cast<_Tp>(a.val[2] - b.val[2]); + a.val[3] = saturate_cast<_Tp>(a.val[3] - b.val[3]); + return a; +} + +template static inline Scalar_<_Tp>& operator *= ( Scalar_<_Tp>& a, _Tp v ) +{ + a.val[0] = saturate_cast<_Tp>(a.val[0] * v); + a.val[1] = saturate_cast<_Tp>(a.val[1] * v); + a.val[2] = saturate_cast<_Tp>(a.val[2] * v); + a.val[3] = saturate_cast<_Tp>(a.val[3] * v); + return a; +} + +template inline Scalar_<_Tp> Scalar_<_Tp>::mul(const Scalar_<_Tp>& t, double scale ) const +{ + return Scalar_<_Tp>( saturate_cast<_Tp>(this->val[0]*t.val[0]*scale), + 
saturate_cast<_Tp>(this->val[1]*t.val[1]*scale), + saturate_cast<_Tp>(this->val[2]*t.val[2]*scale), + saturate_cast<_Tp>(this->val[3]*t.val[3]*scale)); +} + +template static inline bool operator == ( const Scalar_<_Tp>& a, const Scalar_<_Tp>& b ) +{ + return a.val[0] == b.val[0] && a.val[1] == b.val[1] && + a.val[2] == b.val[2] && a.val[3] == b.val[3]; +} + +template static inline bool operator != ( const Scalar_<_Tp>& a, const Scalar_<_Tp>& b ) +{ + return a.val[0] != b.val[0] || a.val[1] != b.val[1] || + a.val[2] != b.val[2] || a.val[3] != b.val[3]; +} + +template static inline Scalar_<_Tp> operator + (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] + b.val[0]), + saturate_cast<_Tp>(a.val[1] + b.val[1]), + saturate_cast<_Tp>(a.val[2] + b.val[2]), + saturate_cast<_Tp>(a.val[3] + b.val[3])); +} + +template static inline Scalar_<_Tp> operator - (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] - b.val[0]), + saturate_cast<_Tp>(a.val[1] - b.val[1]), + saturate_cast<_Tp>(a.val[2] - b.val[2]), + saturate_cast<_Tp>(a.val[3] - b.val[3])); +} + +template static inline Scalar_<_Tp> operator * (const Scalar_<_Tp>& a, _Tp alpha) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] * alpha), + saturate_cast<_Tp>(a.val[1] * alpha), + saturate_cast<_Tp>(a.val[2] * alpha), + saturate_cast<_Tp>(a.val[3] * alpha)); +} + +template static inline Scalar_<_Tp> operator * (_Tp alpha, const Scalar_<_Tp>& a) +{ + return a*alpha; +} + +template static inline Scalar_<_Tp> operator - (const Scalar_<_Tp>& a) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(-a.val[0]), saturate_cast<_Tp>(-a.val[1]), + saturate_cast<_Tp>(-a.val[2]), saturate_cast<_Tp>(-a.val[3])); +} + + +template static inline Scalar_<_Tp> +operator * (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a[0]*b[0] - a[1]*b[1] - a[2]*b[2] - a[3]*b[3]), + saturate_cast<_Tp>(a[0]*b[1] + a[1]*b[0] + 
a[2]*b[3] - a[3]*b[2]), + saturate_cast<_Tp>(a[0]*b[2] - a[1]*b[3] + a[2]*b[0] - a[3]*b[1]), + saturate_cast<_Tp>(a[0]*b[3] + a[1]*b[2] - a[2]*b[1] - a[3]*b[0])); +} + +template static inline Scalar_<_Tp>& +operator *= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a = a*b; + return a; +} + +template inline Scalar_<_Tp> Scalar_<_Tp>::conj() const +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(this->val[0]), + saturate_cast<_Tp>(-this->val[1]), + saturate_cast<_Tp>(-this->val[2]), + saturate_cast<_Tp>(-this->val[3])); +} + +template inline bool Scalar_<_Tp>::isReal() const +{ + return this->val[1] == 0 && this->val[2] == 0 && this->val[3] == 0; +} + +template static inline +Scalar_<_Tp> operator / (const Scalar_<_Tp>& a, _Tp alpha) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] / alpha), + saturate_cast<_Tp>(a.val[1] / alpha), + saturate_cast<_Tp>(a.val[2] / alpha), + saturate_cast<_Tp>(a.val[3] / alpha)); +} + +template static inline +Scalar_ operator / (const Scalar_& a, float alpha) +{ + float s = 1/alpha; + return Scalar_(a.val[0]*s, a.val[1]*s, a.val[2]*s, a.val[3]*s); +} + +template static inline +Scalar_ operator / (const Scalar_& a, double alpha) +{ + double s = 1/alpha; + return Scalar_(a.val[0]*s, a.val[1]*s, a.val[2]*s, a.val[3]*s); +} + +template static inline +Scalar_<_Tp>& operator /= (Scalar_<_Tp>& a, _Tp alpha) +{ + a = a/alpha; + return a; +} + +template static inline +Scalar_<_Tp> operator / (_Tp a, const Scalar_<_Tp>& b) +{ + _Tp s = a/(b[0]*b[0] + b[1]*b[1] + b[2]*b[2] + b[3]*b[3]); + return b.conj()*s; +} + +template static inline +Scalar_<_Tp> operator / (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return a*((_Tp)1/b); +} + +template static inline +Scalar_<_Tp>& operator /= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a = a/b; + return a; +} + +//////////////////////////////// Range ///////////////////////////////// + +inline Range::Range() : start(0), end(0) {} +inline Range::Range(int _start, int _end) : start(_start), end(_end) {} 
+inline Range::Range(const CvSlice& slice) : start(slice.start_index), end(slice.end_index) +{ + if( start == 0 && end == CV_WHOLE_SEQ_END_INDEX ) + *this = Range::all(); +} + +inline int Range::size() const { return end - start; } +inline bool Range::empty() const { return start == end; } +inline Range Range::all() { return Range(INT_MIN, INT_MAX); } + +static inline bool operator == (const Range& r1, const Range& r2) +{ return r1.start == r2.start && r1.end == r2.end; } + +static inline bool operator != (const Range& r1, const Range& r2) +{ return !(r1 == r2); } + +static inline bool operator !(const Range& r) +{ return r.start == r.end; } + +static inline Range operator & (const Range& r1, const Range& r2) +{ + Range r(std::max(r1.start, r2.start), std::min(r1.end, r2.end)); + r.end = std::max(r.end, r.start); + return r; +} + +static inline Range& operator &= (Range& r1, const Range& r2) +{ + r1 = r1 & r2; + return r1; +} + +static inline Range operator + (const Range& r1, int delta) +{ + return Range(r1.start + delta, r1.end + delta); +} + +static inline Range operator + (int delta, const Range& r1) +{ + return Range(r1.start + delta, r1.end + delta); +} + +static inline Range operator - (const Range& r1, int delta) +{ + return r1 + (-delta); +} + +inline Range::operator CvSlice() const +{ return *this != Range::all() ? cvSlice(start, end) : CV_WHOLE_SEQ; } + + + +//////////////////////////////// Vector //////////////////////////////// + +// template vector class. 
It is similar to STL's vector, +// with a few important differences: +// 1) it can be created on top of user-allocated data w/o copying it +// 2) vector b = a means copying the header, +// not the underlying data (use clone() to make a deep copy) +template class CV_EXPORTS Vector +{ +public: + typedef _Tp value_type; + typedef _Tp* iterator; + typedef const _Tp* const_iterator; + typedef _Tp& reference; + typedef const _Tp& const_reference; + + struct CV_EXPORTS Hdr + { + Hdr() : data(0), datastart(0), refcount(0), size(0), capacity(0) {}; + _Tp* data; + _Tp* datastart; + int* refcount; + size_t size; + size_t capacity; + }; + + Vector() {} + Vector(size_t _size) { resize(_size); } + Vector(size_t _size, const _Tp& val) + { + resize(_size); + for(size_t i = 0; i < _size; i++) + hdr.data[i] = val; + } + Vector(_Tp* _data, size_t _size, bool _copyData=false) + { set(_data, _size, _copyData); } + + template Vector(const Vec<_Tp, n>& vec) + { set((_Tp*)&vec.val[0], n, true); } + + Vector(const std::vector<_Tp>& vec, bool _copyData=false) + { set((_Tp*)&vec[0], vec.size(), _copyData); } + + Vector(const Vector& d) { *this = d; } + + Vector(const Vector& d, const Range& r_) + { + Range r = r_ == Range::all() ? Range(0, d.size()) : r_; + /*if( r == Range::all() ) + r = Range(0, d.size());*/ + if( r.size() > 0 && r.start >= 0 && r.end <= d.size() ) + { + if( d.hdr.refcount ) + CV_XADD(d.hdr.refcount, 1); + hdr.refcount = d.hdr.refcount; + hdr.datastart = d.hdr.datastart; + hdr.data = d.hdr.data + r.start; + hdr.capacity = hdr.size = r.size(); + } + } + + Vector<_Tp>& operator = (const Vector& d) + { + if( this != &d ) + { + if( d.hdr.refcount ) + CV_XADD(d.hdr.refcount, 1); + release(); + hdr = d.hdr; + } + return *this; + } + + ~Vector() { release(); } + + Vector<_Tp> clone() const + { return hdr.data ? 
Vector<_Tp>(hdr.data, hdr.size, true) : Vector<_Tp>(); } + + void copyTo(Vector<_Tp>& vec) const + { + size_t i, sz = size(); + vec.resize(sz); + const _Tp* src = hdr.data; + _Tp* dst = vec.hdr.data; + for( i = 0; i < sz; i++ ) + dst[i] = src[i]; + } + + void copyTo(std::vector<_Tp>& vec) const + { + size_t i, sz = size(); + vec.resize(sz); + const _Tp* src = hdr.data; + _Tp* dst = sz ? &vec[0] : 0; + for( i = 0; i < sz; i++ ) + dst[i] = src[i]; + } + + operator CvMat() const + { return cvMat((int)size(), 1, type(), (void*)hdr.data); } + + _Tp& operator [] (size_t i) { CV_DbgAssert( i < size() ); return hdr.data[i]; } + const _Tp& operator [] (size_t i) const { CV_DbgAssert( i < size() ); return hdr.data[i]; } + Vector operator() (const Range& r) const { return Vector(*this, r); } + _Tp& back() { CV_DbgAssert(!empty()); return hdr.data[hdr.size-1]; } + const _Tp& back() const { CV_DbgAssert(!empty()); return hdr.data[hdr.size-1]; } + _Tp& front() { CV_DbgAssert(!empty()); return hdr.data[0]; } + const _Tp& front() const { CV_DbgAssert(!empty()); return hdr.data[0]; } + + _Tp* begin() { return hdr.data; } + _Tp* end() { return hdr.data + hdr.size; } + const _Tp* begin() const { return hdr.data; } + const _Tp* end() const { return hdr.data + hdr.size; } + + void addref() { if( hdr.refcount ) CV_XADD(hdr.refcount, 1); } + void release() + { + if( hdr.refcount && CV_XADD(hdr.refcount, -1) == 1 ) + { + delete[] hdr.datastart; + delete hdr.refcount; + } + hdr = Hdr(); + } + + void set(_Tp* _data, size_t _size, bool _copyData=false) + { + if( !_copyData ) + { + release(); + hdr.data = hdr.datastart = _data; + hdr.size = hdr.capacity = _size; + hdr.refcount = 0; + } + else + { + reserve(_size); + for( size_t i = 0; i < _size; i++ ) + hdr.data[i] = _data[i]; + hdr.size = _size; + } + } + + void reserve(size_t newCapacity) + { + _Tp* newData; + int* newRefcount; + size_t i, oldSize = hdr.size; + if( (!hdr.refcount || *hdr.refcount == 1) && hdr.capacity >= newCapacity ) + 
return; + newCapacity = std::max(newCapacity, oldSize); + newData = new _Tp[newCapacity]; + newRefcount = new int(1); + for( i = 0; i < oldSize; i++ ) + newData[i] = hdr.data[i]; + release(); + hdr.data = hdr.datastart = newData; + hdr.capacity = newCapacity; + hdr.size = oldSize; + hdr.refcount = newRefcount; + } + + void resize(size_t newSize) + { + size_t i; + newSize = std::max(newSize, (size_t)0); + if( (!hdr.refcount || *hdr.refcount == 1) && hdr.size == newSize ) + return; + if( newSize > hdr.capacity ) + reserve(std::max(newSize, std::max((size_t)4, hdr.capacity*2))); + for( i = hdr.size; i < newSize; i++ ) + hdr.data[i] = _Tp(); + hdr.size = newSize; + } + + Vector<_Tp>& push_back(const _Tp& elem) + { + if( hdr.size == hdr.capacity ) + reserve( std::max((size_t)4, hdr.capacity*2) ); + hdr.data[hdr.size++] = elem; + return *this; + } + + Vector<_Tp>& pop_back() + { + if( hdr.size > 0 ) + --hdr.size; + return *this; + } + + size_t size() const { return hdr.size; } + size_t capacity() const { return hdr.capacity; } + bool empty() const { return hdr.size == 0; } + void clear() { resize(0); } + int type() const { return DataType<_Tp>::type; } + +protected: + Hdr hdr; +}; + + +template inline typename DataType<_Tp>::work_type +dot(const Vector<_Tp>& v1, const Vector<_Tp>& v2) +{ + typedef typename DataType<_Tp>::work_type _Tw; + size_t i, n = v1.size(); + assert(v1.size() == v2.size()); + + _Tw s = 0; + const _Tp *ptr1 = &v1[0], *ptr2 = &v2[0]; + for( i = 0; i <= n - 4; i += 4 ) + s += (_Tw)ptr1[i]*ptr2[i] + (_Tw)ptr1[i+1]*ptr2[i+1] + + (_Tw)ptr1[i+2]*ptr2[i+2] + (_Tw)ptr1[i+3]*ptr2[i+3]; + for( ; i < n; i++ ) + s += (_Tw)ptr1[i]*ptr2[i]; + return s; +} + +// Multiply-with-Carry RNG +inline RNG::RNG() { state = 0xffffffff; } +inline RNG::RNG(uint64 _state) { state = _state ? 
_state : 0xffffffff; } +inline unsigned RNG::next() +{ + state = (uint64)(unsigned)state*CV_RNG_COEFF + (unsigned)(state >> 32); + return (unsigned)state; +} + +inline RNG::operator uchar() { return (uchar)next(); } +inline RNG::operator schar() { return (schar)next(); } +inline RNG::operator ushort() { return (ushort)next(); } +inline RNG::operator short() { return (short)next(); } +inline RNG::operator unsigned() { return next(); } +inline unsigned RNG::operator ()(unsigned N) {return (unsigned)uniform(0,N);} +inline unsigned RNG::operator ()() {return next();} +inline RNG::operator int() { return (int)next(); } +// * (2^32-1)^-1 +inline RNG::operator float() { return next()*2.3283064365386962890625e-10f; } +inline RNG::operator double() +{ + unsigned t = next(); + return (((uint64)t << 32) | next())*5.4210108624275221700372640043497e-20; +} +inline int RNG::uniform(int a, int b) { return a == b ? a : next()%(b - a) + a; } +inline float RNG::uniform(float a, float b) { return ((float)*this)*(b - a) + a; } +inline double RNG::uniform(double a, double b) { return ((double)*this)*(b - a) + a; } + +inline TermCriteria::TermCriteria() : type(0), maxCount(0), epsilon(0) {} +inline TermCriteria::TermCriteria(int _type, int _maxCount, double _epsilon) + : type(_type), maxCount(_maxCount), epsilon(_epsilon) {} +inline TermCriteria::TermCriteria(const CvTermCriteria& criteria) + : type(criteria.type), maxCount(criteria.max_iter), epsilon(criteria.epsilon) {} +inline TermCriteria::operator CvTermCriteria() const +{ return cvTermCriteria(type, maxCount, epsilon); } + +inline uchar* LineIterator::operator *() { return ptr; } +inline LineIterator& LineIterator::operator ++() +{ + int mask = err < 0 ? 
-1 : 0; + err += minusDelta + (plusDelta & mask); + ptr += minusStep + (plusStep & mask); + return *this; +} +inline LineIterator LineIterator::operator ++(int) +{ + LineIterator it = *this; + ++(*this); + return it; +} +inline Point LineIterator::pos() const +{ + Point p; + p.y = (int)((ptr - ptr0)/step); + p.x = (int)(((ptr - ptr0) - p.y*step)/elemSize); + return p; +} + +/////////////////////////////// AutoBuffer //////////////////////////////////////// + +template inline AutoBuffer<_Tp, fixed_size>::AutoBuffer() +{ + ptr = buf; + size = fixed_size; +} + +template inline AutoBuffer<_Tp, fixed_size>::AutoBuffer(size_t _size) +{ + ptr = buf; + size = fixed_size; + allocate(_size); +} + +template inline AutoBuffer<_Tp, fixed_size>::~AutoBuffer() +{ deallocate(); } + +template inline void AutoBuffer<_Tp, fixed_size>::allocate(size_t _size) +{ + if(_size <= size) + return; + deallocate(); + if(_size > fixed_size) + { + ptr = cv::allocate<_Tp>(_size); + size = _size; + } +} + +template inline void AutoBuffer<_Tp, fixed_size>::deallocate() +{ + if( ptr != buf ) + { + cv::deallocate<_Tp>(ptr, size); + ptr = buf; + size = fixed_size; + } +} + +template inline AutoBuffer<_Tp, fixed_size>::operator _Tp* () +{ return ptr; } + +template inline AutoBuffer<_Tp, fixed_size>::operator const _Tp* () const +{ return ptr; } + + +/////////////////////////////////// Ptr //////////////////////////////////////// + +template inline Ptr<_Tp>::Ptr() : obj(0), refcount(0) {} +template inline Ptr<_Tp>::Ptr(_Tp* _obj) : obj(_obj) +{ + if(obj) + { + refcount = (int*)fastMalloc(sizeof(*refcount)); + *refcount = 1; + } + else + refcount = 0; +} + +template inline void Ptr<_Tp>::addref() +{ if( refcount ) CV_XADD(refcount, 1); } + +template inline void Ptr<_Tp>::release() +{ + if( refcount && CV_XADD(refcount, -1) == 1 ) + { + delete_obj(); + fastFree(refcount); + } + refcount = 0; + obj = 0; +} + +template inline void Ptr<_Tp>::delete_obj() +{ + if( obj ) delete obj; +} + +template inline 
Ptr<_Tp>::~Ptr() { release(); } + +template inline Ptr<_Tp>::Ptr(const Ptr<_Tp>& ptr) +{ + obj = ptr.obj; + refcount = ptr.refcount; + addref(); +} + +template inline Ptr<_Tp>& Ptr<_Tp>::operator = (const Ptr<_Tp>& ptr) +{ + int* _refcount = ptr.refcount; + if( _refcount ) + CV_XADD(_refcount, 1); + release(); + obj = ptr.obj; + refcount = _refcount; + return *this; +} + +template inline _Tp* Ptr<_Tp>::operator -> () { return obj; } +template inline const _Tp* Ptr<_Tp>::operator -> () const { return obj; } + +template inline Ptr<_Tp>::operator _Tp* () { return obj; } +template inline Ptr<_Tp>::operator const _Tp*() const { return obj; } + +template inline bool Ptr<_Tp>::empty() const { return obj == 0; } + +//// specializied implementations of Ptr::delete_obj() for classic OpenCV types + +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); + +//////////////////////////////////////// XML & YAML I/O //////////////////////////////////// + +CV_EXPORTS_W void write( FileStorage& fs, const string& name, int value ); +CV_EXPORTS_W void write( FileStorage& fs, const string& name, float value ); +CV_EXPORTS_W void write( FileStorage& fs, const string& name, double value ); +CV_EXPORTS_W void write( FileStorage& fs, const string& name, const string& value ); + +template inline void write(FileStorage& fs, const _Tp& value) +{ write(fs, string(), value); } + +CV_EXPORTS void writeScalar( FileStorage& fs, int value ); +CV_EXPORTS void writeScalar( FileStorage& fs, float value ); +CV_EXPORTS void writeScalar( FileStorage& fs, double value ); +CV_EXPORTS void writeScalar( FileStorage& fs, const string& value ); + +template<> inline void write( FileStorage& fs, const int& value ) +{ + writeScalar(fs, value); +} + +template<> inline void write( FileStorage& 
fs, const float& value ) +{ + writeScalar(fs, value); +} + +template<> inline void write( FileStorage& fs, const double& value ) +{ + writeScalar(fs, value); +} + +template<> inline void write( FileStorage& fs, const string& value ) +{ + writeScalar(fs, value); +} + +template inline void write(FileStorage& fs, const Point_<_Tp>& pt ) +{ + write(fs, pt.x); + write(fs, pt.y); +} + +template inline void write(FileStorage& fs, const Point3_<_Tp>& pt ) +{ + write(fs, pt.x); + write(fs, pt.y); + write(fs, pt.z); +} + +template inline void write(FileStorage& fs, const Size_<_Tp>& sz ) +{ + write(fs, sz.width); + write(fs, sz.height); +} + +template inline void write(FileStorage& fs, const Complex<_Tp>& c ) +{ + write(fs, c.re); + write(fs, c.im); +} + +template inline void write(FileStorage& fs, const Rect_<_Tp>& r ) +{ + write(fs, r.x); + write(fs, r.y); + write(fs, r.width); + write(fs, r.height); +} + +template inline void write(FileStorage& fs, const Vec<_Tp, cn>& v ) +{ + for(int i = 0; i < cn; i++) + write(fs, v.val[i]); +} + +template inline void write(FileStorage& fs, const Scalar_<_Tp>& s ) +{ + write(fs, s.val[0]); + write(fs, s.val[1]); + write(fs, s.val[2]); + write(fs, s.val[3]); +} + +inline void write(FileStorage& fs, const Range& r ) +{ + write(fs, r.start); + write(fs, r.end); +} + +class CV_EXPORTS WriteStructContext +{ +public: + WriteStructContext(FileStorage& _fs, const string& name, + int flags, const string& typeName=string()); + ~WriteStructContext(); + FileStorage* fs; +}; + +template inline void write(FileStorage& fs, const string& name, const Point_<_Tp>& pt ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, pt.x); + write(fs, pt.y); +} + +template inline void write(FileStorage& fs, const string& name, const Point3_<_Tp>& pt ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, pt.x); + write(fs, pt.y); + write(fs, pt.z); +} + +template inline void write(FileStorage& fs, const string& name, 
const Size_<_Tp>& sz ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, sz.width); + write(fs, sz.height); +} + +template inline void write(FileStorage& fs, const string& name, const Complex<_Tp>& c ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, c.re); + write(fs, c.im); +} + +template inline void write(FileStorage& fs, const string& name, const Rect_<_Tp>& r ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, r.x); + write(fs, r.y); + write(fs, r.width); + write(fs, r.height); +} + +template inline void write(FileStorage& fs, const string& name, const Vec<_Tp, cn>& v ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + for(int i = 0; i < cn; i++) + write(fs, v.val[i]); +} + +template inline void write(FileStorage& fs, const string& name, const Scalar_<_Tp>& s ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, s.val[0]); + write(fs, s.val[1]); + write(fs, s.val[2]); + write(fs, s.val[3]); +} + +inline void write(FileStorage& fs, const string& name, const Range& r ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, r.start); + write(fs, r.end); +} + +template class CV_EXPORTS VecWriterProxy +{ +public: + VecWriterProxy( FileStorage* _fs ) : fs(_fs) {} + void operator()(const vector<_Tp>& vec) const + { + size_t i, count = vec.size(); + for( i = 0; i < count; i++ ) + write( *fs, vec[i] ); + } + FileStorage* fs; +}; + +template class CV_EXPORTS VecWriterProxy<_Tp,1> +{ +public: + VecWriterProxy( FileStorage* _fs ) : fs(_fs) {} + void operator()(const vector<_Tp>& vec) const + { + int _fmt = DataType<_Tp>::fmt; + char fmt[] = { (char)((_fmt>>8)+'1'), (char)_fmt, '\0' }; + fs->writeRaw( string(fmt), (uchar*)&vec[0], vec.size()*sizeof(_Tp) ); + } + FileStorage* fs; +}; + + +template static inline void write( FileStorage& fs, const vector<_Tp>& vec ) +{ + VecWriterProxy<_Tp, DataType<_Tp>::fmt != 0> w(&fs); + w(vec); +} + 
+template static inline FileStorage& +operator << ( FileStorage& fs, const vector<_Tp>& vec ) +{ + VecWriterProxy<_Tp, DataType<_Tp>::generic_type == 0> w(&fs); + w(vec); + return fs; +} + +CV_EXPORTS_W void write( FileStorage& fs, const string& name, const Mat& value ); +CV_EXPORTS void write( FileStorage& fs, const string& name, const SparseMat& value ); + +template static inline FileStorage& operator << (FileStorage& fs, const _Tp& value) +{ + if( !fs.isOpened() ) + return fs; + if( fs.state == FileStorage::NAME_EXPECTED + FileStorage::INSIDE_MAP ) + CV_Error( CV_StsError, "No element name has been given" ); + write( fs, fs.elname, value ); + if( fs.state & FileStorage::INSIDE_MAP ) + fs.state = FileStorage::NAME_EXPECTED + FileStorage::INSIDE_MAP; + return fs; +} + +CV_EXPORTS FileStorage& operator << (FileStorage& fs, const string& str); + +static inline FileStorage& operator << (FileStorage& fs, const char* str) +{ return (fs << string(str)); } + +inline FileNode::FileNode() : fs(0), node(0) {} +inline FileNode::FileNode(const CvFileStorage* _fs, const CvFileNode* _node) + : fs(_fs), node(_node) {} + +inline FileNode::FileNode(const FileNode& _node) : fs(_node.fs), node(_node.node) {} + +inline int FileNode::type() const { return !node ? NONE : (node->tag & TYPE_MASK); } +inline bool FileNode::empty() const { return node == 0; } +inline bool FileNode::isNone() const { return type() == NONE; } +inline bool FileNode::isSeq() const { return type() == SEQ; } +inline bool FileNode::isMap() const { return type() == MAP; } +inline bool FileNode::isInt() const { return type() == INT; } +inline bool FileNode::isReal() const { return type() == REAL; } +inline bool FileNode::isString() const { return type() == STR; } +inline bool FileNode::isNamed() const { return !node ? false : (node->tag & NAMED) != 0; } +inline size_t FileNode::size() const +{ + int t = type(); + return t == MAP ? ((CvSet*)node->data.map)->active_count : + t == SEQ ? 
node->data.seq->total : node != 0; +} + +inline CvFileNode* FileNode::operator *() { return (CvFileNode*)node; } +inline const CvFileNode* FileNode::operator* () const { return node; } + +static inline void read(const FileNode& node, int& value, int default_value) +{ + value = !node.node ? default_value : + CV_NODE_IS_INT(node.node->tag) ? node.node->data.i : + CV_NODE_IS_REAL(node.node->tag) ? cvRound(node.node->data.f) : 0x7fffffff; +} + +static inline void read(const FileNode& node, bool& value, bool default_value) +{ + int temp; read(node, temp, (int)default_value); + value = temp != 0; +} + +static inline void read(const FileNode& node, uchar& value, uchar default_value) +{ + int temp; read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline void read(const FileNode& node, schar& value, schar default_value) +{ + int temp; read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline void read(const FileNode& node, ushort& value, ushort default_value) +{ + int temp; read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline void read(const FileNode& node, short& value, short default_value) +{ + int temp; read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline void read(const FileNode& node, float& value, float default_value) +{ + value = !node.node ? default_value : + CV_NODE_IS_INT(node.node->tag) ? (float)node.node->data.i : + CV_NODE_IS_REAL(node.node->tag) ? (float)node.node->data.f : 1e30f; +} + +static inline void read(const FileNode& node, double& value, double default_value) +{ + value = !node.node ? default_value : + CV_NODE_IS_INT(node.node->tag) ? (double)node.node->data.i : + CV_NODE_IS_REAL(node.node->tag) ? node.node->data.f : 1e300; +} + +static inline void read(const FileNode& node, string& value, const string& default_value) +{ + value = !node.node ? default_value : CV_NODE_IS_STRING(node.node->tag) ? 
string(node.node->data.str.ptr) : string(""); +} + +CV_EXPORTS_W void read(const FileNode& node, Mat& mat, const Mat& default_mat=Mat() ); +CV_EXPORTS void read(const FileNode& node, SparseMat& mat, const SparseMat& default_mat=SparseMat() ); + +inline FileNode::operator int() const +{ + int value; + read(*this, value, 0); + return value; +} +inline FileNode::operator float() const +{ + float value; + read(*this, value, 0.f); + return value; +} +inline FileNode::operator double() const +{ + double value; + read(*this, value, 0.); + return value; +} +inline FileNode::operator string() const +{ + string value; + read(*this, value, value); + return value; +} + +inline void FileNode::readRaw( const string& fmt, uchar* vec, size_t len ) const +{ + begin().readRaw( fmt, vec, len ); +} + +template class CV_EXPORTS VecReaderProxy +{ +public: + VecReaderProxy( FileNodeIterator* _it ) : it(_it) {} + void operator()(vector<_Tp>& vec, size_t count) const + { + count = std::min(count, it->remaining); + vec.resize(count); + for( size_t i = 0; i < count; i++, ++(*it) ) + read(**it, vec[i], _Tp()); + } + FileNodeIterator* it; +}; + +template class CV_EXPORTS VecReaderProxy<_Tp,1> +{ +public: + VecReaderProxy( FileNodeIterator* _it ) : it(_it) {} + void operator()(vector<_Tp>& vec, size_t count) const + { + size_t remaining = it->remaining, cn = DataType<_Tp>::channels; + int _fmt = DataType<_Tp>::fmt; + char fmt[] = { (char)((_fmt>>8)+'1'), (char)_fmt, '\0' }; + size_t remaining1 = remaining/cn; + count = count < remaining1 ? 
count : remaining1; + vec.resize(count); + it->readRaw( string(fmt), (uchar*)&vec[0], count*sizeof(_Tp) ); + } + FileNodeIterator* it; +}; + +template static inline void +read( FileNodeIterator& it, vector<_Tp>& vec, size_t maxCount=(size_t)INT_MAX ) +{ + VecReaderProxy<_Tp, DataType<_Tp>::fmt != 0> r(&it); + r(vec, maxCount); +} + +template static inline void +read( FileNode& node, vector<_Tp>& vec, const vector<_Tp>& default_value=vector<_Tp>() ) +{ + read( node.begin(), vec ); +} + +inline FileNodeIterator FileNode::begin() const +{ + return FileNodeIterator(fs, node); +} + +inline FileNodeIterator FileNode::end() const +{ + return FileNodeIterator(fs, node, size()); +} + +inline FileNode FileNodeIterator::operator *() const +{ return FileNode(fs, (const CvFileNode*)reader.ptr); } + +inline FileNode FileNodeIterator::operator ->() const +{ return FileNode(fs, (const CvFileNode*)reader.ptr); } + +template static inline FileNodeIterator& operator >> (FileNodeIterator& it, _Tp& value) +{ read( *it, value, _Tp()); return ++it; } + +template static inline +FileNodeIterator& operator >> (FileNodeIterator& it, vector<_Tp>& vec) +{ + VecReaderProxy<_Tp, DataType<_Tp>::fmt != 0> r(&it); + r(vec, (size_t)INT_MAX); + return it; +} + +template static inline void operator >> (const FileNode& n, _Tp& value) +{ read( n, value, _Tp()); } + +template static inline void operator >> (const FileNode& n, vector<_Tp>& vec) +{ FileNodeIterator it = n.begin(); it >> vec; } + +static inline bool operator == (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return it1.fs == it2.fs && it1.container == it2.container && + it1.reader.ptr == it2.reader.ptr && it1.remaining == it2.remaining; +} + +static inline bool operator != (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return !(it1 == it2); +} + +static inline ptrdiff_t operator - (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return it2.remaining - it1.remaining; +} + +static inline bool 
operator < (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return it1.remaining > it2.remaining; +} + +inline FileNode FileStorage::getFirstTopLevelNode() const +{ + FileNode r = root(); + FileNodeIterator it = r.begin(); + return it != r.end() ? *it : FileNode(); +} + +//////////////////////////////////////// Various algorithms //////////////////////////////////// + +template static inline _Tp gcd(_Tp a, _Tp b) +{ + if( a < b ) + std::swap(a, b); + while( b > 0 ) + { + _Tp r = a % b; + a = b; + b = r; + } + return a; +} + +/****************************************************************************************\ + + Generic implementation of QuickSort algorithm + Use it as: vector<_Tp> a; ... sort(a,); + + The current implementation was derived from *BSD system qsort(): + + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + +\****************************************************************************************/ + +template void sort( vector<_Tp>& vec, _LT LT=_LT() ) +{ + int isort_thresh = 7; + int sp = 0; + + struct + { + _Tp *lb; + _Tp *ub; + } stack[48]; + + size_t total = vec.size(); + + if( total <= 1 ) + return; + + _Tp* arr = &vec[0]; + stack[0].lb = arr; + stack[0].ub = arr + (total - 1); + + while( sp >= 0 ) + { + _Tp* left = stack[sp].lb; + _Tp* right = stack[sp--].ub; + + for(;;) + { + int i, n = (int)(right - left) + 1, m; + _Tp* ptr; + _Tp* ptr2; + + if( n <= isort_thresh ) + { + insert_sort: + for( ptr = left + 1; ptr <= right; ptr++ ) + { + for( ptr2 = ptr; ptr2 > left && LT(ptr2[0],ptr2[-1]); ptr2--) + std::swap( ptr2[0], ptr2[-1] ); + } + break; + } + else + { + _Tp* left0; + _Tp* left1; + _Tp* right0; + _Tp* right1; + _Tp* pivot; + _Tp* a; + _Tp* b; + _Tp* c; + int swap_cnt = 0; + + left0 = left; + right0 = right; + pivot = left + (n/2); + + if( n > 40 ) + { + int d = n / 8; + a = left, b = left + d, c = left + 2*d; + left = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); + + a = pivot - d, b = pivot, c = pivot + d; + pivot = LT(*a, *b) ? 
(LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); + + a = right - 2*d, b = right - d, c = right; + right = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); + } + + a = left, b = pivot, c = right; + pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); + if( pivot != left0 ) + { + std::swap( *pivot, *left0 ); + pivot = left0; + } + left = left1 = left0 + 1; + right = right1 = right0; + + for(;;) + { + while( left <= right && !LT(*pivot, *left) ) + { + if( !LT(*left, *pivot) ) + { + if( left > left1 ) + std::swap( *left1, *left ); + swap_cnt = 1; + left1++; + } + left++; + } + + while( left <= right && !LT(*right, *pivot) ) + { + if( !LT(*pivot, *right) ) + { + if( right < right1 ) + std::swap( *right1, *right ); + swap_cnt = 1; + right1--; + } + right--; + } + + if( left > right ) + break; + std::swap( *left, *right ); + swap_cnt = 1; + left++; + right--; + } + + if( swap_cnt == 0 ) + { + left = left0, right = right0; + goto insert_sort; + } + + n = std::min( (int)(left1 - left0), (int)(left - left1) ); + for( i = 0; i < n; i++ ) + std::swap( left0[i], left[i-n] ); + + n = std::min( (int)(right0 - right1), (int)(right1 - right) ); + for( i = 0; i < n; i++ ) + std::swap( left[i], right0[i-n+1] ); + n = (int)(left - left1); + m = (int)(right1 - right); + if( n > 1 ) + { + if( m > 1 ) + { + if( n > m ) + { + stack[++sp].lb = left0; + stack[sp].ub = left0 + n - 1; + left = right0 - m + 1, right = right0; + } + else + { + stack[++sp].lb = right0 - m + 1; + stack[sp].ub = right0; + left = left0, right = left0 + n - 1; + } + } + else + left = left0, right = left0 + n - 1; + } + else if( m > 1 ) + left = right0 - m + 1, right = right0; + else + break; + } + } + } +} + +template class CV_EXPORTS LessThan +{ +public: + bool operator()(const _Tp& a, const _Tp& b) const { return a < b; } +}; + +template class CV_EXPORTS GreaterEq +{ +public: + 
bool operator()(const _Tp& a, const _Tp& b) const { return a >= b; } +}; + +template class CV_EXPORTS LessThanIdx +{ +public: + LessThanIdx( const _Tp* _arr ) : arr(_arr) {} + bool operator()(int a, int b) const { return arr[a] < arr[b]; } + const _Tp* arr; +}; + +template class CV_EXPORTS GreaterEqIdx +{ +public: + GreaterEqIdx( const _Tp* _arr ) : arr(_arr) {} + bool operator()(int a, int b) const { return arr[a] >= arr[b]; } + const _Tp* arr; +}; + + +// This function splits the input sequence or set into one or more equivalence classes and +// returns the vector of labels - 0-based class indexes for each element. +// predicate(a,b) returns true if the two sequence elements certainly belong to the same class. +// +// The algorithm is described in "Introduction to Algorithms" +// by Cormen, Leiserson and Rivest, the chapter "Data structures for disjoint sets" +template int +partition( const vector<_Tp>& _vec, vector& labels, + _EqPredicate predicate=_EqPredicate()) +{ + int i, j, N = (int)_vec.size(); + const _Tp* vec = &_vec[0]; + + const int PARENT=0; + const int RANK=1; + + vector _nodes(N*2); + int (*nodes)[2] = (int(*)[2])&_nodes[0]; + + // The first O(N) pass: create N single-vertex trees + for(i = 0; i < N; i++) + { + nodes[i][PARENT]=-1; + nodes[i][RANK] = 0; + } + + // The main O(N^2) pass: merge connected components + for( i = 0; i < N; i++ ) + { + int root = i; + + // find root + while( nodes[root][PARENT] >= 0 ) + root = nodes[root][PARENT]; + + for( j = 0; j < N; j++ ) + { + if( i == j || !predicate(vec[i], vec[j])) + continue; + int root2 = j; + + while( nodes[root2][PARENT] >= 0 ) + root2 = nodes[root2][PARENT]; + + if( root2 != root ) + { + // unite both trees + int rank = nodes[root][RANK], rank2 = nodes[root2][RANK]; + if( rank > rank2 ) + nodes[root2][PARENT] = root; + else + { + nodes[root][PARENT] = root2; + nodes[root2][RANK] += rank == rank2; + root = root2; + } + assert( nodes[root][PARENT] < 0 ); + + int k = j, parent; + + // compress the 
path from node2 to root + while( (parent = nodes[k][PARENT]) >= 0 ) + { + nodes[k][PARENT] = root; + k = parent; + } + + // compress the path from node to root + k = i; + while( (parent = nodes[k][PARENT]) >= 0 ) + { + nodes[k][PARENT] = root; + k = parent; + } + } + } + } + + // Final O(N) pass: enumerate classes + labels.resize(N); + int nclasses = 0; + + for( i = 0; i < N; i++ ) + { + int root = i; + while( nodes[root][PARENT] >= 0 ) + root = nodes[root][PARENT]; + // re-use the rank as the class label + if( nodes[root][RANK] >= 0 ) + nodes[root][RANK] = ~nclasses++; + labels[i] = ~nodes[root][RANK]; + } + + return nclasses; +} + + +////////////////////////////////////////////////////////////////////////////// + +// bridge C++ => C Seq API +CV_EXPORTS schar* seqPush( CvSeq* seq, const void* element=0); +CV_EXPORTS schar* seqPushFront( CvSeq* seq, const void* element=0); +CV_EXPORTS void seqPop( CvSeq* seq, void* element=0); +CV_EXPORTS void seqPopFront( CvSeq* seq, void* element=0); +CV_EXPORTS void seqPopMulti( CvSeq* seq, void* elements, + int count, int in_front=0 ); +CV_EXPORTS void seqRemove( CvSeq* seq, int index ); +CV_EXPORTS void clearSeq( CvSeq* seq ); +CV_EXPORTS schar* getSeqElem( const CvSeq* seq, int index ); +CV_EXPORTS void seqRemoveSlice( CvSeq* seq, CvSlice slice ); +CV_EXPORTS void seqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr ); + +template inline Seq<_Tp>::Seq() : seq(0) {} +template inline Seq<_Tp>::Seq( const CvSeq* _seq ) : seq((CvSeq*)_seq) +{ + CV_Assert(!_seq || _seq->elem_size == sizeof(_Tp)); +} + +template inline Seq<_Tp>::Seq( MemStorage& storage, + int headerSize ) +{ + CV_Assert(headerSize >= (int)sizeof(CvSeq)); + seq = cvCreateSeq(DataType<_Tp>::type, headerSize, sizeof(_Tp), storage); +} + +template inline _Tp& Seq<_Tp>::operator [](int idx) +{ return *(_Tp*)getSeqElem(seq, idx); } + +template inline const _Tp& Seq<_Tp>::operator [](int idx) const +{ return *(_Tp*)getSeqElem(seq, idx); } + +template 
inline SeqIterator<_Tp> Seq<_Tp>::begin() const +{ return SeqIterator<_Tp>(*this); } + +template inline SeqIterator<_Tp> Seq<_Tp>::end() const +{ return SeqIterator<_Tp>(*this, true); } + +template inline size_t Seq<_Tp>::size() const +{ return seq ? seq->total : 0; } + +template inline int Seq<_Tp>::type() const +{ return seq ? CV_MAT_TYPE(seq->flags) : 0; } + +template inline int Seq<_Tp>::depth() const +{ return seq ? CV_MAT_DEPTH(seq->flags) : 0; } + +template inline int Seq<_Tp>::channels() const +{ return seq ? CV_MAT_CN(seq->flags) : 0; } + +template inline size_t Seq<_Tp>::elemSize() const +{ return seq ? seq->elem_size : 0; } + +template inline size_t Seq<_Tp>::index(const _Tp& elem) const +{ return cvSeqElemIdx(seq, &elem); } + +template inline void Seq<_Tp>::push_back(const _Tp& elem) +{ cvSeqPush(seq, &elem); } + +template inline void Seq<_Tp>::push_front(const _Tp& elem) +{ cvSeqPushFront(seq, &elem); } + +template inline void Seq<_Tp>::push_back(const _Tp* elem, size_t count) +{ cvSeqPushMulti(seq, elem, (int)count, 0); } + +template inline void Seq<_Tp>::push_front(const _Tp* elem, size_t count) +{ cvSeqPushMulti(seq, elem, (int)count, 1); } + +template inline _Tp& Seq<_Tp>::back() +{ return *(_Tp*)getSeqElem(seq, -1); } + +template inline const _Tp& Seq<_Tp>::back() const +{ return *(const _Tp*)getSeqElem(seq, -1); } + +template inline _Tp& Seq<_Tp>::front() +{ return *(_Tp*)getSeqElem(seq, 0); } + +template inline const _Tp& Seq<_Tp>::front() const +{ return *(const _Tp*)getSeqElem(seq, 0); } + +template inline bool Seq<_Tp>::empty() const +{ return !seq || seq->total == 0; } + +template inline void Seq<_Tp>::clear() +{ if(seq) clearSeq(seq); } + +template inline void Seq<_Tp>::pop_back() +{ seqPop(seq); } + +template inline void Seq<_Tp>::pop_front() +{ seqPopFront(seq); } + +template inline void Seq<_Tp>::pop_back(_Tp* elem, size_t count) +{ seqPopMulti(seq, elem, (int)count, 0); } + +template inline void Seq<_Tp>::pop_front(_Tp* elem, size_t 
count) +{ seqPopMulti(seq, elem, (int)count, 1); } + +template inline void Seq<_Tp>::insert(int idx, const _Tp& elem) +{ seqInsert(seq, idx, &elem); } + +template inline void Seq<_Tp>::insert(int idx, const _Tp* elems, size_t count) +{ + CvMat m = cvMat(1, count, DataType<_Tp>::type, elems); + seqInsertSlice(seq, idx, &m); +} + +template inline void Seq<_Tp>::remove(int idx) +{ seqRemove(seq, idx); } + +template inline void Seq<_Tp>::remove(const Range& r) +{ seqRemoveSlice(seq, r); } + +template inline void Seq<_Tp>::copyTo(vector<_Tp>& vec, const Range& range) const +{ + size_t len = !seq ? 0 : range == Range::all() ? seq->total : range.end - range.start; + vec.resize(len); + if( seq && len ) + cvCvtSeqToArray(seq, &vec[0], range); +} + +template inline Seq<_Tp>::operator vector<_Tp>() const +{ + vector<_Tp> vec; + copyTo(vec); + return vec; +} + +template inline SeqIterator<_Tp>::SeqIterator() +{ memset(this, 0, sizeof(*this)); } + +template inline SeqIterator<_Tp>::SeqIterator(const Seq<_Tp>& seq, bool seekEnd) +{ + cvStartReadSeq(seq.seq, this); + index = seekEnd ? 
seq.seq->total : 0; +} + +template inline void SeqIterator<_Tp>::seek(size_t pos) +{ + cvSetSeqReaderPos(this, (int)pos, false); + index = pos; +} + +template inline size_t SeqIterator<_Tp>::tell() const +{ return index; } + +template inline _Tp& SeqIterator<_Tp>::operator *() +{ return *(_Tp*)ptr; } + +template inline const _Tp& SeqIterator<_Tp>::operator *() const +{ return *(const _Tp*)ptr; } + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator ++() +{ + CV_NEXT_SEQ_ELEM(sizeof(_Tp), *this); + if( ++index >= seq->total*2 ) + index = 0; + return *this; +} + +template inline SeqIterator<_Tp> SeqIterator<_Tp>::operator ++(int) const +{ + SeqIterator<_Tp> it = *this; + ++*this; + return it; +} + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator --() +{ + CV_PREV_SEQ_ELEM(sizeof(_Tp), *this); + if( --index < 0 ) + index = seq->total*2-1; + return *this; +} + +template inline SeqIterator<_Tp> SeqIterator<_Tp>::operator --(int) const +{ + SeqIterator<_Tp> it = *this; + --*this; + return it; +} + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator +=(int delta) +{ + cvSetSeqReaderPos(this, delta, 1); + index += delta; + int n = seq->total*2; + if( index < 0 ) + index += n; + if( index >= n ) + index -= n; + return *this; +} + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator -=(int delta) +{ + return (*this += -delta); +} + +template inline ptrdiff_t operator - (const SeqIterator<_Tp>& a, + const SeqIterator<_Tp>& b) +{ + ptrdiff_t delta = a.index - b.index, n = a.seq->total; + if( std::abs(static_cast(delta)) > n ) + delta += delta < 0 ? 
n : -n; + return delta; +} + +template inline bool operator == (const SeqIterator<_Tp>& a, + const SeqIterator<_Tp>& b) +{ + return a.seq == b.seq && a.index == b.index; +} + +template inline bool operator != (const SeqIterator<_Tp>& a, + const SeqIterator<_Tp>& b) +{ + return !(a == b); +} + + +template struct CV_EXPORTS RTTIImpl +{ +public: + static int isInstance(const void* ptr) + { + static _ClsName dummy; + union + { + const void* p; + const void** pp; + } a, b; + a.p = &dummy; + b.p = ptr; + return *a.pp == *b.pp; + } + static void release(void** dbptr) + { + if(dbptr && *dbptr) + { + delete (_ClsName*)*dbptr; + *dbptr = 0; + } + } + static void* read(CvFileStorage* fs, CvFileNode* n) + { + FileNode fn(fs, n); + _ClsName* obj = new _ClsName; + if(obj->read(fn)) + return obj; + delete obj; + return 0; + } + + static void write(CvFileStorage* _fs, const char* name, const void* ptr, CvAttrList) + { + if(ptr && _fs) + { + FileStorage fs(_fs); + fs.fs.addref(); + ((const _ClsName*)ptr)->write(fs, string(name)); + } + } + + static void* clone(const void* ptr) + { + if(!ptr) + return 0; + return new _ClsName(*(const _ClsName*)ptr); + } +}; + + +class CV_EXPORTS Formatter +{ +public: + virtual ~Formatter() {} + virtual void write(std::ostream& out, const Mat& m, const int* params=0, int nparams=0) const = 0; + virtual void write(std::ostream& out, const void* data, int nelems, int type, + const int* params=0, int nparams=0) const = 0; + static const Formatter* get(const char* fmt=""); + static const Formatter* setDefault(const Formatter* fmt); +}; + + +struct CV_EXPORTS Formatted +{ + Formatted(const Mat& m, const Formatter* fmt, + const vector& params); + Formatted(const Mat& m, const Formatter* fmt, + const int* params=0); + Mat mtx; + const Formatter* fmt; + vector params; +}; + + +/** Writes a point to an output stream in Matlab notation + */ +template inline std::ostream& operator<<(std::ostream& out, const Point_<_Tp>& p) +{ + out << "[" << p.x << ", " << p.y 
<< "]"; + return out; +} + +/** Writes a point to an output stream in Matlab notation + */ +template inline std::ostream& operator<<(std::ostream& out, const Point3_<_Tp>& p) +{ + out << "[" << p.x << ", " << p.y << ", " << p.z << "]"; + return out; +} + +static inline Formatted format(const Mat& mtx, const char* fmt, + const vector& params=vector()) +{ + return Formatted(mtx, Formatter::get(fmt), params); +} + +template static inline Formatted format(const vector >& vec, + const char* fmt, const vector& params=vector()) +{ + return Formatted(Mat(vec), Formatter::get(fmt), params); +} + +template static inline Formatted format(const vector >& vec, + const char* fmt, const vector& params=vector()) +{ + return Formatted(Mat(vec), Formatter::get(fmt), params); +} + +/** \brief prints Mat to the output stream in Matlab notation + * use like + @verbatim + Mat my_mat = Mat::eye(3,3,CV_32F); + std::cout << my_mat; + @endverbatim + */ +static inline std::ostream& operator << (std::ostream& out, const Mat& mtx) +{ + Formatter::get()->write(out, mtx); + return out; +} + +/** \brief prints Mat to the output stream allows in the specified notation (see format) + * use like + @verbatim + Mat my_mat = Mat::eye(3,3,CV_32F); + std::cout << my_mat; + @endverbatim + */ +static inline std::ostream& operator << (std::ostream& out, const Formatted& fmtd) +{ + fmtd.fmt->write(out, fmtd.mtx); + return out; +} + + +template static inline std::ostream& operator << (std::ostream& out, + const vector >& vec) +{ + Formatter::get()->write(out, Mat(vec)); + return out; +} + + +template static inline std::ostream& operator << (std::ostream& out, + const vector >& vec) +{ + Formatter::get()->write(out, Mat(vec)); + return out; +} + +/*template struct AlgorithmParamType {}; +template<> struct AlgorithmParamType { enum { type = CV_PARAM_TYPE_INT }; }; +template<> struct AlgorithmParamType { enum { type = CV_PARAM_TYPE_REAL }; }; +template<> struct AlgorithmParamType { enum { type = 
CV_PARAM_TYPE_STRING }; }; +template<> struct AlgorithmParamType { enum { type = CV_PARAM_TYPE_MAT }; }; + +template _Tp Algorithm::get(int paramId) const +{ + _Tp value = _Tp(); + get_(paramId, AlgorithmParamType<_Tp>::type, &value); + return value; +} + +template bool Algorithm::set(int paramId, const _Tp& value) +{ + set_(paramId, AlgorithmParamType<_Tp>::type, &value); + return value; +} + +template _Tp Algorithm::paramDefaultValue(int paramId) const +{ + _Tp value = _Tp(); + paramDefaultValue_(paramId, AlgorithmParamType<_Tp>::type, &value); + return value; +} + +template bool Algorithm::paramRange(int paramId, _Tp& minVal, _Tp& maxVal) const +{ + return paramRange_(paramId, AlgorithmParamType<_Tp>::type, &minVal, &maxVal); +} + +template void Algorithm::addParam(int propId, _Tp& value, bool readOnly, const string& name, + const string& help, const _Tp& defaultValue, + _Tp (Algorithm::*getter)(), bool (Algorithm::*setter)(const _Tp&)) +{ + addParam_(propId, AlgorithmParamType<_Tp>::type, &value, readOnly, name, help, &defaultValue, + (void*)getter, (void*)setter); +} + +template void Algorithm::setParamRange(int propId, const _Tp& minVal, const _Tp& maxVal) +{ + setParamRange_(propId, AlgorithmParamType<_Tp>::type, &minVal, &maxVal); +}*/ + +} + +#endif // __cplusplus +#endif diff --git a/include/opencv2/core/types_c.h b/include/opencv2/core/types_c.h new file mode 100644 index 0000000..7c78477 --- /dev/null +++ b/include/opencv2/core/types_c.h @@ -0,0 +1,1876 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. 
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_CORE_TYPES_H__ +#define __OPENCV_CORE_TYPES_H__ + +#if !defined _CRT_SECURE_NO_DEPRECATE && _MSC_VER > 1300 +#define _CRT_SECURE_NO_DEPRECATE /* to avoid multiple Visual Studio 2005 warnings */ +#endif + + +#ifndef SKIP_INCLUDES + #include + #include + #include + #include + +#if !defined _MSC_VER && !defined __BORLANDC__ + #include +#endif + + #if defined __ICL + #define CV_ICC __ICL + #elif defined __ICC + #define CV_ICC __ICC + #elif defined __ECL + #define CV_ICC __ECL + #elif defined __ECC + #define CV_ICC __ECC + #elif defined __INTEL_COMPILER + #define CV_ICC __INTEL_COMPILER + #endif + + #if (_MSC_VER >= 1400 && defined _M_X64) || (__GNUC__ >= 4 && defined __x86_64__) + #if defined WIN32 + #include + #endif + #include + #endif + + #if defined __BORLANDC__ + #include + #else + #include + #endif + + #ifdef HAVE_IPL + #ifndef __IPL_H__ + #if defined WIN32 || defined _WIN32 + #include + #else + #include + #endif + #endif + #elif defined __IPL_H__ + #define HAVE_IPL + #endif +#endif // SKIP_INCLUDES + +#if defined WIN32 || defined _WIN32 + #define CV_CDECL __cdecl + #define CV_STDCALL __stdcall +#else + #define CV_CDECL + #define CV_STDCALL +#endif + +#ifndef CV_EXTERN_C + #ifdef __cplusplus + #define CV_EXTERN_C extern "C" + #define CV_DEFAULT(val) = val + #else + #define CV_EXTERN_C + #define CV_DEFAULT(val) + #endif +#endif + +#ifndef CV_EXTERN_C_FUNCPTR + #ifdef __cplusplus + #define CV_EXTERN_C_FUNCPTR(x) extern "C" { typedef x; } + #else + #define CV_EXTERN_C_FUNCPTR(x) typedef x + #endif +#endif + +#ifndef CV_INLINE +#if defined __cplusplus + #define CV_INLINE inline +#elif (defined WIN32 || defined _WIN32 || defined WINCE) && !defined __GNUC__ + #define CV_INLINE __inline +#else + #define CV_INLINE static +#endif +#endif /* CV_INLINE */ + +#if (defined WIN32 || defined _WIN32 || defined WINCE) && defined CVAPI_EXPORTS + #define CV_EXPORTS __declspec(dllexport) +#else + #define CV_EXPORTS +#endif + +#ifndef CVAPI + #define 
CVAPI(rettype) CV_EXTERN_C CV_EXPORTS rettype CV_CDECL +#endif + +#if defined _MSC_VER || defined __BORLANDC__ +typedef __int64 int64; +typedef unsigned __int64 uint64; +#define CV_BIG_INT(n) n##I64 +#define CV_BIG_UINT(n) n##UI64 +#else +typedef int64_t int64; +typedef uint64_t uint64; +#define CV_BIG_INT(n) n##LL +#define CV_BIG_UINT(n) n##ULL +#endif + +#ifndef HAVE_IPL +typedef unsigned char uchar; +typedef unsigned short ushort; +#endif + +typedef signed char schar; + +/* special informative macros for wrapper generators */ +#define CV_CARRAY(counter) +#define CV_CUSTOM_CARRAY(args) +#define CV_EXPORTS_W CV_EXPORTS +#define CV_EXPORTS_W_SIMPLE CV_EXPORTS +#define CV_EXPORTS_AS(synonym) CV_EXPORTS +#define CV_EXPORTS_W_MAP CV_EXPORTS +#define CV_IN_OUT +#define CV_OUT +#define CV_PROP +#define CV_PROP_RW +#define CV_WRAP +#define CV_WRAP_AS(synonym) +#define CV_WRAP_DEFAULT(value) + +/* CvArr* is used to pass arbitrary + * array-like data structures + * into functions where the particular + * array type is recognized at runtime: + */ +typedef void CvArr; + +typedef union Cv32suf +{ + int i; + unsigned u; + float f; +} +Cv32suf; + +typedef union Cv64suf +{ + int64 i; + uint64 u; + double f; +} +Cv64suf; + +typedef int CVStatus; + +enum { + CV_StsOk= 0, /* everithing is ok */ + CV_StsBackTrace= -1, /* pseudo error for back trace */ + CV_StsError= -2, /* unknown /unspecified error */ + CV_StsInternal= -3, /* internal error (bad state) */ + CV_StsNoMem= -4, /* insufficient memory */ + CV_StsBadArg= -5, /* function arg/param is bad */ + CV_StsBadFunc= -6, /* unsupported function */ + CV_StsNoConv= -7, /* iter. 
didn't converge */ + CV_StsAutoTrace= -8, /* tracing */ + CV_HeaderIsNull= -9, /* image header is NULL */ + CV_BadImageSize= -10, /* image size is invalid */ + CV_BadOffset= -11, /* offset is invalid */ + CV_BadDataPtr= -12, /**/ + CV_BadStep= -13, /**/ + CV_BadModelOrChSeq= -14, /**/ + CV_BadNumChannels= -15, /**/ + CV_BadNumChannel1U= -16, /**/ + CV_BadDepth= -17, /**/ + CV_BadAlphaChannel= -18, /**/ + CV_BadOrder= -19, /**/ + CV_BadOrigin= -20, /**/ + CV_BadAlign= -21, /**/ + CV_BadCallBack= -22, /**/ + CV_BadTileSize= -23, /**/ + CV_BadCOI= -24, /**/ + CV_BadROISize= -25, /**/ + CV_MaskIsTiled= -26, /**/ + CV_StsNullPtr= -27, /* null pointer */ + CV_StsVecLengthErr= -28, /* incorrect vector length */ + CV_StsFilterStructContentErr= -29, /* incorr. filter structure content */ + CV_StsKernelStructContentErr= -30, /* incorr. transform kernel content */ + CV_StsFilterOffsetErr= -31, /* incorrect filter ofset value */ + CV_StsBadSize= -201, /* the input/output structure size is incorrect */ + CV_StsDivByZero= -202, /* division by zero */ + CV_StsInplaceNotSupported= -203, /* in-place operation is not supported */ + CV_StsObjectNotFound= -204, /* request can't be completed */ + CV_StsUnmatchedFormats= -205, /* formats of input/output arrays differ */ + CV_StsBadFlag= -206, /* flag is wrong or not supported */ + CV_StsBadPoint= -207, /* bad CvPoint */ + CV_StsBadMask= -208, /* bad format of mask (neither 8uC1 nor 8sC1)*/ + CV_StsUnmatchedSizes= -209, /* sizes of input/output structures do not match */ + CV_StsUnsupportedFormat= -210, /* the data format/type is not supported by the function*/ + CV_StsOutOfRange= -211, /* some of parameters are out of range */ + CV_StsParseError= -212, /* invalid syntax/structure of the parsed file */ + CV_StsNotImplemented= -213, /* the requested function/feature is not implemented */ + CV_StsBadMemBlock= -214, /* an allocated block has been corrupted */ + CV_StsAssert= -215, /* assertion failed */ + CV_GpuNotSupported= -216, + 
CV_GpuApiCallError= -217, + CV_GpuNppCallError= -218, + CV_GpuCufftCallError= -219 +}; + +/****************************************************************************************\ +* Common macros and inline functions * +\****************************************************************************************/ + +#define CV_PI 3.1415926535897932384626433832795 +#define CV_LOG2 0.69314718055994530941723212145818 + +#define CV_SWAP(a,b,t) ((t) = (a), (a) = (b), (b) = (t)) + +#ifndef MIN +#define MIN(a,b) ((a) > (b) ? (b) : (a)) +#endif + +#ifndef MAX +#define MAX(a,b) ((a) < (b) ? (b) : (a)) +#endif + +/* min & max without jumps */ +#define CV_IMIN(a, b) ((a) ^ (((a)^(b)) & (((a) < (b)) - 1))) + +#define CV_IMAX(a, b) ((a) ^ (((a)^(b)) & (((a) > (b)) - 1))) + +/* absolute value without jumps */ +#ifndef __cplusplus +#define CV_IABS(a) (((a) ^ ((a) < 0 ? -1 : 0)) - ((a) < 0 ? -1 : 0)) +#else +#define CV_IABS(a) abs(a) +#endif +#define CV_CMP(a,b) (((a) > (b)) - ((a) < (b))) +#define CV_SIGN(a) CV_CMP((a),0) + +CV_INLINE int cvRound( double value ) +{ +#if (defined _MSC_VER && defined _M_X64) || (defined __GNUC__ && defined __x86_64__ && !defined __APPLE__) + __m128d t = _mm_set_sd( value ); + return _mm_cvtsd_si32(t); +#elif defined _MSC_VER && defined _M_IX86 + int t; + __asm + { + fld value; + fistp t; + } + return t; +#elif defined HAVE_LRINT || defined CV_ICCXX || defined __XXGNUC__ + + return (int)lrint(value); +#else + // while this is not IEEE754-compliant rounding, it's usually a good enough approximation + return (int)(value + (value >= 0 ? 
0.5 : -0.5)); +#endif +} + + +CV_INLINE int cvFloor( double value ) +{ +#ifdef __GNUC__ + int i = (int)value; + return i - (i > value); +#elif defined _MSC_VER && defined _M_X64 + __m128d t = _mm_set_sd( value ); + int i = _mm_cvtsd_si32(t); + return i - _mm_movemask_pd(_mm_cmplt_sd(t, _mm_cvtsi32_sd(t,i))); +#else + int i = cvRound(value); + Cv32suf diff; + diff.f = (float)(value - i); + return i - (diff.i < 0); +#endif +} + + +CV_INLINE int cvCeil( double value ) +{ +#ifdef __GNUC__ + int i = (int)value; + return i + (i < value); +#elif defined _MSC_VER && defined _M_X64 + __m128d t = _mm_set_sd( value ); + int i = _mm_cvtsd_si32(t); + return i + _mm_movemask_pd(_mm_cmplt_sd(_mm_cvtsi32_sd(t,i), t)); +#else + int i = cvRound(value); + Cv32suf diff; + diff.f = (float)(i - value); + return i + (diff.i < 0); +#endif +} + +#define cvInvSqrt(value) ((float)(1./sqrt(value))) +#define cvSqrt(value) ((float)sqrt(value)) + +CV_INLINE int cvIsNaN( double value ) +{ +#if 1/*defined _MSC_VER || defined __BORLANDC__ + return _isnan(value); +#elif defined __GNUC__ + return isnan(value); +#else*/ + Cv64suf ieee754; + ieee754.f = value; + return ((unsigned)(ieee754.u >> 32) & 0x7fffffff) + + ((unsigned)ieee754.u != 0) > 0x7ff00000; +#endif +} + + +CV_INLINE int cvIsInf( double value ) +{ +#if 1/*defined _MSC_VER || defined __BORLANDC__ + return !_finite(value); +#elif defined __GNUC__ + return isinf(value); +#else*/ + Cv64suf ieee754; + ieee754.f = value; + return ((unsigned)(ieee754.u >> 32) & 0x7fffffff) == 0x7ff00000 && + (unsigned)ieee754.u == 0; +#endif +} + + +/*************** Random number generation *******************/ + +typedef uint64 CvRNG; + +#define CV_RNG_COEFF 4164903690U + +CV_INLINE CvRNG cvRNG( int64 seed CV_DEFAULT(-1)) +{ + CvRNG rng = seed ? 
(uint64)seed : (uint64)(int64)-1; + return rng; +} + +/* Return random 32-bit unsigned integer: */ +CV_INLINE unsigned cvRandInt( CvRNG* rng ) +{ + uint64 temp = *rng; + temp = (uint64)(unsigned)temp*CV_RNG_COEFF + (temp >> 32); + *rng = temp; + return (unsigned)temp; +} + +/* Returns random floating-point number between 0 and 1: */ +CV_INLINE double cvRandReal( CvRNG* rng ) +{ + return cvRandInt(rng)*2.3283064365386962890625e-10 /* 2^-32 */; +} + +/****************************************************************************************\ +* Image type (IplImage) * +\****************************************************************************************/ + +#ifndef HAVE_IPL + +/* + * The following definitions (until #endif) + * is an extract from IPL headers. + * Copyright (c) 1995 Intel Corporation. + */ +#define IPL_DEPTH_SIGN 0x80000000 + +#define IPL_DEPTH_1U 1 +#define IPL_DEPTH_8U 8 +#define IPL_DEPTH_16U 16 +#define IPL_DEPTH_32F 32 + +#define IPL_DEPTH_8S (IPL_DEPTH_SIGN| 8) +#define IPL_DEPTH_16S (IPL_DEPTH_SIGN|16) +#define IPL_DEPTH_32S (IPL_DEPTH_SIGN|32) + +#define IPL_DATA_ORDER_PIXEL 0 +#define IPL_DATA_ORDER_PLANE 1 + +#define IPL_ORIGIN_TL 0 +#define IPL_ORIGIN_BL 1 + +#define IPL_ALIGN_4BYTES 4 +#define IPL_ALIGN_8BYTES 8 +#define IPL_ALIGN_16BYTES 16 +#define IPL_ALIGN_32BYTES 32 + +#define IPL_ALIGN_DWORD IPL_ALIGN_4BYTES +#define IPL_ALIGN_QWORD IPL_ALIGN_8BYTES + +#define IPL_BORDER_CONSTANT 0 +#define IPL_BORDER_REPLICATE 1 +#define IPL_BORDER_REFLECT 2 +#define IPL_BORDER_WRAP 3 + +typedef struct _IplImage +{ + int nSize; /* sizeof(IplImage) */ + int ID; /* version (=0)*/ + int nChannels; /* Most of OpenCV functions support 1,2,3 or 4 channels */ + int alphaChannel; /* Ignored by OpenCV */ + int depth; /* Pixel depth in bits: IPL_DEPTH_8U, IPL_DEPTH_8S, IPL_DEPTH_16S, + IPL_DEPTH_32S, IPL_DEPTH_32F and IPL_DEPTH_64F are supported. 
*/ + char colorModel[4]; /* Ignored by OpenCV */ + char channelSeq[4]; /* ditto */ + int dataOrder; /* 0 - interleaved color channels, 1 - separate color channels. + cvCreateImage can only create interleaved images */ + int origin; /* 0 - top-left origin, + 1 - bottom-left origin (Windows bitmaps style). */ + int align; /* Alignment of image rows (4 or 8). + OpenCV ignores it and uses widthStep instead. */ + int width; /* Image width in pixels. */ + int height; /* Image height in pixels. */ + struct _IplROI *roi; /* Image ROI. If NULL, the whole image is selected. */ + struct _IplImage *maskROI; /* Must be NULL. */ + void *imageId; /* " " */ + struct _IplTileInfo *tileInfo; /* " " */ + int imageSize; /* Image data size in bytes + (==image->height*image->widthStep + in case of interleaved data)*/ + char *imageData; /* Pointer to aligned image data. */ + int widthStep; /* Size of aligned image row in bytes. */ + int BorderMode[4]; /* Ignored by OpenCV. */ + int BorderConst[4]; /* Ditto. */ + char *imageDataOrigin; /* Pointer to very origin of image data + (not necessarily aligned) - + needed for correct deallocation */ +} +IplImage; + +typedef struct _IplTileInfo IplTileInfo; + +typedef struct _IplROI +{ + int coi; /* 0 - no COI (all channels are selected), 1 - 0th channel is selected ...*/ + int xOffset; + int yOffset; + int width; + int height; +} +IplROI; + +typedef struct _IplConvKernel +{ + int nCols; + int nRows; + int anchorX; + int anchorY; + int *values; + int nShiftR; +} +IplConvKernel; + +typedef struct _IplConvKernelFP +{ + int nCols; + int nRows; + int anchorX; + int anchorY; + float *values; +} +IplConvKernelFP; + +#define IPL_IMAGE_HEADER 1 +#define IPL_IMAGE_DATA 2 +#define IPL_IMAGE_ROI 4 + +#endif/*HAVE_IPL*/ + +/* extra border mode */ +#define IPL_BORDER_REFLECT_101 4 +#define IPL_BORDER_TRANSPARENT 5 + +#define IPL_IMAGE_MAGIC_VAL ((int)sizeof(IplImage)) +#define CV_TYPE_NAME_IMAGE "opencv-image" + +#define CV_IS_IMAGE_HDR(img) \ + ((img) != NULL 
&& ((const IplImage*)(img))->nSize == sizeof(IplImage)) + +#define CV_IS_IMAGE(img) \ + (CV_IS_IMAGE_HDR(img) && ((IplImage*)img)->imageData != NULL) + +/* for storing double-precision + floating point data in IplImage's */ +#define IPL_DEPTH_64F 64 + +/* get reference to pixel at (col,row), + for multi-channel images (col) should be multiplied by number of channels */ +#define CV_IMAGE_ELEM( image, elemtype, row, col ) \ + (((elemtype*)((image)->imageData + (image)->widthStep*(row)))[(col)]) + +/****************************************************************************************\ +* Matrix type (CvMat) * +\****************************************************************************************/ + +#define CV_CN_MAX 512 +#define CV_CN_SHIFT 3 +#define CV_DEPTH_MAX (1 << CV_CN_SHIFT) + +#define CV_8U 0 +#define CV_8S 1 +#define CV_16U 2 +#define CV_16S 3 +#define CV_32S 4 +#define CV_32F 5 +#define CV_64F 6 +#define CV_USRTYPE1 7 + +#define CV_MAT_DEPTH_MASK (CV_DEPTH_MAX - 1) +#define CV_MAT_DEPTH(flags) ((flags) & CV_MAT_DEPTH_MASK) + +#define CV_MAKETYPE(depth,cn) (CV_MAT_DEPTH(depth) + (((cn)-1) << CV_CN_SHIFT)) +#define CV_MAKE_TYPE CV_MAKETYPE + +#define CV_8UC1 CV_MAKETYPE(CV_8U,1) +#define CV_8UC2 CV_MAKETYPE(CV_8U,2) +#define CV_8UC3 CV_MAKETYPE(CV_8U,3) +#define CV_8UC4 CV_MAKETYPE(CV_8U,4) +#define CV_8UC(n) CV_MAKETYPE(CV_8U,(n)) + +#define CV_8SC1 CV_MAKETYPE(CV_8S,1) +#define CV_8SC2 CV_MAKETYPE(CV_8S,2) +#define CV_8SC3 CV_MAKETYPE(CV_8S,3) +#define CV_8SC4 CV_MAKETYPE(CV_8S,4) +#define CV_8SC(n) CV_MAKETYPE(CV_8S,(n)) + +#define CV_16UC1 CV_MAKETYPE(CV_16U,1) +#define CV_16UC2 CV_MAKETYPE(CV_16U,2) +#define CV_16UC3 CV_MAKETYPE(CV_16U,3) +#define CV_16UC4 CV_MAKETYPE(CV_16U,4) +#define CV_16UC(n) CV_MAKETYPE(CV_16U,(n)) + +#define CV_16SC1 CV_MAKETYPE(CV_16S,1) +#define CV_16SC2 CV_MAKETYPE(CV_16S,2) +#define CV_16SC3 CV_MAKETYPE(CV_16S,3) +#define CV_16SC4 CV_MAKETYPE(CV_16S,4) +#define CV_16SC(n) CV_MAKETYPE(CV_16S,(n)) + +#define CV_32SC1 
CV_MAKETYPE(CV_32S,1) +#define CV_32SC2 CV_MAKETYPE(CV_32S,2) +#define CV_32SC3 CV_MAKETYPE(CV_32S,3) +#define CV_32SC4 CV_MAKETYPE(CV_32S,4) +#define CV_32SC(n) CV_MAKETYPE(CV_32S,(n)) + +#define CV_32FC1 CV_MAKETYPE(CV_32F,1) +#define CV_32FC2 CV_MAKETYPE(CV_32F,2) +#define CV_32FC3 CV_MAKETYPE(CV_32F,3) +#define CV_32FC4 CV_MAKETYPE(CV_32F,4) +#define CV_32FC(n) CV_MAKETYPE(CV_32F,(n)) + +#define CV_64FC1 CV_MAKETYPE(CV_64F,1) +#define CV_64FC2 CV_MAKETYPE(CV_64F,2) +#define CV_64FC3 CV_MAKETYPE(CV_64F,3) +#define CV_64FC4 CV_MAKETYPE(CV_64F,4) +#define CV_64FC(n) CV_MAKETYPE(CV_64F,(n)) + +#define CV_AUTO_STEP 0x7fffffff +#define CV_WHOLE_ARR cvSlice( 0, 0x3fffffff ) + +#define CV_MAT_CN_MASK ((CV_CN_MAX - 1) << CV_CN_SHIFT) +#define CV_MAT_CN(flags) ((((flags) & CV_MAT_CN_MASK) >> CV_CN_SHIFT) + 1) +#define CV_MAT_TYPE_MASK (CV_DEPTH_MAX*CV_CN_MAX - 1) +#define CV_MAT_TYPE(flags) ((flags) & CV_MAT_TYPE_MASK) +#define CV_MAT_CONT_FLAG_SHIFT 14 +#define CV_MAT_CONT_FLAG (1 << CV_MAT_CONT_FLAG_SHIFT) +#define CV_IS_MAT_CONT(flags) ((flags) & CV_MAT_CONT_FLAG) +#define CV_IS_CONT_MAT CV_IS_MAT_CONT +#define CV_SUBMAT_FLAG_SHIFT 15 +#define CV_SUBMAT_FLAG (1 << CV_SUBMAT_FLAG_SHIFT) +#define CV_IS_SUBMAT(flags) ((flags) & CV_MAT_SUBMAT_FLAG) + +#define CV_MAGIC_MASK 0xFFFF0000 +#define CV_MAT_MAGIC_VAL 0x42420000 +#define CV_TYPE_NAME_MAT "opencv-matrix" + +typedef struct CvMat +{ + int type; + int step; + + /* for internal use only */ + int* refcount; + int hdr_refcount; + + union + { + uchar* ptr; + short* s; + int* i; + float* fl; + double* db; + } data; + +#ifdef __cplusplus + union + { + int rows; + int height; + }; + + union + { + int cols; + int width; + }; +#else + int rows; + int cols; +#endif + +} +CvMat; + + +#define CV_IS_MAT_HDR(mat) \ + ((mat) != NULL && \ + (((const CvMat*)(mat))->type & CV_MAGIC_MASK) == CV_MAT_MAGIC_VAL && \ + ((const CvMat*)(mat))->cols > 0 && ((const CvMat*)(mat))->rows > 0) + +#define CV_IS_MAT_HDR_Z(mat) \ + ((mat) != NULL && \ 
+ (((const CvMat*)(mat))->type & CV_MAGIC_MASK) == CV_MAT_MAGIC_VAL && \ + ((const CvMat*)(mat))->cols >= 0 && ((const CvMat*)(mat))->rows >= 0) + +#define CV_IS_MAT(mat) \ + (CV_IS_MAT_HDR(mat) && ((const CvMat*)(mat))->data.ptr != NULL) + +#define CV_IS_MASK_ARR(mat) \ + (((mat)->type & (CV_MAT_TYPE_MASK & ~CV_8SC1)) == 0) + +#define CV_ARE_TYPES_EQ(mat1, mat2) \ + ((((mat1)->type ^ (mat2)->type) & CV_MAT_TYPE_MASK) == 0) + +#define CV_ARE_CNS_EQ(mat1, mat2) \ + ((((mat1)->type ^ (mat2)->type) & CV_MAT_CN_MASK) == 0) + +#define CV_ARE_DEPTHS_EQ(mat1, mat2) \ + ((((mat1)->type ^ (mat2)->type) & CV_MAT_DEPTH_MASK) == 0) + +#define CV_ARE_SIZES_EQ(mat1, mat2) \ + ((mat1)->rows == (mat2)->rows && (mat1)->cols == (mat2)->cols) + +#define CV_IS_MAT_CONST(mat) \ + (((mat)->rows|(mat)->cols) == 1) + +/* Size of each channel item, + 0x124489 = 1000 0100 0100 0010 0010 0001 0001 ~ array of sizeof(arr_type_elem) */ +#define CV_ELEM_SIZE1(type) \ + ((((sizeof(size_t)<<28)|0x8442211) >> CV_MAT_DEPTH(type)*4) & 15) + +/* 0x3a50 = 11 10 10 01 01 00 00 ~ array of log2(sizeof(arr_type_elem)) */ +#define CV_ELEM_SIZE(type) \ + (CV_MAT_CN(type) << ((((sizeof(size_t)/4+1)*16384|0x3a50) >> CV_MAT_DEPTH(type)*2) & 3)) + +#define IPL2CV_DEPTH(depth) \ + ((((CV_8U)+(CV_16U<<4)+(CV_32F<<8)+(CV_64F<<16)+(CV_8S<<20)+ \ + (CV_16S<<24)+(CV_32S<<28)) >> ((((depth) & 0xF0) >> 2) + \ + (((depth) & IPL_DEPTH_SIGN) ? 20 : 0))) & 15) + +/* Inline constructor. No data is allocated internally!!! 
+ * (Use together with cvCreateData, or use cvCreateMat instead to + * get a matrix with allocated data): + */ +CV_INLINE CvMat cvMat( int rows, int cols, int type, void* data CV_DEFAULT(NULL)) +{ + CvMat m; + + assert( (unsigned)CV_MAT_DEPTH(type) <= CV_64F ); + type = CV_MAT_TYPE(type); + m.type = CV_MAT_MAGIC_VAL | CV_MAT_CONT_FLAG | type; + m.cols = cols; + m.rows = rows; + m.step = m.cols*CV_ELEM_SIZE(type); + m.data.ptr = (uchar*)data; + m.refcount = NULL; + m.hdr_refcount = 0; + + return m; +} + + +#define CV_MAT_ELEM_PTR_FAST( mat, row, col, pix_size ) \ + (assert( (unsigned)(row) < (unsigned)(mat).rows && \ + (unsigned)(col) < (unsigned)(mat).cols ), \ + (mat).data.ptr + (size_t)(mat).step*(row) + (pix_size)*(col)) + +#define CV_MAT_ELEM_PTR( mat, row, col ) \ + CV_MAT_ELEM_PTR_FAST( mat, row, col, CV_ELEM_SIZE((mat).type) ) + +#define CV_MAT_ELEM( mat, elemtype, row, col ) \ + (*(elemtype*)CV_MAT_ELEM_PTR_FAST( mat, row, col, sizeof(elemtype))) + + +CV_INLINE double cvmGet( const CvMat* mat, int row, int col ) +{ + int type; + + type = CV_MAT_TYPE(mat->type); + assert( (unsigned)row < (unsigned)mat->rows && + (unsigned)col < (unsigned)mat->cols ); + + if( type == CV_32FC1 ) + return ((float*)(mat->data.ptr + (size_t)mat->step*row))[col]; + else + { + assert( type == CV_64FC1 ); + return ((double*)(mat->data.ptr + (size_t)mat->step*row))[col]; + } +} + + +CV_INLINE void cvmSet( CvMat* mat, int row, int col, double value ) +{ + int type; + type = CV_MAT_TYPE(mat->type); + assert( (unsigned)row < (unsigned)mat->rows && + (unsigned)col < (unsigned)mat->cols ); + + if( type == CV_32FC1 ) + ((float*)(mat->data.ptr + (size_t)mat->step*row))[col] = (float)value; + else + { + assert( type == CV_64FC1 ); + ((double*)(mat->data.ptr + (size_t)mat->step*row))[col] = (double)value; + } +} + + +CV_INLINE int cvIplDepth( int type ) +{ + int depth = CV_MAT_DEPTH(type); + return CV_ELEM_SIZE1(depth)*8 | (depth == CV_8S || depth == CV_16S || + depth == CV_32S ? 
IPL_DEPTH_SIGN : 0); +} + + +/****************************************************************************************\ +* Multi-dimensional dense array (CvMatND) * +\****************************************************************************************/ + +#define CV_MATND_MAGIC_VAL 0x42430000 +#define CV_TYPE_NAME_MATND "opencv-nd-matrix" + +#define CV_MAX_DIM 32 +#define CV_MAX_DIM_HEAP 1024 + +typedef struct CvMatND +{ + int type; + int dims; + + int* refcount; + int hdr_refcount; + + union + { + uchar* ptr; + float* fl; + double* db; + int* i; + short* s; + } data; + + struct + { + int size; + int step; + } + dim[CV_MAX_DIM]; +} +CvMatND; + +#define CV_IS_MATND_HDR(mat) \ + ((mat) != NULL && (((const CvMatND*)(mat))->type & CV_MAGIC_MASK) == CV_MATND_MAGIC_VAL) + +#define CV_IS_MATND(mat) \ + (CV_IS_MATND_HDR(mat) && ((const CvMatND*)(mat))->data.ptr != NULL) + + +/****************************************************************************************\ +* Multi-dimensional sparse array (CvSparseMat) * +\****************************************************************************************/ + +#define CV_SPARSE_MAT_MAGIC_VAL 0x42440000 +#define CV_TYPE_NAME_SPARSE_MAT "opencv-sparse-matrix" + +struct CvSet; + +typedef struct CvSparseMat +{ + int type; + int dims; + int* refcount; + int hdr_refcount; + + struct CvSet* heap; + void** hashtable; + int hashsize; + int valoffset; + int idxoffset; + int size[CV_MAX_DIM]; +} +CvSparseMat; + +#define CV_IS_SPARSE_MAT_HDR(mat) \ + ((mat) != NULL && \ + (((const CvSparseMat*)(mat))->type & CV_MAGIC_MASK) == CV_SPARSE_MAT_MAGIC_VAL) + +#define CV_IS_SPARSE_MAT(mat) \ + CV_IS_SPARSE_MAT_HDR(mat) + +/**************** iteration through a sparse array *****************/ + +typedef struct CvSparseNode +{ + unsigned hashval; + struct CvSparseNode* next; +} +CvSparseNode; + +typedef struct CvSparseMatIterator +{ + CvSparseMat* mat; + CvSparseNode* node; + int curidx; +} +CvSparseMatIterator; + +#define CV_NODE_VAL(mat,node) 
((void*)((uchar*)(node) + (mat)->valoffset)) +#define CV_NODE_IDX(mat,node) ((int*)((uchar*)(node) + (mat)->idxoffset)) + +/****************************************************************************************\ +* Histogram * +\****************************************************************************************/ + +typedef int CvHistType; + +#define CV_HIST_MAGIC_VAL 0x42450000 +#define CV_HIST_UNIFORM_FLAG (1 << 10) + +/* indicates whether bin ranges are set already or not */ +#define CV_HIST_RANGES_FLAG (1 << 11) + +#define CV_HIST_ARRAY 0 +#define CV_HIST_SPARSE 1 +#define CV_HIST_TREE CV_HIST_SPARSE + +/* should be used as a parameter only, + it turns to CV_HIST_UNIFORM_FLAG of hist->type */ +#define CV_HIST_UNIFORM 1 + +typedef struct CvHistogram +{ + int type; + CvArr* bins; + float thresh[CV_MAX_DIM][2]; /* For uniform histograms. */ + float** thresh2; /* For non-uniform histograms. */ + CvMatND mat; /* Embedded matrix header for array histograms. */ +} +CvHistogram; + +#define CV_IS_HIST( hist ) \ + ((hist) != NULL && \ + (((CvHistogram*)(hist))->type & CV_MAGIC_MASK) == CV_HIST_MAGIC_VAL && \ + (hist)->bins != NULL) + +#define CV_IS_UNIFORM_HIST( hist ) \ + (((hist)->type & CV_HIST_UNIFORM_FLAG) != 0) + +#define CV_IS_SPARSE_HIST( hist ) \ + CV_IS_SPARSE_MAT((hist)->bins) + +#define CV_HIST_HAS_RANGES( hist ) \ + (((hist)->type & CV_HIST_RANGES_FLAG) != 0) + +/****************************************************************************************\ +* Other supplementary data type definitions * +\****************************************************************************************/ + +/*************************************** CvRect *****************************************/ + +typedef struct CvRect +{ + int x; + int y; + int width; + int height; +} +CvRect; + +CV_INLINE CvRect cvRect( int x, int y, int width, int height ) +{ + CvRect r; + + r.x = x; + r.y = y; + r.width = width; + r.height = height; + + return r; +} + + +CV_INLINE IplROI 
cvRectToROI( CvRect rect, int coi ) +{ + IplROI roi; + roi.xOffset = rect.x; + roi.yOffset = rect.y; + roi.width = rect.width; + roi.height = rect.height; + roi.coi = coi; + + return roi; +} + + +CV_INLINE CvRect cvROIToRect( IplROI roi ) +{ + return cvRect( roi.xOffset, roi.yOffset, roi.width, roi.height ); +} + +/*********************************** CvTermCriteria *************************************/ + +#define CV_TERMCRIT_ITER 1 +#define CV_TERMCRIT_NUMBER CV_TERMCRIT_ITER +#define CV_TERMCRIT_EPS 2 + +typedef struct CvTermCriteria +{ + int type; /* may be combination of + CV_TERMCRIT_ITER + CV_TERMCRIT_EPS */ + int max_iter; + double epsilon; +} +CvTermCriteria; + +CV_INLINE CvTermCriteria cvTermCriteria( int type, int max_iter, double epsilon ) +{ + CvTermCriteria t; + + t.type = type; + t.max_iter = max_iter; + t.epsilon = (float)epsilon; + + return t; +} + + +/******************************* CvPoint and variants ***********************************/ + +typedef struct CvPoint +{ + int x; + int y; +} +CvPoint; + + +CV_INLINE CvPoint cvPoint( int x, int y ) +{ + CvPoint p; + + p.x = x; + p.y = y; + + return p; +} + + +typedef struct CvPoint2D32f +{ + float x; + float y; +} +CvPoint2D32f; + + +CV_INLINE CvPoint2D32f cvPoint2D32f( double x, double y ) +{ + CvPoint2D32f p; + + p.x = (float)x; + p.y = (float)y; + + return p; +} + + +CV_INLINE CvPoint2D32f cvPointTo32f( CvPoint point ) +{ + return cvPoint2D32f( (float)point.x, (float)point.y ); +} + + +CV_INLINE CvPoint cvPointFrom32f( CvPoint2D32f point ) +{ + CvPoint ipt; + ipt.x = cvRound(point.x); + ipt.y = cvRound(point.y); + + return ipt; +} + + +typedef struct CvPoint3D32f +{ + float x; + float y; + float z; +} +CvPoint3D32f; + + +CV_INLINE CvPoint3D32f cvPoint3D32f( double x, double y, double z ) +{ + CvPoint3D32f p; + + p.x = (float)x; + p.y = (float)y; + p.z = (float)z; + + return p; +} + + +typedef struct CvPoint2D64f +{ + double x; + double y; +} +CvPoint2D64f; + + +CV_INLINE CvPoint2D64f cvPoint2D64f( 
double x, double y ) +{ + CvPoint2D64f p; + + p.x = x; + p.y = y; + + return p; +} + + +typedef struct CvPoint3D64f +{ + double x; + double y; + double z; +} +CvPoint3D64f; + + +CV_INLINE CvPoint3D64f cvPoint3D64f( double x, double y, double z ) +{ + CvPoint3D64f p; + + p.x = x; + p.y = y; + p.z = z; + + return p; +} + + +/******************************** CvSize's & CvBox **************************************/ + +typedef struct +{ + int width; + int height; +} +CvSize; + +CV_INLINE CvSize cvSize( int width, int height ) +{ + CvSize s; + + s.width = width; + s.height = height; + + return s; +} + +typedef struct CvSize2D32f +{ + float width; + float height; +} +CvSize2D32f; + + +CV_INLINE CvSize2D32f cvSize2D32f( double width, double height ) +{ + CvSize2D32f s; + + s.width = (float)width; + s.height = (float)height; + + return s; +} + +typedef struct CvBox2D +{ + CvPoint2D32f center; /* Center of the box. */ + CvSize2D32f size; /* Box width and length. */ + float angle; /* Angle between the horizontal axis */ + /* and the first side (i.e. 
length) in degrees */ +} +CvBox2D; + + +/* Line iterator state: */ +typedef struct CvLineIterator +{ + /* Pointer to the current point: */ + uchar* ptr; + + /* Bresenham algorithm state: */ + int err; + int plus_delta; + int minus_delta; + int plus_step; + int minus_step; +} +CvLineIterator; + + + +/************************************* CvSlice ******************************************/ + +typedef struct CvSlice +{ + int start_index, end_index; +} +CvSlice; + +CV_INLINE CvSlice cvSlice( int start, int end ) +{ + CvSlice slice; + slice.start_index = start; + slice.end_index = end; + + return slice; +} + +#define CV_WHOLE_SEQ_END_INDEX 0x3fffffff +#define CV_WHOLE_SEQ cvSlice(0, CV_WHOLE_SEQ_END_INDEX) + + +/************************************* CvScalar *****************************************/ + +typedef struct CvScalar +{ + double val[4]; +} +CvScalar; + +CV_INLINE CvScalar cvScalar( double val0, double val1 CV_DEFAULT(0), + double val2 CV_DEFAULT(0), double val3 CV_DEFAULT(0)) +{ + CvScalar scalar; + scalar.val[0] = val0; scalar.val[1] = val1; + scalar.val[2] = val2; scalar.val[3] = val3; + return scalar; +} + + +CV_INLINE CvScalar cvRealScalar( double val0 ) +{ + CvScalar scalar; + scalar.val[0] = val0; + scalar.val[1] = scalar.val[2] = scalar.val[3] = 0; + return scalar; +} + +CV_INLINE CvScalar cvScalarAll( double val0123 ) +{ + CvScalar scalar; + scalar.val[0] = val0123; + scalar.val[1] = val0123; + scalar.val[2] = val0123; + scalar.val[3] = val0123; + return scalar; +} + +/****************************************************************************************\ +* Dynamic Data structures * +\****************************************************************************************/ + +/******************************** Memory storage ****************************************/ + +typedef struct CvMemBlock +{ + struct CvMemBlock* prev; + struct CvMemBlock* next; +} +CvMemBlock; + +#define CV_STORAGE_MAGIC_VAL 0x42890000 + +typedef struct CvMemStorage +{ + int 
signature; + CvMemBlock* bottom; /* First allocated block. */ + CvMemBlock* top; /* Current memory block - top of the stack. */ + struct CvMemStorage* parent; /* We get new blocks from parent as needed. */ + int block_size; /* Block size. */ + int free_space; /* Remaining free space in current block. */ +} +CvMemStorage; + +#define CV_IS_STORAGE(storage) \ + ((storage) != NULL && \ + (((CvMemStorage*)(storage))->signature & CV_MAGIC_MASK) == CV_STORAGE_MAGIC_VAL) + + +typedef struct CvMemStoragePos +{ + CvMemBlock* top; + int free_space; +} +CvMemStoragePos; + + +/*********************************** Sequence *******************************************/ + +typedef struct CvSeqBlock +{ + struct CvSeqBlock* prev; /* Previous sequence block. */ + struct CvSeqBlock* next; /* Next sequence block. */ + int start_index; /* Index of the first element in the block + */ + /* sequence->first->start_index. */ + int count; /* Number of elements in the block. */ + schar* data; /* Pointer to the first element of the block. */ +} +CvSeqBlock; + + +#define CV_TREE_NODE_FIELDS(node_type) \ + int flags; /* Miscellaneous flags. */ \ + int header_size; /* Size of sequence header. */ \ + struct node_type* h_prev; /* Previous sequence. */ \ + struct node_type* h_next; /* Next sequence. */ \ + struct node_type* v_prev; /* 2nd previous sequence. */ \ + struct node_type* v_next /* 2nd next sequence. */ + +/* + Read/Write sequence. + Elements can be dynamically inserted to or deleted from the sequence. +*/ +#define CV_SEQUENCE_FIELDS() \ + CV_TREE_NODE_FIELDS(CvSeq); \ + int total; /* Total number of elements. */ \ + int elem_size; /* Size of sequence element in bytes. */ \ + schar* block_max; /* Maximal bound of the last block. */ \ + schar* ptr; /* Current write pointer. */ \ + int delta_elems; /* Grow seq this many at a time. */ \ + CvMemStorage* storage; /* Where the seq is stored. */ \ + CvSeqBlock* free_blocks; /* Free blocks list. 
*/ \ + CvSeqBlock* first; /* Pointer to the first sequence block. */ + +typedef struct CvSeq +{ + CV_SEQUENCE_FIELDS() +} +CvSeq; + +#define CV_TYPE_NAME_SEQ "opencv-sequence" +#define CV_TYPE_NAME_SEQ_TREE "opencv-sequence-tree" + +/*************************************** Set ********************************************/ +/* + Set. + Order is not preserved. There can be gaps between sequence elements. + After the element has been inserted it stays in the same place all the time. + The MSB(most-significant or sign bit) of the first field (flags) is 0 iff the element exists. +*/ +#define CV_SET_ELEM_FIELDS(elem_type) \ + int flags; \ + struct elem_type* next_free; + +typedef struct CvSetElem +{ + CV_SET_ELEM_FIELDS(CvSetElem) +} +CvSetElem; + +#define CV_SET_FIELDS() \ + CV_SEQUENCE_FIELDS() \ + CvSetElem* free_elems; \ + int active_count; + +typedef struct CvSet +{ + CV_SET_FIELDS() +} +CvSet; + + +#define CV_SET_ELEM_IDX_MASK ((1 << 26) - 1) +#define CV_SET_ELEM_FREE_FLAG (1 << (sizeof(int)*8-1)) + +/* Checks whether the element pointed by ptr belongs to a set or not */ +#define CV_IS_SET_ELEM( ptr ) (((CvSetElem*)(ptr))->flags >= 0) + +/************************************* Graph ********************************************/ + +/* + We represent a graph as a set of vertices. + Vertices contain their adjacency lists (more exactly, pointers to first incoming or + outcoming edge (or 0 if isolated vertex)). Edges are stored in another set. + There is a singly-linked list of incoming/outcoming edges for each vertex. + + Each edge consists of + + o Two pointers to the starting and ending vertices + (vtx[0] and vtx[1] respectively). + + A graph may be oriented or not. In the latter case, edges between + vertex i to vertex j are not distinguished during search operations. 
+ + o Two pointers to next edges for the starting and ending vertices, where + next[0] points to the next edge in the vtx[0] adjacency list and + next[1] points to the next edge in the vtx[1] adjacency list. +*/ +#define CV_GRAPH_EDGE_FIELDS() \ + int flags; \ + float weight; \ + struct CvGraphEdge* next[2]; \ + struct CvGraphVtx* vtx[2]; + + +#define CV_GRAPH_VERTEX_FIELDS() \ + int flags; \ + struct CvGraphEdge* first; + + +typedef struct CvGraphEdge +{ + CV_GRAPH_EDGE_FIELDS() +} +CvGraphEdge; + +typedef struct CvGraphVtx +{ + CV_GRAPH_VERTEX_FIELDS() +} +CvGraphVtx; + +typedef struct CvGraphVtx2D +{ + CV_GRAPH_VERTEX_FIELDS() + CvPoint2D32f* ptr; +} +CvGraphVtx2D; + +/* + Graph is "derived" from the set (this is set a of vertices) + and includes another set (edges) +*/ +#define CV_GRAPH_FIELDS() \ + CV_SET_FIELDS() \ + CvSet* edges; + +typedef struct CvGraph +{ + CV_GRAPH_FIELDS() +} +CvGraph; + +#define CV_TYPE_NAME_GRAPH "opencv-graph" + +/*********************************** Chain/Countour *************************************/ + +typedef struct CvChain +{ + CV_SEQUENCE_FIELDS() + CvPoint origin; +} +CvChain; + +#define CV_CONTOUR_FIELDS() \ + CV_SEQUENCE_FIELDS() \ + CvRect rect; \ + int color; \ + int reserved[3]; + +typedef struct CvContour +{ + CV_CONTOUR_FIELDS() +} +CvContour; + +typedef CvContour CvPoint2DSeq; + +/****************************************************************************************\ +* Sequence types * +\****************************************************************************************/ + +#define CV_SEQ_MAGIC_VAL 0x42990000 + +#define CV_IS_SEQ(seq) \ + ((seq) != NULL && (((CvSeq*)(seq))->flags & CV_MAGIC_MASK) == CV_SEQ_MAGIC_VAL) + +#define CV_SET_MAGIC_VAL 0x42980000 +#define CV_IS_SET(set) \ + ((set) != NULL && (((CvSeq*)(set))->flags & CV_MAGIC_MASK) == CV_SET_MAGIC_VAL) + +#define CV_SEQ_ELTYPE_BITS 12 +#define CV_SEQ_ELTYPE_MASK ((1 << CV_SEQ_ELTYPE_BITS) - 1) + +#define CV_SEQ_ELTYPE_POINT CV_32SC2 /* (x,y) */ 
+#define CV_SEQ_ELTYPE_CODE CV_8UC1 /* freeman code: 0..7 */ +#define CV_SEQ_ELTYPE_GENERIC 0 +#define CV_SEQ_ELTYPE_PTR CV_USRTYPE1 +#define CV_SEQ_ELTYPE_PPOINT CV_SEQ_ELTYPE_PTR /* &(x,y) */ +#define CV_SEQ_ELTYPE_INDEX CV_32SC1 /* #(x,y) */ +#define CV_SEQ_ELTYPE_GRAPH_EDGE 0 /* &next_o, &next_d, &vtx_o, &vtx_d */ +#define CV_SEQ_ELTYPE_GRAPH_VERTEX 0 /* first_edge, &(x,y) */ +#define CV_SEQ_ELTYPE_TRIAN_ATR 0 /* vertex of the binary tree */ +#define CV_SEQ_ELTYPE_CONNECTED_COMP 0 /* connected component */ +#define CV_SEQ_ELTYPE_POINT3D CV_32FC3 /* (x,y,z) */ + +#define CV_SEQ_KIND_BITS 2 +#define CV_SEQ_KIND_MASK (((1 << CV_SEQ_KIND_BITS) - 1)<flags & CV_SEQ_ELTYPE_MASK) +#define CV_SEQ_KIND( seq ) ((seq)->flags & CV_SEQ_KIND_MASK ) + +/* flag checking */ +#define CV_IS_SEQ_INDEX( seq ) ((CV_SEQ_ELTYPE(seq) == CV_SEQ_ELTYPE_INDEX) && \ + (CV_SEQ_KIND(seq) == CV_SEQ_KIND_GENERIC)) + +#define CV_IS_SEQ_CURVE( seq ) (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE) +#define CV_IS_SEQ_CLOSED( seq ) (((seq)->flags & CV_SEQ_FLAG_CLOSED) != 0) +#define CV_IS_SEQ_CONVEX( seq ) 0 +#define CV_IS_SEQ_HOLE( seq ) (((seq)->flags & CV_SEQ_FLAG_HOLE) != 0) +#define CV_IS_SEQ_SIMPLE( seq ) 1 + +/* type checking macros */ +#define CV_IS_SEQ_POINT_SET( seq ) \ + ((CV_SEQ_ELTYPE(seq) == CV_32SC2 || CV_SEQ_ELTYPE(seq) == CV_32FC2)) + +#define CV_IS_SEQ_POINT_SUBSET( seq ) \ + (CV_IS_SEQ_INDEX( seq ) || CV_SEQ_ELTYPE(seq) == CV_SEQ_ELTYPE_PPOINT) + +#define CV_IS_SEQ_POLYLINE( seq ) \ + (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE && CV_IS_SEQ_POINT_SET(seq)) + +#define CV_IS_SEQ_POLYGON( seq ) \ + (CV_IS_SEQ_POLYLINE(seq) && CV_IS_SEQ_CLOSED(seq)) + +#define CV_IS_SEQ_CHAIN( seq ) \ + (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE && (seq)->elem_size == 1) + +#define CV_IS_SEQ_CONTOUR( seq ) \ + (CV_IS_SEQ_CLOSED(seq) && (CV_IS_SEQ_POLYLINE(seq) || CV_IS_SEQ_CHAIN(seq))) + +#define CV_IS_SEQ_CHAIN_CONTOUR( seq ) \ + (CV_IS_SEQ_CHAIN( seq ) && CV_IS_SEQ_CLOSED( seq )) + +#define CV_IS_SEQ_POLYGON_TREE( 
seq ) \ + (CV_SEQ_ELTYPE (seq) == CV_SEQ_ELTYPE_TRIAN_ATR && \ + CV_SEQ_KIND( seq ) == CV_SEQ_KIND_BIN_TREE ) + +#define CV_IS_GRAPH( seq ) \ + (CV_IS_SET(seq) && CV_SEQ_KIND((CvSet*)(seq)) == CV_SEQ_KIND_GRAPH) + +#define CV_IS_GRAPH_ORIENTED( seq ) \ + (((seq)->flags & CV_GRAPH_FLAG_ORIENTED) != 0) + +#define CV_IS_SUBDIV2D( seq ) \ + (CV_IS_SET(seq) && CV_SEQ_KIND((CvSet*)(seq)) == CV_SEQ_KIND_SUBDIV2D) + +/****************************************************************************************/ +/* Sequence writer & reader */ +/****************************************************************************************/ + +#define CV_SEQ_WRITER_FIELDS() \ + int header_size; \ + CvSeq* seq; /* the sequence written */ \ + CvSeqBlock* block; /* current block */ \ + schar* ptr; /* pointer to free space */ \ + schar* block_min; /* pointer to the beginning of block*/\ + schar* block_max; /* pointer to the end of block */ + +typedef struct CvSeqWriter +{ + CV_SEQ_WRITER_FIELDS() +} +CvSeqWriter; + + +#define CV_SEQ_READER_FIELDS() \ + int header_size; \ + CvSeq* seq; /* sequence, beign read */ \ + CvSeqBlock* block; /* current block */ \ + schar* ptr; /* pointer to element be read next */ \ + schar* block_min; /* pointer to the beginning of block */\ + schar* block_max; /* pointer to the end of block */ \ + int delta_index;/* = seq->first->start_index */ \ + schar* prev_elem; /* pointer to previous element */ + + +typedef struct CvSeqReader +{ + CV_SEQ_READER_FIELDS() +} +CvSeqReader; + +/****************************************************************************************/ +/* Operations on sequences */ +/****************************************************************************************/ + +#define CV_SEQ_ELEM( seq, elem_type, index ) \ +/* assert gives some guarantee that parameter is valid */ \ +( assert(sizeof((seq)->first[0]) == sizeof(CvSeqBlock) && \ + (seq)->elem_size == sizeof(elem_type)), \ + (elem_type*)((seq)->first && (unsigned)index < \ + 
(unsigned)((seq)->first->count) ? \ + (seq)->first->data + (index) * sizeof(elem_type) : \ + cvGetSeqElem( (CvSeq*)(seq), (index) ))) +#define CV_GET_SEQ_ELEM( elem_type, seq, index ) CV_SEQ_ELEM( (seq), elem_type, (index) ) + +/* Add element to sequence: */ +#define CV_WRITE_SEQ_ELEM_VAR( elem_ptr, writer ) \ +{ \ + if( (writer).ptr >= (writer).block_max ) \ + { \ + cvCreateSeqBlock( &writer); \ + } \ + memcpy((writer).ptr, elem_ptr, (writer).seq->elem_size);\ + (writer).ptr += (writer).seq->elem_size; \ +} + +#define CV_WRITE_SEQ_ELEM( elem, writer ) \ +{ \ + assert( (writer).seq->elem_size == sizeof(elem)); \ + if( (writer).ptr >= (writer).block_max ) \ + { \ + cvCreateSeqBlock( &writer); \ + } \ + assert( (writer).ptr <= (writer).block_max - sizeof(elem));\ + memcpy((writer).ptr, &(elem), sizeof(elem)); \ + (writer).ptr += sizeof(elem); \ +} + + +/* Move reader position forward: */ +#define CV_NEXT_SEQ_ELEM( elem_size, reader ) \ +{ \ + if( ((reader).ptr += (elem_size)) >= (reader).block_max ) \ + { \ + cvChangeSeqBlock( &(reader), 1 ); \ + } \ +} + + +/* Move reader position backward: */ +#define CV_PREV_SEQ_ELEM( elem_size, reader ) \ +{ \ + if( ((reader).ptr -= (elem_size)) < (reader).block_min ) \ + { \ + cvChangeSeqBlock( &(reader), -1 ); \ + } \ +} + +/* Read element and move read position forward: */ +#define CV_READ_SEQ_ELEM( elem, reader ) \ +{ \ + assert( (reader).seq->elem_size == sizeof(elem)); \ + memcpy( &(elem), (reader).ptr, sizeof((elem))); \ + CV_NEXT_SEQ_ELEM( sizeof(elem), reader ) \ +} + +/* Read element and move read position backward: */ +#define CV_REV_READ_SEQ_ELEM( elem, reader ) \ +{ \ + assert( (reader).seq->elem_size == sizeof(elem)); \ + memcpy(&(elem), (reader).ptr, sizeof((elem))); \ + CV_PREV_SEQ_ELEM( sizeof(elem), reader ) \ +} + + +#define CV_READ_CHAIN_POINT( _pt, reader ) \ +{ \ + (_pt) = (reader).pt; \ + if( (reader).ptr ) \ + { \ + CV_READ_SEQ_ELEM( (reader).code, (reader)); \ + assert( ((reader).code & ~7) == 0 ); \ + 
(reader).pt.x += (reader).deltas[(int)(reader).code][0]; \ + (reader).pt.y += (reader).deltas[(int)(reader).code][1]; \ + } \ +} + +#define CV_CURRENT_POINT( reader ) (*((CvPoint*)((reader).ptr))) +#define CV_PREV_POINT( reader ) (*((CvPoint*)((reader).prev_elem))) + +#define CV_READ_EDGE( pt1, pt2, reader ) \ +{ \ + assert( sizeof(pt1) == sizeof(CvPoint) && \ + sizeof(pt2) == sizeof(CvPoint) && \ + reader.seq->elem_size == sizeof(CvPoint)); \ + (pt1) = CV_PREV_POINT( reader ); \ + (pt2) = CV_CURRENT_POINT( reader ); \ + (reader).prev_elem = (reader).ptr; \ + CV_NEXT_SEQ_ELEM( sizeof(CvPoint), (reader)); \ +} + +/************ Graph macros ************/ + +/* Return next graph edge for given vertex: */ +#define CV_NEXT_GRAPH_EDGE( edge, vertex ) \ + (assert((edge)->vtx[0] == (vertex) || (edge)->vtx[1] == (vertex)), \ + (edge)->next[(edge)->vtx[1] == (vertex)]) + + + +/****************************************************************************************\ +* Data structures for persistence (a.k.a serialization) functionality * +\****************************************************************************************/ + +/* "black box" file storage */ +typedef struct CvFileStorage CvFileStorage; + +/* Storage flags: */ +#define CV_STORAGE_READ 0 +#define CV_STORAGE_WRITE 1 +#define CV_STORAGE_WRITE_TEXT CV_STORAGE_WRITE +#define CV_STORAGE_WRITE_BINARY CV_STORAGE_WRITE +#define CV_STORAGE_APPEND 2 + +/* List of attributes: */ +typedef struct CvAttrList +{ + const char** attr; /* NULL-terminated array of (attribute_name,attribute_value) pairs. */ + struct CvAttrList* next; /* Pointer to next chunk of the attributes list. 
*/ +} +CvAttrList; + +CV_INLINE CvAttrList cvAttrList( const char** attr CV_DEFAULT(NULL), + CvAttrList* next CV_DEFAULT(NULL) ) +{ + CvAttrList l; + l.attr = attr; + l.next = next; + + return l; +} + +struct CvTypeInfo; + +#define CV_NODE_NONE 0 +#define CV_NODE_INT 1 +#define CV_NODE_INTEGER CV_NODE_INT +#define CV_NODE_REAL 2 +#define CV_NODE_FLOAT CV_NODE_REAL +#define CV_NODE_STR 3 +#define CV_NODE_STRING CV_NODE_STR +#define CV_NODE_REF 4 /* not used */ +#define CV_NODE_SEQ 5 +#define CV_NODE_MAP 6 +#define CV_NODE_TYPE_MASK 7 + +#define CV_NODE_TYPE(flags) ((flags) & CV_NODE_TYPE_MASK) + +/* file node flags */ +#define CV_NODE_FLOW 8 /* Used only for writing structures in YAML format. */ +#define CV_NODE_USER 16 +#define CV_NODE_EMPTY 32 +#define CV_NODE_NAMED 64 + +#define CV_NODE_IS_INT(flags) (CV_NODE_TYPE(flags) == CV_NODE_INT) +#define CV_NODE_IS_REAL(flags) (CV_NODE_TYPE(flags) == CV_NODE_REAL) +#define CV_NODE_IS_STRING(flags) (CV_NODE_TYPE(flags) == CV_NODE_STRING) +#define CV_NODE_IS_SEQ(flags) (CV_NODE_TYPE(flags) == CV_NODE_SEQ) +#define CV_NODE_IS_MAP(flags) (CV_NODE_TYPE(flags) == CV_NODE_MAP) +#define CV_NODE_IS_COLLECTION(flags) (CV_NODE_TYPE(flags) >= CV_NODE_SEQ) +#define CV_NODE_IS_FLOW(flags) (((flags) & CV_NODE_FLOW) != 0) +#define CV_NODE_IS_EMPTY(flags) (((flags) & CV_NODE_EMPTY) != 0) +#define CV_NODE_IS_USER(flags) (((flags) & CV_NODE_USER) != 0) +#define CV_NODE_HAS_NAME(flags) (((flags) & CV_NODE_NAMED) != 0) + +#define CV_NODE_SEQ_SIMPLE 256 +#define CV_NODE_SEQ_IS_SIMPLE(seq) (((seq)->flags & CV_NODE_SEQ_SIMPLE) != 0) + +typedef struct CvString +{ + int len; + char* ptr; +} +CvString; + +/* All the keys (names) of elements in the readed file storage + are stored in the hash to speed up the lookup operations: */ +typedef struct CvStringHashNode +{ + unsigned hashval; + CvString str; + struct CvStringHashNode* next; +} +CvStringHashNode; + +typedef struct CvGenericHash CvFileNodeHash; + +/* Basic element of the file storage - scalar 
or collection: */ +typedef struct CvFileNode +{ + int tag; + struct CvTypeInfo* info; /* type information + (only for user-defined object, for others it is 0) */ + union + { + double f; /* scalar floating-point number */ + int i; /* scalar integer number */ + CvString str; /* text string */ + CvSeq* seq; /* sequence (ordered collection of file nodes) */ + CvFileNodeHash* map; /* map (collection of named file nodes) */ + } data; +} +CvFileNode; + +#ifdef __cplusplus +extern "C" { +#endif +typedef int (CV_CDECL *CvIsInstanceFunc)( const void* struct_ptr ); +typedef void (CV_CDECL *CvReleaseFunc)( void** struct_dblptr ); +typedef void* (CV_CDECL *CvReadFunc)( CvFileStorage* storage, CvFileNode* node ); +typedef void (CV_CDECL *CvWriteFunc)( CvFileStorage* storage, const char* name, + const void* struct_ptr, CvAttrList attributes ); +typedef void* (CV_CDECL *CvCloneFunc)( const void* struct_ptr ); +#ifdef __cplusplus +} +#endif + +typedef struct CvTypeInfo +{ + int flags; + int header_size; + struct CvTypeInfo* prev; + struct CvTypeInfo* next; + const char* type_name; + CvIsInstanceFunc is_instance; + CvReleaseFunc release; + CvReadFunc read; + CvWriteFunc write; + CvCloneFunc clone; +} +CvTypeInfo; + + +/**** System data types ******/ + +typedef struct CvPluginFuncInfo +{ + void** func_addr; + void* default_func_addr; + const char* func_names; + int search_modules; + int loaded_from; +} +CvPluginFuncInfo; + +typedef struct CvModuleInfo +{ + struct CvModuleInfo* next; + const char* name; + const char* version; + CvPluginFuncInfo* func_tab; +} +CvModuleInfo; + +enum { CV_PARAM_TYPE_INT=0, CV_PARAM_TYPE_REAL=1, CV_PARAM_TYPE_STRING=2, CV_PARAM_TYPE_MAT=3 }; + +#endif /*_CXCORE_TYPES_H_*/ + +/* End of file. 
*/ diff --git a/include/opencv2/core/version.hpp b/include/opencv2/core/version.hpp new file mode 100644 index 0000000..0cd25ca --- /dev/null +++ b/include/opencv2/core/version.hpp @@ -0,0 +1,58 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright( C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +//(including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort(including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* + definition of the current version of OpenCV + Usefull to test in user programs +*/ + +#ifndef __OPENCV_VERSION_HPP__ +#define __OPENCV_VERSION_HPP__ + +#define CV_MAJOR_VERSION 2 +#define CV_MINOR_VERSION 3 +#define CV_SUBMINOR_VERSION 1 + +#define CVAUX_STR_EXP(__A) #__A +#define CVAUX_STR(__A) CVAUX_STR_EXP(__A) +#define CV_VERSION CVAUX_STR(CV_MAJOR_VERSION) "." CVAUX_STR(CV_MINOR_VERSION) "." CVAUX_STR(CV_SUBMINOR_VERSION) + +#endif diff --git a/include/opencv2/core/wimage.hpp b/include/opencv2/core/wimage.hpp new file mode 100644 index 0000000..579c009 --- /dev/null +++ b/include/opencv2/core/wimage.hpp @@ -0,0 +1,621 @@ +/////////////////////////////////////////////////////////////////////////////// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to +// this license. If you do not agree to this license, do not download, +// install, copy or use the software. +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2008, Google, all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation or contributors may not be used to endorse +// or promote products derived from this software without specific +// prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" +// and any express or implied warranties, including, but not limited to, the +// implied warranties of merchantability and fitness for a particular purpose +// are disclaimed. In no event shall the Intel Corporation or contributors be +// liable for any direct, indirect, incidental, special, exemplary, or +// consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. + + +///////////////////////////////////////////////////////////////////////////////// +// +// Image class which provides a thin layer around an IplImage. The goals +// of the class design are: +// 1. All the data has explicit ownership to avoid memory leaks +// 2. No hidden allocations or copies for performance. +// 3. Easy access to OpenCV methods (which will access IPP if available) +// 4. Can easily treat external data as an image +// 5. 
Easy to create images which are subsets of other images +// 6. Fast pixel access which can take advantage of number of channels +// if known at compile time. +// +// The WImage class is the image class which provides the data accessors. +// The 'W' comes from the fact that it is also a wrapper around the popular +// but inconvenient IplImage class. A WImage can be constructed either using a +// WImageBuffer class which allocates and frees the data, +// or using a WImageView class which constructs a subimage or a view into +// external data. The view class does no memory management. Each class +// actually has two versions, one when the number of channels is known at +// compile time and one when it isn't. Using the one with the number of +// channels specified can provide some compile time optimizations by using the +// fact that the number of channels is a constant. +// +// We use the convention (c,r) to refer to column c and row r with (0,0) being +// the upper left corner. This is similar to standard Euclidean coordinates +// with the first coordinate varying in the horizontal direction and the second +// coordinate varying in the vertical direction. +// Thus (c,r) is usually in the domain [0, width) X [0, height) +// +// Example usage: +// WImageBuffer3_b im(5,7); // Make a 5X7 3 channel image of type uchar +// WImageView3_b sub_im(im, 2,2, 3,3); // 3X3 submatrix +// vector vec(10, 3.0f); +// WImageView1_f user_im(&vec[0], 2, 5); // 2X5 image w/ supplied data +// +// im.SetZero(); // same as cvSetZero(im.Ipl()) +// *im(2, 3) = 15; // Modify the element at column 2, row 3 +// MySetRand(&sub_im); +// +// // Copy the second row into the first. This can be done with no memory +// // allocation and will use SSE if IPP is available. 
+// int w = im.Width(); +// im.View(0,0, w,1).CopyFrom(im.View(0,1, w,1)); +// +// // Doesn't care about source of data since using WImage +// void MySetRand(WImage_b* im) { // Works with any number of channels +// for (int r = 0; r < im->Height(); ++r) { +// float* row = im->Row(r); +// for (int c = 0; c < im->Width(); ++c) { +// for (int ch = 0; ch < im->Channels(); ++ch, ++row) { +// *row = uchar(rand() & 255); +// } +// } +// } +// } +// +// Functions that are not part of the basic image allocation, viewing, and +// access should come from OpenCV, except some useful functions that are not +// part of OpenCV can be found in wimage_util.h +#ifndef __OPENCV_CORE_WIMAGE_HPP__ +#define __OPENCV_CORE_WIMAGE_HPP__ + +#include "opencv2/core/core_c.h" + +#ifdef __cplusplus + +namespace cv { + +template class WImage; +template class WImageBuffer; +template class WImageView; + +template class WImageC; +template class WImageBufferC; +template class WImageViewC; + +// Commonly used typedefs. +typedef WImage WImage_b; +typedef WImageView WImageView_b; +typedef WImageBuffer WImageBuffer_b; + +typedef WImageC WImage1_b; +typedef WImageViewC WImageView1_b; +typedef WImageBufferC WImageBuffer1_b; + +typedef WImageC WImage3_b; +typedef WImageViewC WImageView3_b; +typedef WImageBufferC WImageBuffer3_b; + +typedef WImage WImage_f; +typedef WImageView WImageView_f; +typedef WImageBuffer WImageBuffer_f; + +typedef WImageC WImage1_f; +typedef WImageViewC WImageView1_f; +typedef WImageBufferC WImageBuffer1_f; + +typedef WImageC WImage3_f; +typedef WImageViewC WImageView3_f; +typedef WImageBufferC WImageBuffer3_f; + +// There isn't a standard for signed and unsigned short so be more +// explicit in the typename for these cases. 
+typedef WImage WImage_16s; +typedef WImageView WImageView_16s; +typedef WImageBuffer WImageBuffer_16s; + +typedef WImageC WImage1_16s; +typedef WImageViewC WImageView1_16s; +typedef WImageBufferC WImageBuffer1_16s; + +typedef WImageC WImage3_16s; +typedef WImageViewC WImageView3_16s; +typedef WImageBufferC WImageBuffer3_16s; + +typedef WImage WImage_16u; +typedef WImageView WImageView_16u; +typedef WImageBuffer WImageBuffer_16u; + +typedef WImageC WImage1_16u; +typedef WImageViewC WImageView1_16u; +typedef WImageBufferC WImageBuffer1_16u; + +typedef WImageC WImage3_16u; +typedef WImageViewC WImageView3_16u; +typedef WImageBufferC WImageBuffer3_16u; + +// +// WImage definitions +// +// This WImage class gives access to the data it refers to. It can be +// constructed either by allocating the data with a WImageBuffer class or +// using the WImageView class to refer to a subimage or outside data. +template +class WImage +{ +public: + typedef T BaseType; + + // WImage is an abstract class with no other virtual methods so make the + // destructor virtual. + virtual ~WImage() = 0; + + // Accessors + IplImage* Ipl() {return image_; } + const IplImage* Ipl() const {return image_; } + T* ImageData() { return reinterpret_cast(image_->imageData); } + const T* ImageData() const { + return reinterpret_cast(image_->imageData); + } + + int Width() const {return image_->width; } + int Height() const {return image_->height; } + + // WidthStep is the number of bytes to go to the pixel with the next y coord + int WidthStep() const {return image_->widthStep; } + + int Channels() const {return image_->nChannels; } + int ChannelSize() const {return sizeof(T); } // number of bytes per channel + + // Number of bytes per pixel + int PixelSize() const {return Channels() * ChannelSize(); } + + // Return depth type (e.g. IPL_DEPTH_8U, IPL_DEPTH_32F) which is the number + // of bits per channel and with the signed bit set. + // This is known at compile time using specializations. 
+ int Depth() const; + + inline const T* Row(int r) const { + return reinterpret_cast(image_->imageData + r*image_->widthStep); + } + + inline T* Row(int r) { + return reinterpret_cast(image_->imageData + r*image_->widthStep); + } + + // Pixel accessors which returns a pointer to the start of the channel + inline T* operator() (int c, int r) { + return reinterpret_cast(image_->imageData + r*image_->widthStep) + + c*Channels(); + } + + inline const T* operator() (int c, int r) const { + return reinterpret_cast(image_->imageData + r*image_->widthStep) + + c*Channels(); + } + + // Copy the contents from another image which is just a convenience to cvCopy + void CopyFrom(const WImage& src) { cvCopy(src.Ipl(), image_); } + + // Set contents to zero which is just a convenient to cvSetZero + void SetZero() { cvSetZero(image_); } + + // Construct a view into a region of this image + WImageView View(int c, int r, int width, int height); + +protected: + // Disallow copy and assignment + WImage(const WImage&); + void operator=(const WImage&); + + explicit WImage(IplImage* img) : image_(img) { + assert(!img || img->depth == Depth()); + } + + void SetIpl(IplImage* image) { + assert(!image || image->depth == Depth()); + image_ = image; + } + + IplImage* image_; +}; + + + +// Image class when both the pixel type and number of channels +// are known at compile time. This wrapper will speed up some of the operations +// like accessing individual pixels using the () operator. 
+template +class WImageC : public WImage +{ +public: + typedef typename WImage::BaseType BaseType; + enum { kChannels = C }; + + explicit WImageC(IplImage* img) : WImage(img) { + assert(!img || img->nChannels == Channels()); + } + + // Construct a view into a region of this image + WImageViewC View(int c, int r, int width, int height); + + // Copy the contents from another image which is just a convenience to cvCopy + void CopyFrom(const WImageC& src) { + cvCopy(src.Ipl(), WImage::image_); + } + + // WImageC is an abstract class with no other virtual methods so make the + // destructor virtual. + virtual ~WImageC() = 0; + + int Channels() const {return C; } + +protected: + // Disallow copy and assignment + WImageC(const WImageC&); + void operator=(const WImageC&); + + void SetIpl(IplImage* image) { + assert(!image || image->depth == WImage::Depth()); + WImage::SetIpl(image); + } +}; + +// +// WImageBuffer definitions +// +// Image class which owns the data, so it can be allocated and is always +// freed. It cannot be copied but can be explicity cloned. +// +template +class WImageBuffer : public WImage +{ +public: + typedef typename WImage::BaseType BaseType; + + // Default constructor which creates an object that can be + WImageBuffer() : WImage(0) {} + + WImageBuffer(int width, int height, int nchannels) : WImage(0) { + Allocate(width, height, nchannels); + } + + // Constructor which takes ownership of a given IplImage so releases + // the image on destruction. + explicit WImageBuffer(IplImage* img) : WImage(img) {} + + // Allocate an image. Does nothing if current size is the same as + // the new size. + void Allocate(int width, int height, int nchannels); + + // Set the data to point to an image, releasing the old data + void SetIpl(IplImage* img) { + ReleaseImage(); + WImage::SetIpl(img); + } + + // Clone an image which reallocates the image if of a different dimension. 
+ void CloneFrom(const WImage& src) { + Allocate(src.Width(), src.Height(), src.Channels()); + CopyFrom(src); + } + + ~WImageBuffer() { + ReleaseImage(); + } + + // Release the image if it isn't null. + void ReleaseImage() { + if (WImage::image_) { + IplImage* image = WImage::image_; + cvReleaseImage(&image); + WImage::SetIpl(0); + } + } + + bool IsNull() const {return WImage::image_ == NULL; } + +private: + // Disallow copy and assignment + WImageBuffer(const WImageBuffer&); + void operator=(const WImageBuffer&); +}; + +// Like a WImageBuffer class but when the number of channels is known +// at compile time. +template +class WImageBufferC : public WImageC +{ +public: + typedef typename WImage::BaseType BaseType; + enum { kChannels = C }; + + // Default constructor which creates an object that can be + WImageBufferC() : WImageC(0) {} + + WImageBufferC(int width, int height) : WImageC(0) { + Allocate(width, height); + } + + // Constructor which takes ownership of a given IplImage so releases + // the image on destruction. + explicit WImageBufferC(IplImage* img) : WImageC(img) {} + + // Allocate an image. Does nothing if current size is the same as + // the new size. + void Allocate(int width, int height); + + // Set the data to point to an image, releasing the old data + void SetIpl(IplImage* img) { + ReleaseImage(); + WImageC::SetIpl(img); + } + + // Clone an image which reallocates the image if of a different dimension. + void CloneFrom(const WImageC& src) { + Allocate(src.Width(), src.Height()); + CopyFrom(src); + } + + ~WImageBufferC() { + ReleaseImage(); + } + + // Release the image if it isn't null. 
+ void ReleaseImage() { + if (WImage::image_) { + IplImage* image = WImage::image_; + cvReleaseImage(&image); + WImageC::SetIpl(0); + } + } + + bool IsNull() const {return WImage::image_ == NULL; } + +private: + // Disallow copy and assignment + WImageBufferC(const WImageBufferC&); + void operator=(const WImageBufferC&); +}; + +// +// WImageView definitions +// +// View into an image class which allows treating a subimage as an image +// or treating external data as an image +// +template +class WImageView : public WImage +{ +public: + typedef typename WImage::BaseType BaseType; + + // Construct a subimage. No checks are done that the subimage lies + // completely inside the original image. + WImageView(WImage* img, int c, int r, int width, int height); + + // Refer to external data. + // If not given width_step assumed to be same as width. + WImageView(T* data, int width, int height, int channels, int width_step = -1); + + // Refer to external data. This does NOT take ownership + // of the supplied IplImage. + WImageView(IplImage* img) : WImage(img) {} + + // Copy constructor + WImageView(const WImage& img) : WImage(0) { + header_ = *(img.Ipl()); + WImage::SetIpl(&header_); + } + + WImageView& operator=(const WImage& img) { + header_ = *(img.Ipl()); + WImage::SetIpl(&header_); + return *this; + } + +protected: + IplImage header_; +}; + + +template +class WImageViewC : public WImageC +{ +public: + typedef typename WImage::BaseType BaseType; + enum { kChannels = C }; + + // Default constructor needed for vectors of views. + WImageViewC(); + + virtual ~WImageViewC() {} + + // Construct a subimage. No checks are done that the subimage lies + // completely inside the original image. + WImageViewC(WImageC* img, + int c, int r, int width, int height); + + // Refer to external data + WImageViewC(T* data, int width, int height, int width_step = -1); + + // Refer to external data. This does NOT take ownership + // of the supplied IplImage. 
+ WImageViewC(IplImage* img) : WImageC(img) {} + + // Copy constructor which does a shallow copy to allow multiple views + // of same data. gcc-4.1.1 gets confused if both versions of + // the constructor and assignment operator are not provided. + WImageViewC(const WImageC& img) : WImageC(0) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + } + WImageViewC(const WImageViewC& img) : WImageC(0) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + } + + WImageViewC& operator=(const WImageC& img) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + return *this; + } + WImageViewC& operator=(const WImageViewC& img) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + return *this; + } + +protected: + IplImage header_; +}; + + +// Specializations for depth +template<> +inline int WImage::Depth() const {return IPL_DEPTH_8U; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_8S; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_16S; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_16U; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_32S; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_32F; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_64F; } + +// +// Pure virtual destructors still need to be defined. 
+// +template inline WImage::~WImage() {} +template inline WImageC::~WImageC() {} + +// +// Allocate ImageData +// +template +inline void WImageBuffer::Allocate(int width, int height, int nchannels) +{ + if (IsNull() || WImage::Width() != width || + WImage::Height() != height || WImage::Channels() != nchannels) { + ReleaseImage(); + WImage::image_ = cvCreateImage(cvSize(width, height), + WImage::Depth(), nchannels); + } +} + +template +inline void WImageBufferC::Allocate(int width, int height) +{ + if (IsNull() || WImage::Width() != width || WImage::Height() != height) { + ReleaseImage(); + WImageC::SetIpl(cvCreateImage(cvSize(width, height),WImage::Depth(), C)); + } +} + +// +// ImageView methods +// +template +WImageView::WImageView(WImage* img, int c, int r, int width, int height) + : WImage(0) +{ + header_ = *(img->Ipl()); + header_.imageData = reinterpret_cast((*img)(c, r)); + header_.width = width; + header_.height = height; + WImage::SetIpl(&header_); +} + +template +WImageView::WImageView(T* data, int width, int height, int nchannels, int width_step) + : WImage(0) +{ + cvInitImageHeader(&header_, cvSize(width, height), WImage::Depth(), nchannels); + header_.imageData = reinterpret_cast(data); + if (width_step > 0) { + header_.widthStep = width_step; + } + WImage::SetIpl(&header_); +} + +template +WImageViewC::WImageViewC(WImageC* img, int c, int r, int width, int height) + : WImageC(0) +{ + header_ = *(img->Ipl()); + header_.imageData = reinterpret_cast((*img)(c, r)); + header_.width = width; + header_.height = height; + WImageC::SetIpl(&header_); +} + +template +WImageViewC::WImageViewC() : WImageC(0) { + cvInitImageHeader(&header_, cvSize(0, 0), WImage::Depth(), C); + header_.imageData = reinterpret_cast(0); + WImageC::SetIpl(&header_); +} + +template +WImageViewC::WImageViewC(T* data, int width, int height, int width_step) + : WImageC(0) +{ + cvInitImageHeader(&header_, cvSize(width, height), WImage::Depth(), C); + header_.imageData = 
reinterpret_cast(data); + if (width_step > 0) { + header_.widthStep = width_step; + } + WImageC::SetIpl(&header_); +} + +// Construct a view into a region of an image +template +WImageView WImage::View(int c, int r, int width, int height) { + return WImageView(this, c, r, width, height); +} + +template +WImageViewC WImageC::View(int c, int r, int width, int height) { + return WImageViewC(this, c, r, width, height); +} + +} // end of namespace + +#endif // __cplusplus + +#endif diff --git a/include/opencv2/imgproc/imgproc.hpp b/include/opencv2/imgproc/imgproc.hpp new file mode 100644 index 0000000..bd79955 --- /dev/null +++ b/include/opencv2/imgproc/imgproc.hpp @@ -0,0 +1,1139 @@ +/*! \file imgproc.hpp + \brief The Image Processing + */ + +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_IMGPROC_HPP__ +#define __OPENCV_IMGPROC_HPP__ + +#include "opencv2/core/core.hpp" +#include "opencv2/imgproc/types_c.h" + +#ifdef __cplusplus + +/*! \namespace cv + Namespace where all the C++ OpenCV functionality resides + */ +namespace cv +{ + +//! various border interpolation methods +enum { BORDER_REPLICATE=IPL_BORDER_REPLICATE, BORDER_CONSTANT=IPL_BORDER_CONSTANT, + BORDER_REFLECT=IPL_BORDER_REFLECT, BORDER_WRAP=IPL_BORDER_WRAP, + BORDER_REFLECT_101=IPL_BORDER_REFLECT_101, BORDER_REFLECT101=BORDER_REFLECT_101, + BORDER_TRANSPARENT=IPL_BORDER_TRANSPARENT, + BORDER_DEFAULT=BORDER_REFLECT_101, BORDER_ISOLATED=16 }; + +//! 1D interpolation function: returns coordinate of the "donor" pixel for the specified location p. +CV_EXPORTS_W int borderInterpolate( int p, int len, int borderType ); + +/*! + The Base Class for 1D or Row-wise Filters + + This is the base class for linear or non-linear filters that process 1D data. 
+ In particular, such filters are used for the "horizontal" filtering parts in separable filters. + + Several functions in OpenCV return Ptr for the specific types of filters, + and those pointers can be used directly or within cv::FilterEngine. +*/ +class CV_EXPORTS BaseRowFilter +{ +public: + //! the default constructor + BaseRowFilter(); + //! the destructor + virtual ~BaseRowFilter(); + //! the filtering operator. Must be overridden in the derived classes. The horizontal border interpolation is done outside of the class. + virtual void operator()(const uchar* src, uchar* dst, + int width, int cn) = 0; + int ksize, anchor; +}; + + +/*! + The Base Class for Column-wise Filters + + This is the base class for linear or non-linear filters that process columns of 2D arrays. + Such filters are used for the "vertical" filtering parts in separable filters. + + Several functions in OpenCV return Ptr for the specific types of filters, + and those pointers can be used directly or within cv::FilterEngine. + + Unlike cv::BaseRowFilter, cv::BaseColumnFilter may have some context information, + i.e. box filter keeps the sliding sum of elements. To reset the state BaseColumnFilter::reset() + must be called (e.g. the method is called by cv::FilterEngine) + */ +class CV_EXPORTS BaseColumnFilter +{ +public: + //! the default constructor + BaseColumnFilter(); + //! the destructor + virtual ~BaseColumnFilter(); + //! the filtering operator. Must be overridden in the derived classes. The vertical border interpolation is done outside of the class. + virtual void operator()(const uchar** src, uchar* dst, int dststep, + int dstcount, int width) = 0; + //! resets the internal buffers, if any + virtual void reset(); + int ksize, anchor; +}; + +/*! + The Base Class for Non-Separable 2D Filters. + + This is the base class for linear or non-linear 2D filters. 
+ + Several functions in OpenCV return Ptr for the specific types of filters, + and those pointers can be used directly or within cv::FilterEngine. + + Similar to cv::BaseColumnFilter, the class may have some context information, + that should be reset using BaseFilter::reset() method before processing the new array. +*/ +class CV_EXPORTS BaseFilter +{ +public: + //! the default constructor + BaseFilter(); + //! the destructor + virtual ~BaseFilter(); + //! the filtering operator. The horizontal and the vertical border interpolation is done outside of the class. + virtual void operator()(const uchar** src, uchar* dst, int dststep, + int dstcount, int width, int cn) = 0; + //! resets the internal buffers, if any + virtual void reset(); + Size ksize; + Point anchor; +}; + +/*! + The Main Class for Image Filtering. + + The class can be used to apply an arbitrary filtering operation to an image. + It contains all the necessary intermediate buffers, it computes extrapolated values + of the "virtual" pixels outside of the image etc. + Pointers to the initialized cv::FilterEngine instances + are returned by various OpenCV functions, such as cv::createSeparableLinearFilter(), + cv::createLinearFilter(), cv::createGaussianFilter(), cv::createDerivFilter(), + cv::createBoxFilter() and cv::createMorphologyFilter(). + + Using the class you can process large images by parts and build complex pipelines + that include filtering as some of the stages. If all you need is to apply some pre-defined + filtering operation, you may use cv::filter2D(), cv::erode(), cv::dilate() etc. + functions that create FilterEngine internally. + + Here is the example on how to use the class to implement Laplacian operator, which is the sum of + second-order derivatives. More complex variant for different types is implemented in cv::Laplacian(). 
+ + \code + void laplace_f(const Mat& src, Mat& dst) + { + CV_Assert( src.type() == CV_32F ); + // make sure the destination array has the proper size and type + dst.create(src.size(), src.type()); + + // get the derivative and smooth kernels for d2I/dx2. + // for d2I/dy2 we could use the same kernels, just swapped + Mat kd, ks; + getSobelKernels( kd, ks, 2, 0, ksize, false, ktype ); + + // let's process 10 source rows at once + int DELTA = std::min(10, src.rows); + Ptr Fxx = createSeparableLinearFilter(src.type(), + dst.type(), kd, ks, Point(-1,-1), 0, borderType, borderType, Scalar() ); + Ptr Fyy = createSeparableLinearFilter(src.type(), + dst.type(), ks, kd, Point(-1,-1), 0, borderType, borderType, Scalar() ); + + int y = Fxx->start(src), dsty = 0, dy = 0; + Fyy->start(src); + const uchar* sptr = src.data + y*src.step; + + // allocate the buffers for the spatial image derivatives; + // the buffers need to have more than DELTA rows, because at the + // last iteration the output may take max(kd.rows-1,ks.rows-1) + // rows more than the input. + Mat Ixx( DELTA + kd.rows - 1, src.cols, dst.type() ); + Mat Iyy( DELTA + kd.rows - 1, src.cols, dst.type() ); + + // inside the loop we always pass DELTA rows to the filter + // (note that the "proceed" method takes care of possibe overflow, since + // it was given the actual image height in the "start" method) + // on output we can get: + // * < DELTA rows (the initial buffer accumulation stage) + // * = DELTA rows (settled state in the middle) + // * > DELTA rows (then the input image is over, but we generate + // "virtual" rows using the border mode and filter them) + // this variable number of output rows is dy. + // dsty is the current output row. 
+ // sptr is the pointer to the first input row in the portion to process + for( ; dsty < dst.rows; sptr += DELTA*src.step, dsty += dy ) + { + Fxx->proceed( sptr, (int)src.step, DELTA, Ixx.data, (int)Ixx.step ); + dy = Fyy->proceed( sptr, (int)src.step, DELTA, d2y.data, (int)Iyy.step ); + if( dy > 0 ) + { + Mat dstripe = dst.rowRange(dsty, dsty + dy); + add(Ixx.rowRange(0, dy), Iyy.rowRange(0, dy), dstripe); + } + } + } + \endcode +*/ +class CV_EXPORTS FilterEngine +{ +public: + //! the default constructor + FilterEngine(); + //! the full constructor. Either _filter2D or both _rowFilter and _columnFilter must be non-empty. + FilterEngine(const Ptr& _filter2D, + const Ptr& _rowFilter, + const Ptr& _columnFilter, + int srcType, int dstType, int bufType, + int _rowBorderType=BORDER_REPLICATE, + int _columnBorderType=-1, + const Scalar& _borderValue=Scalar()); + //! the destructor + virtual ~FilterEngine(); + //! reinitializes the engine. The previously assigned filters are released. + void init(const Ptr& _filter2D, + const Ptr& _rowFilter, + const Ptr& _columnFilter, + int srcType, int dstType, int bufType, + int _rowBorderType=BORDER_REPLICATE, int _columnBorderType=-1, + const Scalar& _borderValue=Scalar()); + //! starts filtering of the specified ROI of an image of size wholeSize. + virtual int start(Size wholeSize, Rect roi, int maxBufRows=-1); + //! starts filtering of the specified ROI of the specified image. + virtual int start(const Mat& src, const Rect& srcRoi=Rect(0,0,-1,-1), + bool isolated=false, int maxBufRows=-1); + //! processes the next srcCount rows of the image. + virtual int proceed(const uchar* src, int srcStep, int srcCount, + uchar* dst, int dstStep); + //! applies filter to the specified ROI of the image. if srcRoi=(0,0,-1,-1), the whole image is filtered. + virtual void apply( const Mat& src, Mat& dst, + const Rect& srcRoi=Rect(0,0,-1,-1), + Point dstOfs=Point(0,0), + bool isolated=false); + //! 
returns true if the filter is separable + bool isSeparable() const { return (const BaseFilter*)filter2D == 0; } + //! returns the number + int remainingInputRows() const; + int remainingOutputRows() const; + + int srcType, dstType, bufType; + Size ksize; + Point anchor; + int maxWidth; + Size wholeSize; + Rect roi; + int dx1, dx2; + int rowBorderType, columnBorderType; + vector borderTab; + int borderElemSize; + vector ringBuf; + vector srcRow; + vector constBorderValue; + vector constBorderRow; + int bufStep, startY, startY0, endY, rowCount, dstY; + vector rows; + + Ptr filter2D; + Ptr rowFilter; + Ptr columnFilter; +}; + +//! type of the kernel +enum { KERNEL_GENERAL=0, KERNEL_SYMMETRICAL=1, KERNEL_ASYMMETRICAL=2, + KERNEL_SMOOTH=4, KERNEL_INTEGER=8 }; + +//! returns type (one of KERNEL_*) of 1D or 2D kernel specified by its coefficients. +CV_EXPORTS int getKernelType(InputArray kernel, Point anchor); + +//! returns the primitive row filter with the specified kernel +CV_EXPORTS Ptr getLinearRowFilter(int srcType, int bufType, + InputArray kernel, int anchor, + int symmetryType); + +//! returns the primitive column filter with the specified kernel +CV_EXPORTS Ptr getLinearColumnFilter(int bufType, int dstType, + InputArray kernel, int anchor, + int symmetryType, double delta=0, + int bits=0); + +//! returns 2D filter with the specified kernel +CV_EXPORTS Ptr getLinearFilter(int srcType, int dstType, + InputArray kernel, + Point anchor=Point(-1,-1), + double delta=0, int bits=0); + +//! returns the separable linear filter engine +CV_EXPORTS Ptr createSeparableLinearFilter(int srcType, int dstType, + InputArray rowKernel, InputArray columnKernel, + Point _anchor=Point(-1,-1), double delta=0, + int _rowBorderType=BORDER_DEFAULT, + int _columnBorderType=-1, + const Scalar& _borderValue=Scalar()); + +//! 
returns the non-separable linear filter engine +CV_EXPORTS Ptr createLinearFilter(int srcType, int dstType, + InputArray kernel, Point _anchor=Point(-1,-1), + double delta=0, int _rowBorderType=BORDER_DEFAULT, + int _columnBorderType=-1, const Scalar& _borderValue=Scalar()); + +//! returns the Gaussian kernel with the specified parameters +CV_EXPORTS_W Mat getGaussianKernel( int ksize, double sigma, int ktype=CV_64F ); + +//! returns the Gaussian filter engine +CV_EXPORTS Ptr createGaussianFilter( int type, Size ksize, + double sigma1, double sigma2=0, + int borderType=BORDER_DEFAULT); +//! initializes kernels of the generalized Sobel operator +CV_EXPORTS_W void getDerivKernels( OutputArray kx, OutputArray ky, + int dx, int dy, int ksize, + bool normalize=false, int ktype=CV_32F ); +//! returns filter engine for the generalized Sobel operator +CV_EXPORTS Ptr createDerivFilter( int srcType, int dstType, + int dx, int dy, int ksize, + int borderType=BORDER_DEFAULT ); +//! returns horizontal 1D box filter +CV_EXPORTS Ptr getRowSumFilter(int srcType, int sumType, + int ksize, int anchor=-1); +//! returns vertical 1D box filter +CV_EXPORTS Ptr getColumnSumFilter( int sumType, int dstType, + int ksize, int anchor=-1, + double scale=1); +//! returns box filter engine +CV_EXPORTS Ptr createBoxFilter( int srcType, int dstType, Size ksize, + Point anchor=Point(-1,-1), + bool normalize=true, + int borderType=BORDER_DEFAULT); +//! type of morphological operation +enum { MORPH_ERODE=CV_MOP_ERODE, MORPH_DILATE=CV_MOP_DILATE, + MORPH_OPEN=CV_MOP_OPEN, MORPH_CLOSE=CV_MOP_CLOSE, + MORPH_GRADIENT=CV_MOP_GRADIENT, MORPH_TOPHAT=CV_MOP_TOPHAT, + MORPH_BLACKHAT=CV_MOP_BLACKHAT }; + +//! returns horizontal 1D morphological filter +CV_EXPORTS Ptr getMorphologyRowFilter(int op, int type, int ksize, int anchor=-1); +//! returns vertical 1D morphological filter +CV_EXPORTS Ptr getMorphologyColumnFilter(int op, int type, int ksize, int anchor=-1); +//! 
returns 2D morphological filter +CV_EXPORTS Ptr getMorphologyFilter(int op, int type, InputArray kernel, + Point anchor=Point(-1,-1)); + +//! returns "magic" border value for erosion and dilation. It is automatically transformed to Scalar::all(-DBL_MAX) for dilation. +static inline Scalar morphologyDefaultBorderValue() { return Scalar::all(DBL_MAX); } + +//! returns morphological filter engine. Only MORPH_ERODE and MORPH_DILATE are supported. +CV_EXPORTS Ptr createMorphologyFilter(int op, int type, InputArray kernel, + Point anchor=Point(-1,-1), int _rowBorderType=BORDER_CONSTANT, + int _columnBorderType=-1, + const Scalar& _borderValue=morphologyDefaultBorderValue()); + +//! shape of the structuring element +enum { MORPH_RECT=0, MORPH_CROSS=1, MORPH_ELLIPSE=2 }; +//! returns structuring element of the specified shape and size +CV_EXPORTS_W Mat getStructuringElement(int shape, Size ksize, Point anchor=Point(-1,-1)); + +template<> CV_EXPORTS void Ptr::delete_obj(); + +//! copies 2D array to a larger destination array with extrapolation of the outer part of src using the specified border mode +CV_EXPORTS_W void copyMakeBorder( InputArray src, OutputArray dst, + int top, int bottom, int left, int right, + int borderType, const Scalar& value=Scalar() ); + +//! smooths the image using median filter. +CV_EXPORTS_W void medianBlur( InputArray src, OutputArray dst, int ksize ); +//! smooths the image using Gaussian filter. +CV_EXPORTS_W void GaussianBlur( InputArray src, + OutputArray dst, Size ksize, + double sigma1, double sigma2=0, + int borderType=BORDER_DEFAULT ); +//! smooths the image using bilateral filter +CV_EXPORTS_W void bilateralFilter( InputArray src, OutputArray dst, int d, + double sigmaColor, double sigmaSpace, + int borderType=BORDER_DEFAULT ); +//! smooths the image using the box filter. 
Each pixel is processed in O(1) time +CV_EXPORTS_W void boxFilter( InputArray src, OutputArray dst, int ddepth, + Size ksize, Point anchor=Point(-1,-1), + bool normalize=true, + int borderType=BORDER_DEFAULT ); +//! a synonym for normalized box filter +CV_EXPORTS_W void blur( InputArray src, OutputArray dst, + Size ksize, Point anchor=Point(-1,-1), + int borderType=BORDER_DEFAULT ); + +//! applies non-separable 2D linear filter to the image +CV_EXPORTS_W void filter2D( InputArray src, OutputArray dst, int ddepth, + InputArray kernel, Point anchor=Point(-1,-1), + double delta=0, int borderType=BORDER_DEFAULT ); + +//! applies separable 2D linear filter to the image +CV_EXPORTS_W void sepFilter2D( InputArray src, OutputArray dst, int ddepth, + InputArray kernelX, InputArray kernelY, + Point anchor=Point(-1,-1), + double delta=0, int borderType=BORDER_DEFAULT ); + +//! applies generalized Sobel operator to the image +CV_EXPORTS_W void Sobel( InputArray src, OutputArray dst, int ddepth, + int dx, int dy, int ksize=3, + double scale=1, double delta=0, + int borderType=BORDER_DEFAULT ); + +//! applies the vertical or horizontal Scharr operator to the image +CV_EXPORTS_W void Scharr( InputArray src, OutputArray dst, int ddepth, + int dx, int dy, double scale=1, double delta=0, + int borderType=BORDER_DEFAULT ); + +//! applies Laplacian operator to the image +CV_EXPORTS_W void Laplacian( InputArray src, OutputArray dst, int ddepth, + int ksize=1, double scale=1, double delta=0, + int borderType=BORDER_DEFAULT ); + +//! applies Canny edge detector and produces the edge map. +CV_EXPORTS_W void Canny( InputArray image, OutputArray edges, + double threshold1, double threshold2, + int apertureSize=3, bool L2gradient=false ); + +//! computes minimum eigen value of 2x2 derivative covariation matrix at each pixel - the cornerness criteria +CV_EXPORTS_W void cornerMinEigenVal( InputArray src, OutputArray dst, + int blockSize, int ksize=3, + int borderType=BORDER_DEFAULT ); + +//! 
computes Harris cornerness criteria at each image pixel +CV_EXPORTS_W void cornerHarris( InputArray src, OutputArray dst, int blockSize, + int ksize, double k, + int borderType=BORDER_DEFAULT ); + +//! computes both eigenvalues and the eigenvectors of 2x2 derivative covariation matrix at each pixel. The output is stored as 6-channel matrix. +CV_EXPORTS_W void cornerEigenValsAndVecs( InputArray src, OutputArray dst, + int blockSize, int ksize, + int borderType=BORDER_DEFAULT ); + +//! computes another complex cornerness criteria at each pixel +CV_EXPORTS_W void preCornerDetect( InputArray src, OutputArray dst, int ksize, + int borderType=BORDER_DEFAULT ); + +//! adjusts the corner locations with sub-pixel accuracy to maximize the certain cornerness criteria +CV_EXPORTS_W void cornerSubPix( InputArray image, InputOutputArray corners, + Size winSize, Size zeroZone, + TermCriteria criteria ); + +//! finds the strong enough corners where the cornerMinEigenVal() or cornerHarris() report the local maxima +CV_EXPORTS_W void goodFeaturesToTrack( InputArray image, OutputArray corners, + int maxCorners, double qualityLevel, double minDistance, + InputArray mask=noArray(), int blockSize=3, + bool useHarrisDetector=false, double k=0.04 ); + +//! finds lines in the black-n-white image using the standard or pyramid Hough transform +CV_EXPORTS_W void HoughLines( InputArray image, OutputArray lines, + double rho, double theta, int threshold, + double srn=0, double stn=0 ); + +//! finds line segments in the black-n-white image using probabalistic Hough transform +CV_EXPORTS_W void HoughLinesP( InputArray image, OutputArray lines, + double rho, double theta, int threshold, + double minLineLength=0, double maxLineGap=0 ); + +//! 
finds circles in the grayscale image using 2+1 gradient Hough transform +CV_EXPORTS_W void HoughCircles( InputArray image, OutputArray circles, + int method, double dp, double minDist, + double param1=100, double param2=100, + int minRadius=0, int maxRadius=0 ); + +//! erodes the image (applies the local minimum operator) +CV_EXPORTS_W void erode( InputArray src, OutputArray dst, InputArray kernel, + Point anchor=Point(-1,-1), int iterations=1, + int borderType=BORDER_CONSTANT, + const Scalar& borderValue=morphologyDefaultBorderValue() ); + +//! dilates the image (applies the local maximum operator) +CV_EXPORTS_W void dilate( InputArray src, OutputArray dst, InputArray kernel, + Point anchor=Point(-1,-1), int iterations=1, + int borderType=BORDER_CONSTANT, + const Scalar& borderValue=morphologyDefaultBorderValue() ); + +//! applies an advanced morphological operation to the image +CV_EXPORTS_W void morphologyEx( InputArray src, OutputArray dst, + int op, InputArray kernel, + Point anchor=Point(-1,-1), int iterations=1, + int borderType=BORDER_CONSTANT, + const Scalar& borderValue=morphologyDefaultBorderValue() ); + +//! interpolation algorithm +enum +{ + INTER_NEAREST=CV_INTER_NN, //!< nearest neighbor interpolation + INTER_LINEAR=CV_INTER_LINEAR, //!< bilinear interpolation + INTER_CUBIC=CV_INTER_CUBIC, //!< bicubic interpolation + INTER_AREA=CV_INTER_AREA, //!< area-based (or super) interpolation + INTER_LANCZOS4=CV_INTER_LANCZOS4, //!< Lanczos interpolation over 8x8 neighborhood + INTER_MAX=7, + WARP_INVERSE_MAP=CV_WARP_INVERSE_MAP +}; + +//! resizes the image +CV_EXPORTS_W void resize( InputArray src, OutputArray dst, + Size dsize, double fx=0, double fy=0, + int interpolation=INTER_LINEAR ); + +//! warps the image using affine transformation +CV_EXPORTS_W void warpAffine( InputArray src, OutputArray dst, + InputArray M, Size dsize, + int flags=INTER_LINEAR, + int borderMode=BORDER_CONSTANT, + const Scalar& borderValue=Scalar()); + +//! 
warps the image using perspective transformation +CV_EXPORTS_W void warpPerspective( InputArray src, OutputArray dst, + InputArray M, Size dsize, + int flags=INTER_LINEAR, + int borderMode=BORDER_CONSTANT, + const Scalar& borderValue=Scalar()); + +enum +{ + INTER_BITS=5, INTER_BITS2=INTER_BITS*2, + INTER_TAB_SIZE=(1< CV_EXPORTS void Ptr::delete_obj(); + +//! computes the joint dense histogram for a set of images. +CV_EXPORTS void calcHist( const Mat* images, int nimages, + const int* channels, InputArray mask, + OutputArray hist, int dims, const int* histSize, + const float** ranges, bool uniform=true, bool accumulate=false ); + +//! computes the joint sparse histogram for a set of images. +CV_EXPORTS void calcHist( const Mat* images, int nimages, + const int* channels, InputArray mask, + SparseMat& hist, int dims, + const int* histSize, const float** ranges, + bool uniform=true, bool accumulate=false ); + +CV_EXPORTS_W void calcHist( InputArrayOfArrays images, + const vector& channels, + InputArray mask, OutputArray hist, + const vector& histSize, + const vector& ranges, + bool accumulate=false ); + +//! computes back projection for the set of images +CV_EXPORTS void calcBackProject( const Mat* images, int nimages, + const int* channels, InputArray hist, + OutputArray backProject, const float** ranges, + double scale=1, bool uniform=true ); + +//! computes back projection for the set of images +CV_EXPORTS void calcBackProject( const Mat* images, int nimages, + const int* channels, const SparseMat& hist, + OutputArray backProject, const float** ranges, + double scale=1, bool uniform=true ); + +CV_EXPORTS_W void calcBackProject( InputArrayOfArrays images, const vector& channels, + InputArray hist, OutputArray dst, + const vector& ranges, + double scale ); + +//! compares two histograms stored in dense arrays +CV_EXPORTS_W double compareHist( InputArray H1, InputArray H2, int method ); + +//! 
compares two histograms stored in sparse arrays +CV_EXPORTS double compareHist( const SparseMat& H1, const SparseMat& H2, int method ); + +//! normalizes the grayscale image brightness and contrast by normalizing its histogram +CV_EXPORTS_W void equalizeHist( InputArray src, OutputArray dst ); + +CV_EXPORTS float EMD( InputArray signature1, InputArray signature2, + int distType, InputArray cost=noArray(), + float* lowerBound=0, OutputArray flow=noArray() ); + +//! segments the image using watershed algorithm +CV_EXPORTS_W void watershed( InputArray image, InputOutputArray markers ); + +//! filters image using meanshift algorithm +CV_EXPORTS_W void pyrMeanShiftFiltering( InputArray src, OutputArray dst, + double sp, double sr, int maxLevel=1, + TermCriteria termcrit=TermCriteria( + TermCriteria::MAX_ITER+TermCriteria::EPS,5,1) ); + +//! class of the pixel in GrabCut algorithm +enum +{ + GC_BGD = 0, //!< background + GC_FGD = 1, //!< foreground + GC_PR_BGD = 2, //!< most probably background + GC_PR_FGD = 3 //!< most probably foreground +}; + +//! GrabCut algorithm flags +enum +{ + GC_INIT_WITH_RECT = 0, + GC_INIT_WITH_MASK = 1, + GC_EVAL = 2 +}; + +//! segments the image using GrabCut algorithm +CV_EXPORTS_W void grabCut( InputArray img, InputOutputArray mask, Rect rect, + InputOutputArray bgdModel, InputOutputArray fgdModel, + int iterCount, int mode = GC_EVAL ); + +//! the inpainting algorithm +enum +{ + INPAINT_NS=CV_INPAINT_NS, // Navier-Stokes algorithm + INPAINT_TELEA=CV_INPAINT_TELEA // A. Telea algorithm +}; + +//! restores the damaged image areas using one of the available intpainting algorithms +CV_EXPORTS_W void inpaint( InputArray src, InputArray inpaintMask, + OutputArray dst, double inpaintRange, int flags ); + +//! builds the discrete Voronoi diagram +CV_EXPORTS_W void distanceTransform( InputArray src, OutputArray dst, + OutputArray labels, int distanceType, int maskSize ); + +//! 
computes the distance transform map +CV_EXPORTS void distanceTransform( InputArray src, OutputArray dst, + int distanceType, int maskSize ); + +enum { FLOODFILL_FIXED_RANGE = 1 << 16, FLOODFILL_MASK_ONLY = 1 << 17 }; + +//! fills the semi-uniform image region starting from the specified seed point +CV_EXPORTS int floodFill( InputOutputArray image, + Point seedPoint, Scalar newVal, CV_OUT Rect* rect=0, + Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), + int flags=4 ); + +//! fills the semi-uniform image region and/or the mask starting from the specified seed point +CV_EXPORTS_W int floodFill( InputOutputArray image, InputOutputArray mask, + Point seedPoint, Scalar newVal, CV_OUT Rect* rect=0, + Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), + int flags=4 ); + + +enum +{ + COLOR_BGR2BGRA =0, + COLOR_RGB2RGBA =COLOR_BGR2BGRA, + + COLOR_BGRA2BGR =1, + COLOR_RGBA2RGB =COLOR_BGRA2BGR, + + COLOR_BGR2RGBA =2, + COLOR_RGB2BGRA =COLOR_BGR2RGBA, + + COLOR_RGBA2BGR =3, + COLOR_BGRA2RGB =COLOR_RGBA2BGR, + + COLOR_BGR2RGB =4, + COLOR_RGB2BGR =COLOR_BGR2RGB, + + COLOR_BGRA2RGBA =5, + COLOR_RGBA2BGRA =COLOR_BGRA2RGBA, + + COLOR_BGR2GRAY =6, + COLOR_RGB2GRAY =7, + COLOR_GRAY2BGR =8, + COLOR_GRAY2RGB =COLOR_GRAY2BGR, + COLOR_GRAY2BGRA =9, + COLOR_GRAY2RGBA =COLOR_GRAY2BGRA, + COLOR_BGRA2GRAY =10, + COLOR_RGBA2GRAY =11, + + COLOR_BGR2BGR565 =12, + COLOR_RGB2BGR565 =13, + COLOR_BGR5652BGR =14, + COLOR_BGR5652RGB =15, + COLOR_BGRA2BGR565 =16, + COLOR_RGBA2BGR565 =17, + COLOR_BGR5652BGRA =18, + COLOR_BGR5652RGBA =19, + + COLOR_GRAY2BGR565 =20, + COLOR_BGR5652GRAY =21, + + COLOR_BGR2BGR555 =22, + COLOR_RGB2BGR555 =23, + COLOR_BGR5552BGR =24, + COLOR_BGR5552RGB =25, + COLOR_BGRA2BGR555 =26, + COLOR_RGBA2BGR555 =27, + COLOR_BGR5552BGRA =28, + COLOR_BGR5552RGBA =29, + + COLOR_GRAY2BGR555 =30, + COLOR_BGR5552GRAY =31, + + COLOR_BGR2XYZ =32, + COLOR_RGB2XYZ =33, + COLOR_XYZ2BGR =34, + COLOR_XYZ2RGB =35, + + COLOR_BGR2YCrCb =36, + COLOR_RGB2YCrCb =37, + COLOR_YCrCb2BGR =38, + COLOR_YCrCb2RGB 
=39, + + COLOR_BGR2HSV =40, + COLOR_RGB2HSV =41, + + COLOR_BGR2Lab =44, + COLOR_RGB2Lab =45, + + COLOR_BayerBG2BGR =46, + COLOR_BayerGB2BGR =47, + COLOR_BayerRG2BGR =48, + COLOR_BayerGR2BGR =49, + + COLOR_BayerBG2RGB =COLOR_BayerRG2BGR, + COLOR_BayerGB2RGB =COLOR_BayerGR2BGR, + COLOR_BayerRG2RGB =COLOR_BayerBG2BGR, + COLOR_BayerGR2RGB =COLOR_BayerGB2BGR, + + COLOR_BGR2Luv =50, + COLOR_RGB2Luv =51, + COLOR_BGR2HLS =52, + COLOR_RGB2HLS =53, + + COLOR_HSV2BGR =54, + COLOR_HSV2RGB =55, + + COLOR_Lab2BGR =56, + COLOR_Lab2RGB =57, + COLOR_Luv2BGR =58, + COLOR_Luv2RGB =59, + COLOR_HLS2BGR =60, + COLOR_HLS2RGB =61, + + COLOR_BayerBG2BGR_VNG =62, + COLOR_BayerGB2BGR_VNG =63, + COLOR_BayerRG2BGR_VNG =64, + COLOR_BayerGR2BGR_VNG =65, + + COLOR_BayerBG2RGB_VNG =COLOR_BayerRG2BGR_VNG, + COLOR_BayerGB2RGB_VNG =COLOR_BayerGR2BGR_VNG, + COLOR_BayerRG2RGB_VNG =COLOR_BayerBG2BGR_VNG, + COLOR_BayerGR2RGB_VNG =COLOR_BayerGB2BGR_VNG, + + COLOR_BGR2HSV_FULL = 66, + COLOR_RGB2HSV_FULL = 67, + COLOR_BGR2HLS_FULL = 68, + COLOR_RGB2HLS_FULL = 69, + + COLOR_HSV2BGR_FULL = 70, + COLOR_HSV2RGB_FULL = 71, + COLOR_HLS2BGR_FULL = 72, + COLOR_HLS2RGB_FULL = 73, + + COLOR_LBGR2Lab = 74, + COLOR_LRGB2Lab = 75, + COLOR_LBGR2Luv = 76, + COLOR_LRGB2Luv = 77, + + COLOR_Lab2LBGR = 78, + COLOR_Lab2LRGB = 79, + COLOR_Luv2LBGR = 80, + COLOR_Luv2LRGB = 81, + + COLOR_BGR2YUV = 82, + COLOR_RGB2YUV = 83, + COLOR_YUV2BGR = 84, + COLOR_YUV2RGB = 85, + + COLOR_BayerBG2GRAY = 86, + COLOR_BayerGB2GRAY = 87, + COLOR_BayerRG2GRAY = 88, + COLOR_BayerGR2GRAY = 89, + + COLOR_YUV420i2RGB = 90, + COLOR_YUV420i2BGR = 91, + COLOR_YUV420sp2RGB = 92, + COLOR_YUV420sp2BGR = 93, + + COLOR_COLORCVT_MAX =100 +}; + + +//! converts image from one color space to another +CV_EXPORTS_W void cvtColor( InputArray src, OutputArray dst, int code, int dstCn=0 ); + +//! raster image moments +class CV_EXPORTS_W_MAP Moments +{ +public: + //! the default constructor + Moments(); + //! 
the full constructor + Moments(double m00, double m10, double m01, double m20, double m11, + double m02, double m30, double m21, double m12, double m03 ); + //! the conversion from CvMoments + Moments( const CvMoments& moments ); + //! the conversion to CvMoments + operator CvMoments() const; + + //! spatial moments + CV_PROP_RW double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03; + //! central moments + CV_PROP_RW double mu20, mu11, mu02, mu30, mu21, mu12, mu03; + //! central normalized moments + CV_PROP_RW double nu20, nu11, nu02, nu30, nu21, nu12, nu03; +}; + +//! computes moments of the rasterized shape or a vector of points +CV_EXPORTS_W Moments moments( InputArray array, bool binaryImage=false ); + +//! computes 7 Hu invariants from the moments +CV_EXPORTS void HuMoments( const Moments& moments, double hu[7] ); +CV_EXPORTS_W void HuMoments( const Moments& m, CV_OUT OutputArray hu ); + +//! type of the template matching operation +enum { TM_SQDIFF=0, TM_SQDIFF_NORMED=1, TM_CCORR=2, TM_CCORR_NORMED=3, TM_CCOEFF=4, TM_CCOEFF_NORMED=5 }; + +//! computes the proximity map for the raster template and the image where the template is searched for +CV_EXPORTS_W void matchTemplate( InputArray image, InputArray templ, + OutputArray result, int method ); + +//! mode of the contour retrieval algorithm +enum +{ + RETR_EXTERNAL=CV_RETR_EXTERNAL, //!< retrieve only the most external (top-level) contours + RETR_LIST=CV_RETR_LIST, //!< retrieve all the contours without any hierarchical information + RETR_CCOMP=CV_RETR_CCOMP, //!< retrieve the connected components (that can possibly be nested) + RETR_TREE=CV_RETR_TREE //!< retrieve all the contours and the whole hierarchy +}; + +//! the contour approximation algorithm +enum +{ + CHAIN_APPROX_NONE=CV_CHAIN_APPROX_NONE, + CHAIN_APPROX_SIMPLE=CV_CHAIN_APPROX_SIMPLE, + CHAIN_APPROX_TC89_L1=CV_CHAIN_APPROX_TC89_L1, + CHAIN_APPROX_TC89_KCOS=CV_CHAIN_APPROX_TC89_KCOS +}; + +//! 
retrieves contours and the hierarchical information from black-n-white image. +CV_EXPORTS_W void findContours( InputOutputArray image, OutputArrayOfArrays contours, + OutputArray hierarchy, int mode, + int method, Point offset=Point()); + +//! retrieves contours from black-n-white image. +CV_EXPORTS void findContours( InputOutputArray image, OutputArrayOfArrays contours, + int mode, int method, Point offset=Point()); + +//! draws contours in the image +CV_EXPORTS_W void drawContours( InputOutputArray image, InputArrayOfArrays contours, + int contourIdx, const Scalar& color, + int thickness=1, int lineType=8, + InputArray hierarchy=noArray(), + int maxLevel=INT_MAX, Point offset=Point() ); + +//! approximates contour or a curve using Douglas-Peucker algorithm +CV_EXPORTS_W void approxPolyDP( InputArray curve, + OutputArray approxCurve, + double epsilon, bool closed ); + +//! computes the contour perimeter (closed=true) or a curve length +CV_EXPORTS_W double arcLength( InputArray curve, bool closed ); +//! computes the bounding rectangle for a contour +CV_EXPORTS_W Rect boundingRect( InputArray points ); +//! computes the contour area +CV_EXPORTS_W double contourArea( InputArray contour, bool oriented=false ); +//! computes the minimal rotated rectangle for a set of points +CV_EXPORTS_W RotatedRect minAreaRect( InputArray points ); +//! computes the minimal enclosing circle for a set of points +CV_EXPORTS_W void minEnclosingCircle( InputArray points, + CV_OUT Point2f& center, CV_OUT float& radius ); +//! matches two contours using one of the available algorithms +CV_EXPORTS_W double matchShapes( InputArray contour1, InputArray contour2, + int method, double parameter ); +//! computes convex hull for a set of 2D points. +CV_EXPORTS_W void convexHull( InputArray points, OutputArray hull, + bool clockwise=false, bool returnPoints=true ); + +//! returns true iff the contour is convex. 
Does not support contours with self-intersection +CV_EXPORTS_W bool isContourConvex( InputArray contour ); + +//! fits ellipse to the set of 2D points +CV_EXPORTS_W RotatedRect fitEllipse( InputArray points ); + +//! fits line to the set of 2D points using M-estimator algorithm +CV_EXPORTS_W void fitLine( InputArray points, OutputArray line, int distType, + double param, double reps, double aeps ); +//! checks if the point is inside the contour. Optionally computes the signed distance from the point to the contour boundary +CV_EXPORTS_W double pointPolygonTest( InputArray contour, Point2f pt, bool measureDist ); + + +class CV_EXPORTS_W Subdiv2D +{ +public: + enum + { + PTLOC_ERROR = -2, + PTLOC_OUTSIDE_RECT = -1, + PTLOC_INSIDE = 0, + PTLOC_VERTEX = 1, + PTLOC_ON_EDGE = 2 + }; + + enum + { + NEXT_AROUND_ORG = 0x00, + NEXT_AROUND_DST = 0x22, + PREV_AROUND_ORG = 0x11, + PREV_AROUND_DST = 0x33, + NEXT_AROUND_LEFT = 0x13, + NEXT_AROUND_RIGHT = 0x31, + PREV_AROUND_LEFT = 0x20, + PREV_AROUND_RIGHT = 0x02 + }; + + CV_WRAP Subdiv2D(); + CV_WRAP Subdiv2D(Rect rect); + CV_WRAP void initDelaunay(Rect rect); + + CV_WRAP int insert(Point2f pt); + CV_WRAP void insert(const vector& ptvec); + CV_WRAP int locate(Point2f pt, CV_OUT int& edge, CV_OUT int& vertex); + + CV_WRAP int findNearest(Point2f pt, CV_OUT Point2f* nearestPt=0); + CV_WRAP void getEdgeList(CV_OUT vector& edgeList) const; + CV_WRAP void getTriangleList(CV_OUT vector& triangleList) const; + CV_WRAP void getVoronoiFacetList(const vector& idx, CV_OUT vector >& facetList, + CV_OUT vector& facetCenters); + + CV_WRAP Point2f getVertex(int vertex, CV_OUT int* firstEdge=0) const; + + CV_WRAP int getEdge( int edge, int nextEdgeType ) const; + CV_WRAP int nextEdge(int edge) const; + CV_WRAP int rotateEdge(int edge, int rotate) const; + CV_WRAP int symEdge(int edge) const; + CV_WRAP int edgeOrg(int edge, CV_OUT Point2f* orgpt=0) const; + CV_WRAP int edgeDst(int edge, CV_OUT Point2f* dstpt=0) const; + +protected: + int 
newEdge(); + void deleteEdge(int edge); + int newPoint(Point2f pt, bool isvirtual, int firstEdge=0); + void deletePoint(int vtx); + void setEdgePoints( int edge, int orgPt, int dstPt ); + void splice( int edgeA, int edgeB ); + int connectEdges( int edgeA, int edgeB ); + void swapEdges( int edge ); + int isRightOf(Point2f pt, int edge) const; + void calcVoronoi(); + void clearVoronoi(); + void check() const; + + struct CV_EXPORTS Vertex + { + Vertex(); + Vertex(Point2f pt, bool _isvirtual, int _firstEdge=0); + bool isvirtual() const; + bool isfree() const; + int firstEdge; + int type; + Point2f pt; + }; + struct CV_EXPORTS QuadEdge + { + QuadEdge(); + QuadEdge(int edgeidx); + bool isfree() const; + int next[4]; + int pt[4]; + }; + + vector vtx; + vector qedges; + int freeQEdge; + int freePoint; + bool validGeometry; + + int recentEdge; + Point2f topLeft; + Point2f bottomRight; +}; + +} + +// 2009-01-12, Xavier Delacour + +struct lsh_hash { + int h1, h2; +}; + +struct CvLSHOperations +{ + virtual ~CvLSHOperations() {} + + virtual int vector_add(const void* data) = 0; + virtual void vector_remove(int i) = 0; + virtual const void* vector_lookup(int i) = 0; + virtual void vector_reserve(int n) = 0; + virtual unsigned int vector_count() = 0; + + virtual void hash_insert(lsh_hash h, int l, int i) = 0; + virtual void hash_remove(lsh_hash h, int l, int i) = 0; + virtual int hash_lookup(lsh_hash h, int l, int* ret_i, int ret_i_max) = 0; +}; + +#endif /* __cplusplus */ + +#endif + +/* End of file. */ diff --git a/include/opencv2/imgproc/imgproc_c.h b/include/opencv2/imgproc/imgproc_c.h new file mode 100644 index 0000000..b845e1c --- /dev/null +++ b/include/opencv2/imgproc/imgproc_c.h @@ -0,0 +1,783 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_IMGPROC_IMGPROC_C_H__ +#define __OPENCV_IMGPROC_IMGPROC_C_H__ + +#include "opencv2/core/core_c.h" +#include "opencv2/imgproc/types_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/*********************** Background statistics accumulation *****************************/ + +/* Adds image to accumulator */ +CVAPI(void) cvAcc( const CvArr* image, CvArr* sum, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Adds squared image to accumulator */ +CVAPI(void) cvSquareAcc( const CvArr* image, CvArr* sqsum, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Adds a product of two images to accumulator */ +CVAPI(void) cvMultiplyAcc( const CvArr* image1, const CvArr* image2, CvArr* acc, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Adds image to accumulator with weights: acc = acc*(1-alpha) + image*alpha */ +CVAPI(void) cvRunningAvg( const CvArr* image, CvArr* acc, double alpha, + const CvArr* mask CV_DEFAULT(NULL) ); + +/****************************************************************************************\ +* Image Processing * +\****************************************************************************************/ + +/* Copies source 2D array inside of the larger destination array and + makes a border of the specified type (IPL_BORDER_*) around the copied area. 
*/ +CVAPI(void) cvCopyMakeBorder( const CvArr* src, CvArr* dst, CvPoint offset, + int bordertype, CvScalar value CV_DEFAULT(cvScalarAll(0))); + +/* Smoothes array (removes noise) */ +CVAPI(void) cvSmooth( const CvArr* src, CvArr* dst, + int smoothtype CV_DEFAULT(CV_GAUSSIAN), + int size1 CV_DEFAULT(3), + int size2 CV_DEFAULT(0), + double sigma1 CV_DEFAULT(0), + double sigma2 CV_DEFAULT(0)); + +/* Convolves the image with the kernel */ +CVAPI(void) cvFilter2D( const CvArr* src, CvArr* dst, const CvMat* kernel, + CvPoint anchor CV_DEFAULT(cvPoint(-1,-1))); + +/* Finds integral image: SUM(X,Y) = sum(xnext[(edge + (int)type) & 3]; + return (edge & ~3) + ((edge + ((int)type >> 4)) & 3); +} + + +CV_INLINE CvSubdiv2DPoint* cvSubdiv2DEdgeOrg( CvSubdiv2DEdge edge ) +{ + CvQuadEdge2D* e = (CvQuadEdge2D*)(edge & ~3); + return (CvSubdiv2DPoint*)e->pt[edge & 3]; +} + + +CV_INLINE CvSubdiv2DPoint* cvSubdiv2DEdgeDst( CvSubdiv2DEdge edge ) +{ + CvQuadEdge2D* e = (CvQuadEdge2D*)(edge & ~3); + return (CvSubdiv2DPoint*)e->pt[(edge + 2) & 3]; +} + + +CV_INLINE double cvTriangleArea( CvPoint2D32f a, CvPoint2D32f b, CvPoint2D32f c ) +{ + return ((double)b.x - a.x) * ((double)c.y - a.y) - ((double)b.y - a.y) * ((double)c.x - a.x); +} + + +/****************************************************************************************\ +* Contour Processing and Shape Analysis * +\****************************************************************************************/ + +/* Approximates a single polygonal curve (contour) or + a tree of polygonal curves (contours) */ +CVAPI(CvSeq*) cvApproxPoly( const void* src_seq, + int header_size, CvMemStorage* storage, + int method, double parameter, + int parameter2 CV_DEFAULT(0)); + +/* Calculates perimeter of a contour or length of a part of contour */ +CVAPI(double) cvArcLength( const void* curve, + CvSlice slice CV_DEFAULT(CV_WHOLE_SEQ), + int is_closed CV_DEFAULT(-1)); + +CV_INLINE double cvContourPerimeter( const void* contour ) +{ + return 
cvArcLength( contour, CV_WHOLE_SEQ, 1 ); +} + + +/* Calculates contour boundning rectangle (update=1) or + just retrieves pre-calculated rectangle (update=0) */ +CVAPI(CvRect) cvBoundingRect( CvArr* points, int update CV_DEFAULT(0) ); + +/* Calculates area of a contour or contour segment */ +CVAPI(double) cvContourArea( const CvArr* contour, + CvSlice slice CV_DEFAULT(CV_WHOLE_SEQ), + int oriented CV_DEFAULT(0)); + +/* Finds minimum area rotated rectangle bounding a set of points */ +CVAPI(CvBox2D) cvMinAreaRect2( const CvArr* points, + CvMemStorage* storage CV_DEFAULT(NULL)); + +/* Finds minimum enclosing circle for a set of points */ +CVAPI(int) cvMinEnclosingCircle( const CvArr* points, + CvPoint2D32f* center, float* radius ); + +/* Compares two contours by matching their moments */ +CVAPI(double) cvMatchShapes( const void* object1, const void* object2, + int method, double parameter CV_DEFAULT(0)); + +/* Calculates exact convex hull of 2d point set */ +CVAPI(CvSeq*) cvConvexHull2( const CvArr* input, + void* hull_storage CV_DEFAULT(NULL), + int orientation CV_DEFAULT(CV_CLOCKWISE), + int return_points CV_DEFAULT(0)); + +/* Checks whether the contour is convex or not (returns 1 if convex, 0 if not) */ +CVAPI(int) cvCheckContourConvexity( const CvArr* contour ); + + +/* Finds convexity defects for the contour */ +CVAPI(CvSeq*) cvConvexityDefects( const CvArr* contour, const CvArr* convexhull, + CvMemStorage* storage CV_DEFAULT(NULL)); + +/* Fits ellipse into a set of 2d points */ +CVAPI(CvBox2D) cvFitEllipse2( const CvArr* points ); + +/* Finds minimum rectangle containing two given rectangles */ +CVAPI(CvRect) cvMaxRect( const CvRect* rect1, const CvRect* rect2 ); + +/* Finds coordinates of the box vertices */ +CVAPI(void) cvBoxPoints( CvBox2D box, CvPoint2D32f pt[4] ); + +/* Initializes sequence header for a matrix (column or row vector) of points - + a wrapper for cvMakeSeqHeaderForArray (it does not initialize bounding rectangle!!!) 
*/ +CVAPI(CvSeq*) cvPointSeqFromMat( int seq_kind, const CvArr* mat, + CvContour* contour_header, + CvSeqBlock* block ); + +/* Checks whether the point is inside polygon, outside, on an edge (at a vertex). + Returns positive, negative or zero value, correspondingly. + Optionally, measures a signed distance between + the point and the nearest polygon edge (measure_dist=1) */ +CVAPI(double) cvPointPolygonTest( const CvArr* contour, + CvPoint2D32f pt, int measure_dist ); + +/****************************************************************************************\ +* Histogram functions * +\****************************************************************************************/ + +/* Creates new histogram */ +CVAPI(CvHistogram*) cvCreateHist( int dims, int* sizes, int type, + float** ranges CV_DEFAULT(NULL), + int uniform CV_DEFAULT(1)); + +/* Assignes histogram bin ranges */ +CVAPI(void) cvSetHistBinRanges( CvHistogram* hist, float** ranges, + int uniform CV_DEFAULT(1)); + +/* Creates histogram header for array */ +CVAPI(CvHistogram*) cvMakeHistHeaderForArray( + int dims, int* sizes, CvHistogram* hist, + float* data, float** ranges CV_DEFAULT(NULL), + int uniform CV_DEFAULT(1)); + +/* Releases histogram */ +CVAPI(void) cvReleaseHist( CvHistogram** hist ); + +/* Clears all the histogram bins */ +CVAPI(void) cvClearHist( CvHistogram* hist ); + +/* Finds indices and values of minimum and maximum histogram bins */ +CVAPI(void) cvGetMinMaxHistValue( const CvHistogram* hist, + float* min_value, float* max_value, + int* min_idx CV_DEFAULT(NULL), + int* max_idx CV_DEFAULT(NULL)); + + +/* Normalizes histogram by dividing all bins by sum of the bins, multiplied by . 
+ After that sum of histogram bins is equal to */ +CVAPI(void) cvNormalizeHist( CvHistogram* hist, double factor ); + + +/* Clear all histogram bins that are below the threshold */ +CVAPI(void) cvThreshHist( CvHistogram* hist, double threshold ); + + +/* Compares two histogram */ +CVAPI(double) cvCompareHist( const CvHistogram* hist1, + const CvHistogram* hist2, + int method); + +/* Copies one histogram to another. Destination histogram is created if + the destination pointer is NULL */ +CVAPI(void) cvCopyHist( const CvHistogram* src, CvHistogram** dst ); + + +/* Calculates bayesian probabilistic histograms + (each or src and dst is an array of histograms */ +CVAPI(void) cvCalcBayesianProb( CvHistogram** src, int number, + CvHistogram** dst); + +/* Calculates array histogram */ +CVAPI(void) cvCalcArrHist( CvArr** arr, CvHistogram* hist, + int accumulate CV_DEFAULT(0), + const CvArr* mask CV_DEFAULT(NULL) ); + +CV_INLINE void cvCalcHist( IplImage** image, CvHistogram* hist, + int accumulate CV_DEFAULT(0), + const CvArr* mask CV_DEFAULT(NULL) ) +{ + cvCalcArrHist( (CvArr**)image, hist, accumulate, mask ); +} + +/* Calculates back project */ +CVAPI(void) cvCalcArrBackProject( CvArr** image, CvArr* dst, + const CvHistogram* hist ); +#define cvCalcBackProject(image, dst, hist) cvCalcArrBackProject((CvArr**)image, dst, hist) + + +/* Does some sort of template matching but compares histograms of + template and each window location */ +CVAPI(void) cvCalcArrBackProjectPatch( CvArr** image, CvArr* dst, CvSize range, + CvHistogram* hist, int method, + double factor ); +#define cvCalcBackProjectPatch( image, dst, range, hist, method, factor ) \ + cvCalcArrBackProjectPatch( (CvArr**)image, dst, range, hist, method, factor ) + + +/* calculates probabilistic density (divides one histogram by another) */ +CVAPI(void) cvCalcProbDensity( const CvHistogram* hist1, const CvHistogram* hist2, + CvHistogram* dst_hist, double scale CV_DEFAULT(255) ); + +/* equalizes histogram of 8-bit 
single-channel image */ +CVAPI(void) cvEqualizeHist( const CvArr* src, CvArr* dst ); + + +/* Applies distance transform to binary image */ +CVAPI(void) cvDistTransform( const CvArr* src, CvArr* dst, + int distance_type CV_DEFAULT(CV_DIST_L2), + int mask_size CV_DEFAULT(3), + const float* mask CV_DEFAULT(NULL), + CvArr* labels CV_DEFAULT(NULL)); + + +/* Applies fixed-level threshold to grayscale image. + This is a basic operation applied before retrieving contours */ +CVAPI(double) cvThreshold( const CvArr* src, CvArr* dst, + double threshold, double max_value, + int threshold_type ); + +/* Applies adaptive threshold to grayscale image. + The two parameters for methods CV_ADAPTIVE_THRESH_MEAN_C and + CV_ADAPTIVE_THRESH_GAUSSIAN_C are: + neighborhood size (3, 5, 7 etc.), + and a constant subtracted from mean (...,-3,-2,-1,0,1,2,3,...) */ +CVAPI(void) cvAdaptiveThreshold( const CvArr* src, CvArr* dst, double max_value, + int adaptive_method CV_DEFAULT(CV_ADAPTIVE_THRESH_MEAN_C), + int threshold_type CV_DEFAULT(CV_THRESH_BINARY), + int block_size CV_DEFAULT(3), + double param1 CV_DEFAULT(5)); + +/* Fills the connected component until the color difference gets large enough */ +CVAPI(void) cvFloodFill( CvArr* image, CvPoint seed_point, + CvScalar new_val, CvScalar lo_diff CV_DEFAULT(cvScalarAll(0)), + CvScalar up_diff CV_DEFAULT(cvScalarAll(0)), + CvConnectedComp* comp CV_DEFAULT(NULL), + int flags CV_DEFAULT(4), + CvArr* mask CV_DEFAULT(NULL)); + +/****************************************************************************************\ +* Feature detection * +\****************************************************************************************/ + +/* Runs canny edge detector */ +CVAPI(void) cvCanny( const CvArr* image, CvArr* edges, double threshold1, + double threshold2, int aperture_size CV_DEFAULT(3) ); + +/* Calculates constraint image for corner detection + Dx^2 * Dyy + Dxx * Dy^2 - 2 * Dx * Dy * Dxy. 
+ Applying threshold to the result gives coordinates of corners */ +CVAPI(void) cvPreCornerDetect( const CvArr* image, CvArr* corners, + int aperture_size CV_DEFAULT(3) ); + +/* Calculates eigen values and vectors of 2x2 + gradient covariation matrix at every image pixel */ +CVAPI(void) cvCornerEigenValsAndVecs( const CvArr* image, CvArr* eigenvv, + int block_size, int aperture_size CV_DEFAULT(3) ); + +/* Calculates minimal eigenvalue for 2x2 gradient covariation matrix at + every image pixel */ +CVAPI(void) cvCornerMinEigenVal( const CvArr* image, CvArr* eigenval, + int block_size, int aperture_size CV_DEFAULT(3) ); + +/* Harris corner detector: + Calculates det(M) - k*(trace(M)^2), where M is 2x2 gradient covariation matrix for each pixel */ +CVAPI(void) cvCornerHarris( const CvArr* image, CvArr* harris_responce, + int block_size, int aperture_size CV_DEFAULT(3), + double k CV_DEFAULT(0.04) ); + +/* Adjust corner position using some sort of gradient search */ +CVAPI(void) cvFindCornerSubPix( const CvArr* image, CvPoint2D32f* corners, + int count, CvSize win, CvSize zero_zone, + CvTermCriteria criteria ); + +/* Finds a sparse set of points within the selected region + that seem to be easy to track */ +CVAPI(void) cvGoodFeaturesToTrack( const CvArr* image, CvArr* eig_image, + CvArr* temp_image, CvPoint2D32f* corners, + int* corner_count, double quality_level, + double min_distance, + const CvArr* mask CV_DEFAULT(NULL), + int block_size CV_DEFAULT(3), + int use_harris CV_DEFAULT(0), + double k CV_DEFAULT(0.04) ); + +/* Finds lines on binary image using one of several methods. + line_storage is either memory storage or 1 x CvMat, its + number of columns is changed by the function. 
+ method is one of CV_HOUGH_*; + rho, theta and threshold are used for each of those methods; + param1 ~ line length, param2 ~ line gap - for probabilistic, + param1 ~ srn, param2 ~ stn - for multi-scale */ +CVAPI(CvSeq*) cvHoughLines2( CvArr* image, void* line_storage, int method, + double rho, double theta, int threshold, + double param1 CV_DEFAULT(0), double param2 CV_DEFAULT(0)); + +/* Finds circles in the image */ +CVAPI(CvSeq*) cvHoughCircles( CvArr* image, void* circle_storage, + int method, double dp, double min_dist, + double param1 CV_DEFAULT(100), + double param2 CV_DEFAULT(100), + int min_radius CV_DEFAULT(0), + int max_radius CV_DEFAULT(0)); + +/* Fits a line into set of 2d or 3d points in a robust way (M-estimator technique) */ +CVAPI(void) cvFitLine( const CvArr* points, int dist_type, double param, + double reps, double aeps, float* line ); + + +/* Constructs kd-tree from set of feature descriptors */ +CVAPI(struct CvFeatureTree*) cvCreateKDTree(CvMat* desc); + +/* Constructs spill-tree from set of feature descriptors */ +CVAPI(struct CvFeatureTree*) cvCreateSpillTree( const CvMat* raw_data, + const int naive CV_DEFAULT(50), + const double rho CV_DEFAULT(.7), + const double tau CV_DEFAULT(.1) ); + +/* Release feature tree */ +CVAPI(void) cvReleaseFeatureTree(struct CvFeatureTree* tr); + +/* Searches feature tree for k nearest neighbors of given reference points, + searching (in case of kd-tree/bbf) at most emax leaves. */ +CVAPI(void) cvFindFeatures(struct CvFeatureTree* tr, const CvMat* query_points, + CvMat* indices, CvMat* dist, int k, int emax CV_DEFAULT(20)); + +/* Search feature tree for all points that are inlier to given rect region. + Only implemented for kd trees */ +CVAPI(int) cvFindFeaturesBoxed(struct CvFeatureTree* tr, + CvMat* bounds_min, CvMat* bounds_max, + CvMat* out_indices); + + +/* Construct a Locality Sensitive Hash (LSH) table, for indexing d-dimensional vectors of + given type. 
Vectors will be hashed L times with k-dimensional p-stable (p=2) functions. */ +CVAPI(struct CvLSH*) cvCreateLSH(struct CvLSHOperations* ops, int d, + int L CV_DEFAULT(10), int k CV_DEFAULT(10), + int type CV_DEFAULT(CV_64FC1), double r CV_DEFAULT(4), + int64 seed CV_DEFAULT(-1)); + +/* Construct in-memory LSH table, with n bins. */ +CVAPI(struct CvLSH*) cvCreateMemoryLSH(int d, int n, int L CV_DEFAULT(10), int k CV_DEFAULT(10), + int type CV_DEFAULT(CV_64FC1), double r CV_DEFAULT(4), + int64 seed CV_DEFAULT(-1)); + +/* Free the given LSH structure. */ +CVAPI(void) cvReleaseLSH(struct CvLSH** lsh); + +/* Return the number of vectors in the LSH. */ +CVAPI(unsigned int) LSHSize(struct CvLSH* lsh); + +/* Add vectors to the LSH structure, optionally returning indices. */ +CVAPI(void) cvLSHAdd(struct CvLSH* lsh, const CvMat* data, CvMat* indices CV_DEFAULT(0)); + +/* Remove vectors from LSH, as addressed by given indices. */ +CVAPI(void) cvLSHRemove(struct CvLSH* lsh, const CvMat* indices); + +/* Query the LSH n times for at most k nearest points; data is n x d, + indices and dist are n x k. At most emax stored points will be accessed. */ +CVAPI(void) cvLSHQuery(struct CvLSH* lsh, const CvMat* query_points, + CvMat* indices, CvMat* dist, int k, int emax); + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/opencv2/imgproc/types_c.h b/include/opencv2/imgproc/types_c.h new file mode 100644 index 0000000..5a984fd --- /dev/null +++ b/include/opencv2/imgproc/types_c.h @@ -0,0 +1,538 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_IMGPROC_TYPES_C_H__ +#define __OPENCV_IMGPROC_TYPES_C_H__ + +#include "opencv2/core/core_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Connected component structure */ +typedef struct CvConnectedComp +{ + double area; /* area of the connected component */ + CvScalar value; /* average color of the connected component */ + CvRect rect; /* ROI of the component */ + CvSeq* contour; /* optional component boundary + (the contour might have child contours corresponding to the holes)*/ +} +CvConnectedComp; + +/* Image smooth methods */ +enum +{ + CV_BLUR_NO_SCALE =0, + CV_BLUR =1, + CV_GAUSSIAN =2, + CV_MEDIAN =3, + CV_BILATERAL =4 +}; + +/* Filters used in pyramid decomposition */ +enum +{ + CV_GAUSSIAN_5x5 = 7 +}; + +/* Inpainting algorithms */ +enum +{ + CV_INPAINT_NS =0, + CV_INPAINT_TELEA =1 +}; + +/* Special filters */ +enum +{ + CV_SCHARR =-1, + CV_MAX_SOBEL_KSIZE =7 +}; + +/* Constants for color conversion */ +enum +{ + CV_BGR2BGRA =0, + CV_RGB2RGBA =CV_BGR2BGRA, + + CV_BGRA2BGR =1, + CV_RGBA2RGB =CV_BGRA2BGR, + + CV_BGR2RGBA =2, + CV_RGB2BGRA =CV_BGR2RGBA, + + CV_RGBA2BGR =3, + CV_BGRA2RGB =CV_RGBA2BGR, + + CV_BGR2RGB =4, + CV_RGB2BGR =CV_BGR2RGB, + + CV_BGRA2RGBA =5, + CV_RGBA2BGRA =CV_BGRA2RGBA, + + CV_BGR2GRAY =6, + CV_RGB2GRAY =7, + CV_GRAY2BGR =8, + CV_GRAY2RGB =CV_GRAY2BGR, + CV_GRAY2BGRA =9, + CV_GRAY2RGBA =CV_GRAY2BGRA, + CV_BGRA2GRAY =10, + CV_RGBA2GRAY =11, + + CV_BGR2BGR565 =12, + CV_RGB2BGR565 =13, + CV_BGR5652BGR =14, + CV_BGR5652RGB =15, + CV_BGRA2BGR565 =16, + CV_RGBA2BGR565 =17, + CV_BGR5652BGRA =18, + CV_BGR5652RGBA =19, + + CV_GRAY2BGR565 =20, + CV_BGR5652GRAY =21, + + CV_BGR2BGR555 =22, + CV_RGB2BGR555 =23, + CV_BGR5552BGR =24, + CV_BGR5552RGB =25, + CV_BGRA2BGR555 =26, + CV_RGBA2BGR555 =27, + CV_BGR5552BGRA =28, + CV_BGR5552RGBA =29, + + CV_GRAY2BGR555 =30, + CV_BGR5552GRAY =31, + + CV_BGR2XYZ =32, + CV_RGB2XYZ =33, + CV_XYZ2BGR =34, + CV_XYZ2RGB =35, + + CV_BGR2YCrCb 
=36, + CV_RGB2YCrCb =37, + CV_YCrCb2BGR =38, + CV_YCrCb2RGB =39, + + CV_BGR2HSV =40, + CV_RGB2HSV =41, + + CV_BGR2Lab =44, + CV_RGB2Lab =45, + + CV_BayerBG2BGR =46, + CV_BayerGB2BGR =47, + CV_BayerRG2BGR =48, + CV_BayerGR2BGR =49, + + CV_BayerBG2RGB =CV_BayerRG2BGR, + CV_BayerGB2RGB =CV_BayerGR2BGR, + CV_BayerRG2RGB =CV_BayerBG2BGR, + CV_BayerGR2RGB =CV_BayerGB2BGR, + + CV_BGR2Luv =50, + CV_RGB2Luv =51, + CV_BGR2HLS =52, + CV_RGB2HLS =53, + + CV_HSV2BGR =54, + CV_HSV2RGB =55, + + CV_Lab2BGR =56, + CV_Lab2RGB =57, + CV_Luv2BGR =58, + CV_Luv2RGB =59, + CV_HLS2BGR =60, + CV_HLS2RGB =61, + + CV_BayerBG2BGR_VNG =62, + CV_BayerGB2BGR_VNG =63, + CV_BayerRG2BGR_VNG =64, + CV_BayerGR2BGR_VNG =65, + + CV_BayerBG2RGB_VNG =CV_BayerRG2BGR_VNG, + CV_BayerGB2RGB_VNG =CV_BayerGR2BGR_VNG, + CV_BayerRG2RGB_VNG =CV_BayerBG2BGR_VNG, + CV_BayerGR2RGB_VNG =CV_BayerGB2BGR_VNG, + + CV_BGR2HSV_FULL = 66, + CV_RGB2HSV_FULL = 67, + CV_BGR2HLS_FULL = 68, + CV_RGB2HLS_FULL = 69, + + CV_HSV2BGR_FULL = 70, + CV_HSV2RGB_FULL = 71, + CV_HLS2BGR_FULL = 72, + CV_HLS2RGB_FULL = 73, + + CV_LBGR2Lab = 74, + CV_LRGB2Lab = 75, + CV_LBGR2Luv = 76, + CV_LRGB2Luv = 77, + + CV_Lab2LBGR = 78, + CV_Lab2LRGB = 79, + CV_Luv2LBGR = 80, + CV_Luv2LRGB = 81, + + CV_BGR2YUV = 82, + CV_RGB2YUV = 83, + CV_YUV2BGR = 84, + CV_YUV2RGB = 85, + + CV_BayerBG2GRAY = 86, + CV_BayerGB2GRAY = 87, + CV_BayerRG2GRAY = 88, + CV_BayerGR2GRAY = 89, + + CV_YUV420i2RGB = 90, + CV_YUV420i2BGR = 91, + CV_YUV420sp2RGB = 92, + CV_YUV420sp2BGR = 93, + + CV_COLORCVT_MAX =100 +}; + + +/* Sub-pixel interpolation methods */ +enum +{ + CV_INTER_NN =0, + CV_INTER_LINEAR =1, + CV_INTER_CUBIC =2, + CV_INTER_AREA =3, + CV_INTER_LANCZOS4 =4 +}; + +/* ... 
and other image warping flags */ +enum +{ + CV_WARP_FILL_OUTLIERS =8, + CV_WARP_INVERSE_MAP =16 +}; + +/* Shapes of a structuring element for morphological operations */ +enum +{ + CV_SHAPE_RECT =0, + CV_SHAPE_CROSS =1, + CV_SHAPE_ELLIPSE =2, + CV_SHAPE_CUSTOM =100 +}; + +/* Morphological operations */ +enum +{ + CV_MOP_ERODE =0, + CV_MOP_DILATE =1, + CV_MOP_OPEN =2, + CV_MOP_CLOSE =3, + CV_MOP_GRADIENT =4, + CV_MOP_TOPHAT =5, + CV_MOP_BLACKHAT =6 +}; + +/* Spatial and central moments */ +typedef struct CvMoments +{ + double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03; /* spatial moments */ + double mu20, mu11, mu02, mu30, mu21, mu12, mu03; /* central moments */ + double inv_sqrt_m00; /* m00 != 0 ? 1/sqrt(m00) : 0 */ +} +CvMoments; + +/* Hu invariants */ +typedef struct CvHuMoments +{ + double hu1, hu2, hu3, hu4, hu5, hu6, hu7; /* Hu invariants */ +} +CvHuMoments; + +/* Template matching methods */ +enum +{ + CV_TM_SQDIFF =0, + CV_TM_SQDIFF_NORMED =1, + CV_TM_CCORR =2, + CV_TM_CCORR_NORMED =3, + CV_TM_CCOEFF =4, + CV_TM_CCOEFF_NORMED =5 +}; + +typedef float (CV_CDECL * CvDistanceFunction)( const float* a, const float* b, void* user_param ); + +/* Contour retrieval modes */ +enum +{ + CV_RETR_EXTERNAL=0, + CV_RETR_LIST=1, + CV_RETR_CCOMP=2, + CV_RETR_TREE=3 +}; + +/* Contour approximation methods */ +enum +{ + CV_CHAIN_CODE=0, + CV_CHAIN_APPROX_NONE=1, + CV_CHAIN_APPROX_SIMPLE=2, + CV_CHAIN_APPROX_TC89_L1=3, + CV_CHAIN_APPROX_TC89_KCOS=4, + CV_LINK_RUNS=5 +}; + +/* +Internal structure that is used for sequental retrieving contours from the image. +It supports both hierarchical and plane variants of Suzuki algorithm. 
+*/ +typedef struct _CvContourScanner* CvContourScanner; + +/* Freeman chain reader state */ +typedef struct CvChainPtReader +{ + CV_SEQ_READER_FIELDS() + char code; + CvPoint pt; + schar deltas[8][2]; +} +CvChainPtReader; + +/* initializes 8-element array for fast access to 3x3 neighborhood of a pixel */ +#define CV_INIT_3X3_DELTAS( deltas, step, nch ) \ + ((deltas)[0] = (nch), (deltas)[1] = -(step) + (nch), \ + (deltas)[2] = -(step), (deltas)[3] = -(step) - (nch), \ + (deltas)[4] = -(nch), (deltas)[5] = (step) - (nch), \ + (deltas)[6] = (step), (deltas)[7] = (step) + (nch)) + + +/****************************************************************************************\ +* Planar subdivisions * +\****************************************************************************************/ + +typedef size_t CvSubdiv2DEdge; + +#define CV_QUADEDGE2D_FIELDS() \ + int flags; \ + struct CvSubdiv2DPoint* pt[4]; \ + CvSubdiv2DEdge next[4]; + +#define CV_SUBDIV2D_POINT_FIELDS()\ + int flags; \ + CvSubdiv2DEdge first; \ + CvPoint2D32f pt; \ + int id; + +#define CV_SUBDIV2D_VIRTUAL_POINT_FLAG (1 << 30) + +typedef struct CvQuadEdge2D +{ + CV_QUADEDGE2D_FIELDS() +} +CvQuadEdge2D; + +typedef struct CvSubdiv2DPoint +{ + CV_SUBDIV2D_POINT_FIELDS() +} +CvSubdiv2DPoint; + +#define CV_SUBDIV2D_FIELDS() \ + CV_GRAPH_FIELDS() \ + int quad_edges; \ + int is_geometry_valid; \ + CvSubdiv2DEdge recent_edge; \ + CvPoint2D32f topleft; \ + CvPoint2D32f bottomright; + +typedef struct CvSubdiv2D +{ + CV_SUBDIV2D_FIELDS() +} +CvSubdiv2D; + + +typedef enum CvSubdiv2DPointLocation +{ + CV_PTLOC_ERROR = -2, + CV_PTLOC_OUTSIDE_RECT = -1, + CV_PTLOC_INSIDE = 0, + CV_PTLOC_VERTEX = 1, + CV_PTLOC_ON_EDGE = 2 +} +CvSubdiv2DPointLocation; + +typedef enum CvNextEdgeType +{ + CV_NEXT_AROUND_ORG = 0x00, + CV_NEXT_AROUND_DST = 0x22, + CV_PREV_AROUND_ORG = 0x11, + CV_PREV_AROUND_DST = 0x33, + CV_NEXT_AROUND_LEFT = 0x13, + CV_NEXT_AROUND_RIGHT = 0x31, + CV_PREV_AROUND_LEFT = 0x20, + CV_PREV_AROUND_RIGHT = 0x02 +} 
+CvNextEdgeType; + +/* get the next edge with the same origin point (counterwise) */ +#define CV_SUBDIV2D_NEXT_EDGE( edge ) (((CvQuadEdge2D*)((edge) & ~3))->next[(edge)&3]) + + +/* Contour approximation algorithms */ +enum +{ + CV_POLY_APPROX_DP = 0 +}; + +/* Shape matching methods */ +enum +{ + CV_CONTOURS_MATCH_I1 =1, + CV_CONTOURS_MATCH_I2 =2, + CV_CONTOURS_MATCH_I3 =3 +}; + +/* Shape orientation */ +enum +{ + CV_CLOCKWISE =1, + CV_COUNTER_CLOCKWISE =2 +}; + + +/* Convexity defect */ +typedef struct CvConvexityDefect +{ + CvPoint* start; /* point of the contour where the defect begins */ + CvPoint* end; /* point of the contour where the defect ends */ + CvPoint* depth_point; /* the farthest from the convex hull point within the defect */ + float depth; /* distance between the farthest point and the convex hull */ +} CvConvexityDefect; + + +/* Histogram comparison methods */ +enum +{ + CV_COMP_CORREL =0, + CV_COMP_CHISQR =1, + CV_COMP_INTERSECT =2, + CV_COMP_BHATTACHARYYA =3 +}; + +/* Mask size for distance transform */ +enum +{ + CV_DIST_MASK_3 =3, + CV_DIST_MASK_5 =5, + CV_DIST_MASK_PRECISE =0 +}; + +/* Distance types for Distance Transform and M-estimators */ +enum +{ + CV_DIST_USER =-1, /* User defined distance */ + CV_DIST_L1 =1, /* distance = |x1-x2| + |y1-y2| */ + CV_DIST_L2 =2, /* the simple euclidean distance */ + CV_DIST_C =3, /* distance = max(|x1-x2|,|y1-y2|) */ + CV_DIST_L12 =4, /* L1-L2 metric: distance = 2(sqrt(1+x*x/2) - 1)) */ + CV_DIST_FAIR =5, /* distance = c^2(|x|/c-log(1+|x|/c)), c = 1.3998 */ + CV_DIST_WELSCH =6, /* distance = c^2/2(1-exp(-(x/c)^2)), c = 2.9846 */ + CV_DIST_HUBER =7 /* distance = |x|<c ? x^2/2 : c(|x|-c/2), c=1.345 */ +}; + +/* Threshold types */ +enum +{ + CV_THRESH_BINARY =0, /* value = value > threshold ? max_value : 0 */ + CV_THRESH_BINARY_INV =1, /* value = value > threshold ? 0 : max_value */ + CV_THRESH_TRUNC =2, /* value = value > threshold ? threshold : value */ + CV_THRESH_TOZERO =3, /* value = value > threshold ? value : 0 */ + CV_THRESH_TOZERO_INV =4, /* value = value > threshold ? 
0 : value */ + CV_THRESH_MASK =7, + CV_THRESH_OTSU =8 /* use Otsu algorithm to choose the optimal threshold value; + combine the flag with one of the above CV_THRESH_* values */ +}; + +/* Adaptive threshold methods */ +enum +{ + CV_ADAPTIVE_THRESH_MEAN_C =0, + CV_ADAPTIVE_THRESH_GAUSSIAN_C =1 +}; + +/* FloodFill flags */ +enum +{ + CV_FLOODFILL_FIXED_RANGE =(1 << 16), + CV_FLOODFILL_MASK_ONLY =(1 << 17) +}; + + +/* Canny edge detector flags */ +enum +{ + CV_CANNY_L2_GRADIENT =(1 << 31) +}; + +/* Variants of a Hough transform */ +enum +{ + CV_HOUGH_STANDARD =0, + CV_HOUGH_PROBABILISTIC =1, + CV_HOUGH_MULTI_SCALE =2, + CV_HOUGH_GRADIENT =3 +}; + + +/* Fast search data structures */ +struct CvFeatureTree; +struct CvLSH; +struct CvLSHOperations; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/opencv2/opencv.hpp b/include/opencv2/opencv.hpp new file mode 100644 index 0000000..13ab7de --- /dev/null +++ b/include/opencv2/opencv.hpp @@ -0,0 +1,60 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2010, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_ALL_HPP__ +#define __OPENCV_ALL_HPP__ + +#include "opencv2/core/core_c.h" +#include "opencv2/core/core.hpp" +//#include "opencv2/flann/miniflann.hpp" +#include "opencv2/imgproc/imgproc_c.h" +#include "opencv2/imgproc/imgproc.hpp" +//#include "opencv2/video/video.hpp" +//#include "opencv2/features2d/features2d.hpp" +//#include "opencv2/objdetect/objdetect.hpp" +//#include "opencv2/calib3d/calib3d.hpp" +//#include "opencv2/ml/ml.hpp" +//#include "opencv2/highgui/highgui_c.h" +//#include "opencv2/highgui/highgui.hpp" +//#include "opencv2/contrib/contrib.hpp" + +#endif diff --git a/opencv/core/alloc.cpp b/opencv/core/alloc.cpp new file mode 100644 index 0000000..674e94d --- /dev/null +++ b/opencv/core/alloc.cpp @@ -0,0 +1,697 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" + +#define CV_USE_SYSTEM_MALLOC 1 + +namespace cv +{ + +static void* OutOfMemoryError(size_t size) +{ + CV_Error_(CV_StsNoMem, ("Failed to allocate %lu bytes", (unsigned long)size)); + return 0; +} + +#if CV_USE_SYSTEM_MALLOC + +void deleteThreadAllocData() {} + +void* fastMalloc( size_t size ) +{ + uchar* udata = (uchar*)malloc(size + sizeof(void*) + CV_MALLOC_ALIGN); + if(!udata) + return OutOfMemoryError(size); + uchar** adata = alignPtr((uchar**)udata + 1, CV_MALLOC_ALIGN); + adata[-1] = udata; + return adata; +} + +void fastFree(void* ptr) +{ + if(ptr) + { + uchar* udata = ((uchar**)ptr)[-1]; + CV_DbgAssert(udata < (uchar*)ptr && + ((uchar*)ptr - udata) <= (ptrdiff_t)(sizeof(void*)+CV_MALLOC_ALIGN)); + free(udata); + } +} + +#else + +#if 0 +#define SANITY_CHECK(block) \ + CV_Assert(((size_t)(block) & (MEM_BLOCK_SIZE-1)) == 0 && \ + (unsigned)(block)->binIdx <= (unsigned)MAX_BIN && \ + (block)->signature == MEM_BLOCK_SIGNATURE) +#else +#define SANITY_CHECK(block) +#endif + 
+#define STAT(stmt) + +#ifdef WIN32 +struct CriticalSection +{ + CriticalSection() { InitializeCriticalSection(&cs); } + ~CriticalSection() { DeleteCriticalSection(&cs); } + void lock() { EnterCriticalSection(&cs); } + void unlock() { LeaveCriticalSection(&cs); } + bool trylock() { return TryEnterCriticalSection(&cs) != 0; } + + CRITICAL_SECTION cs; +}; + +void* SystemAlloc(size_t size) +{ + void* ptr = malloc(size); + return ptr ? ptr : OutOfMemoryError(size); +} + +void SystemFree(void* ptr, size_t) +{ + free(ptr); +} +#else +struct CriticalSection +{ + CriticalSection() { pthread_mutex_init(&mutex, 0); } + ~CriticalSection() { pthread_mutex_destroy(&mutex); } + void lock() { pthread_mutex_lock(&mutex); } + void unlock() { pthread_mutex_unlock(&mutex); } + bool trylock() { return pthread_mutex_trylock(&mutex) == 0; } + + pthread_mutex_t mutex; +}; + +void* SystemAlloc(size_t size) +{ + #ifndef MAP_ANONYMOUS + #define MAP_ANONYMOUS MAP_ANON + #endif + void* ptr = 0; + ptr = mmap(ptr, size, (PROT_READ | PROT_WRITE), MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + return ptr != MAP_FAILED ? 
ptr : OutOfMemoryError(size); +} + +void SystemFree(void* ptr, size_t size) +{ + munmap(ptr, size); +} +#endif + +struct AutoLock +{ + AutoLock(CriticalSection& _cs) : cs(&_cs) { cs->lock(); } + ~AutoLock() { cs->unlock(); } + CriticalSection* cs; +}; + +const size_t MEM_BLOCK_SIGNATURE = 0x01234567; +const int MEM_BLOCK_SHIFT = 14; +const size_t MEM_BLOCK_SIZE = 1 << MEM_BLOCK_SHIFT; +const size_t HDR_SIZE = 128; +const size_t MAX_BLOCK_SIZE = MEM_BLOCK_SIZE - HDR_SIZE; +const int MAX_BIN = 28; + +static const int binSizeTab[MAX_BIN+1] = +{ 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 128, 160, 192, 256, 320, 384, 480, 544, 672, 768, +896, 1056, 1328, 1600, 2688, 4048, 5408, 8128, 16256 }; + +struct MallocTables +{ + void initBinTab() + { + int i, j = 0, n; + for( i = 0; i <= MAX_BIN; i++ ) + { + n = binSizeTab[i]>>3; + for( ; j <= n; j++ ) + binIdx[j] = (uchar)i; + } + } + int bin(size_t size) + { + assert( size <= MAX_BLOCK_SIZE ); + return binIdx[(size + 7)>>3]; + } + + MallocTables() + { + initBinTab(); + } + + uchar binIdx[MAX_BLOCK_SIZE/8+1]; +}; + +MallocTables mallocTables; + +struct Node +{ + Node* next; +}; + +struct ThreadData; + +struct Block +{ + Block(Block* _next) + { + signature = MEM_BLOCK_SIGNATURE; + prev = 0; + next = _next; + privateFreeList = publicFreeList = 0; + bumpPtr = endPtr = 0; + objSize = 0; + threadData = 0; + data = (uchar*)this + HDR_SIZE; + } + + ~Block() {} + + void init(Block* _prev, Block* _next, int _objSize, ThreadData* _threadData) + { + prev = _prev; + if(prev) + prev->next = this; + next = _next; + if(next) + next->prev = this; + objSize = _objSize; + binIdx = mallocTables.bin(objSize); + threadData = _threadData; + privateFreeList = publicFreeList = 0; + bumpPtr = data; + int nobjects = MAX_BLOCK_SIZE/objSize; + endPtr = bumpPtr + nobjects*objSize; + almostEmptyThreshold = (nobjects + 1)/2; + allocated = 0; + } + + bool isFilled() const { return allocated > almostEmptyThreshold; } + + size_t signature; + Block* prev; + Block* 
next; + Node* privateFreeList; + Node* publicFreeList; + uchar* bumpPtr; + uchar* endPtr; + uchar* data; + ThreadData* threadData; + int objSize; + int binIdx; + int allocated; + int almostEmptyThreshold; + CriticalSection cs; +}; + +struct BigBlock +{ + BigBlock(int bigBlockSize, BigBlock* _next) + { + first = alignPtr((Block*)(this+1), MEM_BLOCK_SIZE); + next = _next; + nblocks = (int)(((char*)this + bigBlockSize - (char*)first)/MEM_BLOCK_SIZE); + Block* p = 0; + for( int i = nblocks-1; i >= 0; i-- ) + p = ::new((uchar*)first + i*MEM_BLOCK_SIZE) Block(p); + } + + ~BigBlock() + { + for( int i = nblocks-1; i >= 0; i-- ) + ((Block*)((uchar*)first+i*MEM_BLOCK_SIZE))->~Block(); + } + + BigBlock* next; + Block* first; + int nblocks; +}; + +struct BlockPool +{ + BlockPool(int _bigBlockSize=1<<20) : pool(0), bigBlockSize(_bigBlockSize) + { + } + + ~BlockPool() + { + AutoLock lock(cs); + while( pool ) + { + BigBlock* nextBlock = pool->next; + pool->~BigBlock(); + SystemFree(pool, bigBlockSize); + pool = nextBlock; + } + } + + Block* alloc() + { + AutoLock lock(cs); + Block* block; + if( !freeBlocks ) + { + BigBlock* bblock = ::new(SystemAlloc(bigBlockSize)) BigBlock(bigBlockSize, pool); + assert( bblock != 0 ); + freeBlocks = bblock->first; + pool = bblock; + } + block = freeBlocks; + freeBlocks = freeBlocks->next; + if( freeBlocks ) + freeBlocks->prev = 0; + STAT(stat.bruttoBytes += MEM_BLOCK_SIZE); + return block; + } + + void free(Block* block) + { + AutoLock lock(cs); + block->prev = 0; + block->next = freeBlocks; + freeBlocks = block; + STAT(stat.bruttoBytes -= MEM_BLOCK_SIZE); + } + + CriticalSection cs; + Block* freeBlocks; + BigBlock* pool; + int bigBlockSize; + int blocksPerBigBlock; +}; + +BlockPool mallocPool; + +enum { START=0, FREE=1, GC=2 }; + +struct ThreadData +{ + ThreadData() { for(int i = 0; i <= MAX_BIN; i++) bins[i][START] = bins[i][FREE] = bins[i][GC] = 0; } + ~ThreadData() + { + // mark all the thread blocks as abandoned or even release them + for( 
int i = 0; i <= MAX_BIN; i++ ) + { + Block *bin = bins[i][START], *block = bin; + bins[i][START] = bins[i][FREE] = bins[i][GC] = 0; + if( block ) + { + do + { + Block* next = block->next; + int allocated = block->allocated; + { + AutoLock lock(block->cs); + block->next = block->prev = 0; + block->threadData = 0; + Node *node = block->publicFreeList; + for( ; node != 0; node = node->next ) + allocated--; + } + if( allocated == 0 ) + mallocPool.free(block); + block = next; + } + while( block != bin ); + } + } + } + + void moveBlockToFreeList( Block* block ) + { + int i = block->binIdx; + Block*& freePtr = bins[i][FREE]; + CV_DbgAssert( block->next->prev == block && block->prev->next == block ); + if( block != freePtr ) + { + Block*& gcPtr = bins[i][GC]; + if( gcPtr == block ) + gcPtr = block->next; + if( block->next != block ) + { + block->prev->next = block->next; + block->next->prev = block->prev; + } + block->next = freePtr->next; + block->prev = freePtr; + freePtr = block->next->prev = block->prev->next = block; + } + } + + Block* bins[MAX_BIN+1][3]; + +#ifdef WIN32 +#ifdef WINCE +# define TLS_OUT_OF_INDEXES ((DWORD)0xFFFFFFFF) +#endif + + static DWORD tlsKey; + static ThreadData* get() + { + ThreadData* data; + if( tlsKey == TLS_OUT_OF_INDEXES ) + tlsKey = TlsAlloc(); + data = (ThreadData*)TlsGetValue(tlsKey); + if( !data ) + { + data = new ThreadData; + TlsSetValue(tlsKey, data); + } + return data; + } +#else + static void deleteData(void* data) + { + delete (ThreadData*)data; + } + + static pthread_key_t tlsKey; + static ThreadData* get() + { + ThreadData* data; + if( !tlsKey ) + pthread_key_create(&tlsKey, deleteData); + data = (ThreadData*)pthread_getspecific(tlsKey); + if( !data ) + { + data = new ThreadData; + pthread_setspecific(tlsKey, data); + } + return data; + } +#endif +}; + +#ifdef WIN32 +DWORD ThreadData::tlsKey = TLS_OUT_OF_INDEXES; + +void deleteThreadAllocData() +{ + if( ThreadData::tlsKey != TLS_OUT_OF_INDEXES ) + delete 
(ThreadData*)TlsGetValue( ThreadData::tlsKey ); +} + +#else +pthread_key_t ThreadData::tlsKey = 0; +#endif + +#if 0 +static void checkList(ThreadData* tls, int idx) +{ + Block* block = tls->bins[idx][START]; + if( !block ) + { + CV_DbgAssert( tls->bins[idx][FREE] == 0 && tls->bins[idx][GC] == 0 ); + } + else + { + bool gcInside = false; + bool freeInside = false; + do + { + if( tls->bins[idx][FREE] == block ) + freeInside = true; + if( tls->bins[idx][GC] == block ) + gcInside = true; + block = block->next; + } + while( block != tls->bins[idx][START] ); + CV_DbgAssert( gcInside && freeInside ); + } +} +#else +#define checkList(tls, idx) +#endif + +void* fastMalloc( size_t size ) +{ + if( size > MAX_BLOCK_SIZE ) + { + size_t size1 = size + sizeof(uchar*)*2 + MEM_BLOCK_SIZE; + uchar* udata = (uchar*)SystemAlloc(size1); + uchar** adata = alignPtr((uchar**)udata + 2, MEM_BLOCK_SIZE); + adata[-1] = udata; + adata[-2] = (uchar*)size1; + return adata; + } + + { + ThreadData* tls = ThreadData::get(); + int idx = mallocTables.bin(size); + Block*& startPtr = tls->bins[idx][START]; + Block*& gcPtr = tls->bins[idx][GC]; + Block*& freePtr = tls->bins[idx][FREE], *block = freePtr; + checkList(tls, idx); + size = binSizeTab[idx]; + STAT( + stat.nettoBytes += size; + stat.mallocCalls++; + ); + uchar* data = 0; + + for(;;) + { + if( block ) + { + // try to find non-full block + for(;;) + { + CV_DbgAssert( block->next->prev == block && block->prev->next == block ); + if( block->bumpPtr ) + { + data = block->bumpPtr; + if( (block->bumpPtr += size) >= block->endPtr ) + block->bumpPtr = 0; + break; + } + + if( block->privateFreeList ) + { + data = (uchar*)block->privateFreeList; + block->privateFreeList = block->privateFreeList->next; + break; + } + + if( block == startPtr ) + break; + block = block->next; + } +#if 0 + avg_k += _k; + avg_nk++; + if( avg_nk == 1000 ) + { + printf("avg search iters per 1e3 allocs = %g\n", (double)avg_k/avg_nk ); + avg_k = avg_nk = 0; + } +#endif + + 
freePtr = block; + if( !data ) + { + block = gcPtr; + for( int k = 0; k < 2; k++ ) + { + SANITY_CHECK(block); + CV_DbgAssert( block->next->prev == block && block->prev->next == block ); + if( block->publicFreeList ) + { + { + AutoLock lock(block->cs); + block->privateFreeList = block->publicFreeList; + block->publicFreeList = 0; + } + Node* node = block->privateFreeList; + for(;node != 0; node = node->next) + --block->allocated; + data = (uchar*)block->privateFreeList; + block->privateFreeList = block->privateFreeList->next; + gcPtr = block->next; + if( block->allocated+1 <= block->almostEmptyThreshold ) + tls->moveBlockToFreeList(block); + break; + } + block = block->next; + } + if( !data ) + gcPtr = block; + } + } + + if( data ) + break; + block = mallocPool.alloc(); + block->init(startPtr ? startPtr->prev : block, startPtr ? startPtr : block, (int)size, tls); + if( !startPtr ) + startPtr = gcPtr = freePtr = block; + checkList(tls, block->binIdx); + SANITY_CHECK(block); + } + + ++block->allocated; + return data; + } +} + +void fastFree( void* ptr ) +{ + if( ((size_t)ptr & (MEM_BLOCK_SIZE-1)) == 0 ) + { + if( ptr != 0 ) + { + void* origPtr = ((void**)ptr)[-1]; + size_t sz = (size_t)((void**)ptr)[-2]; + SystemFree( origPtr, sz ); + } + return; + } + + { + ThreadData* tls = ThreadData::get(); + Node* node = (Node*)ptr; + Block* block = (Block*)((size_t)ptr & -(int)MEM_BLOCK_SIZE); + assert( block->signature == MEM_BLOCK_SIGNATURE ); + + if( block->threadData == tls ) + { + STAT( + stat.nettoBytes -= block->objSize; + stat.freeCalls++; + float ratio = (float)stat.nettoBytes/stat.bruttoBytes; + if( stat.minUsageRatio > ratio ) + stat.minUsageRatio = ratio; + ); + + SANITY_CHECK(block); + + bool prevFilled = block->isFilled(); + --block->allocated; + if( !block->isFilled() && (block->allocated == 0 || prevFilled) ) + { + if( block->allocated == 0 ) + { + int idx = block->binIdx; + Block*& startPtr = tls->bins[idx][START]; + Block*& freePtr = tls->bins[idx][FREE]; + 
Block*& gcPtr = tls->bins[idx][GC]; + + if( block == block->next ) + { + CV_DbgAssert( startPtr == block && freePtr == block && gcPtr == block ); + startPtr = freePtr = gcPtr = 0; + } + else + { + if( freePtr == block ) + freePtr = block->next; + if( gcPtr == block ) + gcPtr = block->next; + if( startPtr == block ) + startPtr = block->next; + block->prev->next = block->next; + block->next->prev = block->prev; + } + mallocPool.free(block); + checkList(tls, idx); + return; + } + + tls->moveBlockToFreeList(block); + } + node->next = block->privateFreeList; + block->privateFreeList = node; + } + else + { + AutoLock lock(block->cs); + SANITY_CHECK(block); + + node->next = block->publicFreeList; + block->publicFreeList = node; + if( block->threadData == 0 ) + { + // take ownership of the abandoned block. + // note that it can happen at the same time as + // ThreadData::deleteData() marks the blocks as abandoned, + // so this part of the algorithm needs to be checked for data races + int idx = block->binIdx; + block->threadData = tls; + Block*& startPtr = tls->bins[idx][START]; + + if( startPtr ) + { + block->next = startPtr; + block->prev = startPtr->prev; + block->next->prev = block->prev->next = block; + } + else + startPtr = tls->bins[idx][FREE] = tls->bins[idx][GC] = block; + } + } + } +} + +#endif + +} + +CV_IMPL void cvSetMemoryManager( CvAllocFunc, CvFreeFunc, void * ) +{ + CV_Error( -1, "Custom memory allocator is not supported" ); +} + +CV_IMPL void* cvAlloc( size_t size ) +{ + return cv::fastMalloc( size ); +} + +CV_IMPL void cvFree_( void* ptr ) +{ + cv::fastFree( ptr ); +} + + +/* End of file. */ diff --git a/opencv/core/arithm.cpp b/opencv/core/arithm.cpp new file mode 100644 index 0000000..5f6c04f --- /dev/null +++ b/opencv/core/arithm.cpp @@ -0,0 +1,2716 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +/* //////////////////////////////////////////////////////////////////// +// +// Arithmetic and logical operations: +, -, *, /, &, |, ^, ~, abs ... +// +// */ + +#include "precomp.hpp" + +namespace cv +{ + +#if ARITHM_USE_IPP +struct IPPArithmInitializer +{ + IPPArithmInitializer(void) + { + ippStaticInit(); + } +}; + +IPPArithmInitializer ippArithmInitializer; +#endif + +struct NOP {}; + +template +void vBinOp8(const T* src1, size_t step1, const T* src2, size_t step2, T* dst, size_t step, Size sz) +{ +#if CV_SSE2 + Op8 op8; +#endif + Op op; + + for( ; sz.height--; src1 += step1/sizeof(src1[0]), + src2 += step2/sizeof(src2[0]), + dst += step/sizeof(dst[0]) ) + { + int x = 0; + + #if CV_SSE2 + if( USE_SSE2 ) + { + for( ; x <= sz.width - 32; x += 32 ) + { + __m128i r0 = _mm_loadu_si128((const __m128i*)(src1 + x)); + __m128i r1 = _mm_loadu_si128((const __m128i*)(src1 + x + 16)); + r0 = op8(r0,_mm_loadu_si128((const __m128i*)(src2 + x))); + r1 = op8(r1,_mm_loadu_si128((const __m128i*)(src2 + x + 16))); + _mm_storeu_si128((__m128i*)(dst + x), r0); + _mm_storeu_si128((__m128i*)(dst + x + 16), r1); + } + for( ; x <= sz.width - 8; x += 8 ) + { + __m128i r0 = _mm_loadl_epi64((const __m128i*)(src1 + x)); + r0 = op8(r0,_mm_loadl_epi64((const __m128i*)(src2 + x))); + _mm_storel_epi64((__m128i*)(dst + x), r0); + } + } + #endif + + for( ; x <= sz.width - 4; x += 4 ) + { + T v0 = op(src1[x], src2[x]); + T v1 = op(src1[x+1], src2[x+1]); + dst[x] = v0; dst[x+1] = v1; + v0 = op(src1[x+2], src2[x+2]); + v1 = op(src1[x+3], src2[x+3]); + dst[x+2] = v0; dst[x+3] = v1; + } + + for( ; x < sz.width; x++ ) + dst[x] = op(src1[x], src2[x]); + } +} + +template +void vBinOp16(const T* src1, size_t step1, const T* src2, size_t step2, + T* dst, size_t step, Size sz) +{ +#if CV_SSE2 + Op16 op16; +#endif + Op op; + + for( ; sz.height--; src1 += step1/sizeof(src1[0]), + src2 += step2/sizeof(src2[0]), + dst += step/sizeof(dst[0]) ) + { + int x = 0; + + #if CV_SSE2 + if( USE_SSE2 ) + { + 
for( ; x <= sz.width - 16; x += 16 ) + { + __m128i r0 = _mm_loadu_si128((const __m128i*)(src1 + x)); + __m128i r1 = _mm_loadu_si128((const __m128i*)(src1 + x + 8)); + r0 = op16(r0,_mm_loadu_si128((const __m128i*)(src2 + x))); + r1 = op16(r1,_mm_loadu_si128((const __m128i*)(src2 + x + 8))); + _mm_storeu_si128((__m128i*)(dst + x), r0); + _mm_storeu_si128((__m128i*)(dst + x + 16), r1); + } + for( ; x <= sz.width - 4; x += 4 ) + { + __m128i r0 = _mm_loadl_epi64((const __m128i*)(src1 + x)); + r0 = op16(r0,_mm_loadl_epi64((const __m128i*)(src2 + x))); + _mm_storel_epi64((__m128i*)(dst + x), r0); + } + } + else + #endif + + for( ; x <= sz.width - 4; x += 4 ) + { + T v0 = op(src1[x], src2[x]); + T v1 = op(src1[x+1], src2[x+1]); + dst[x] = v0; dst[x+1] = v1; + v0 = op(src1[x+2], src2[x+2]); + v1 = op(src1[x+3], src2[x+3]); + dst[x+2] = v0; dst[x+3] = v1; + } + + for( ; x < sz.width; x++ ) + dst[x] = op(src1[x], src2[x]); + } +} + + +template +void vBinOp32s(const int* src1, size_t step1, const int* src2, size_t step2, + int* dst, size_t step, Size sz) +{ +#if CV_SSE2 + Op32 op32; +#endif + Op op; + + for( ; sz.height--; src1 += step1/sizeof(src1[0]), + src2 += step2/sizeof(src2[0]), + dst += step/sizeof(dst[0]) ) + { + int x = 0; + +#if CV_SSE2 + if( USE_SSE2 ) + { + if( (((size_t)src1|(size_t)src2|(size_t)dst)&15) == 0 ) + for( ; x <= sz.width - 8; x += 8 ) + { + __m128i r0 = _mm_load_si128((const __m128i*)(src1 + x)); + __m128i r1 = _mm_load_si128((const __m128i*)(src1 + x + 4)); + r0 = op32(r0,_mm_load_si128((const __m128i*)(src2 + x))); + r1 = op32(r1,_mm_load_si128((const __m128i*)(src2 + x + 4))); + _mm_store_si128((__m128i*)(dst + x), r0); + _mm_store_si128((__m128i*)(dst + x + 16), r1); + } + else + for( ; x <= sz.width - 8; x += 8 ) + { + __m128i r0 = _mm_loadu_si128((const __m128i*)(src1 + x)); + __m128i r1 = _mm_loadu_si128((const __m128i*)(src1 + x + 4)); + r0 = op32(r0,_mm_loadu_si128((const __m128i*)(src2 + x))); + r1 = op32(r1,_mm_loadu_si128((const 
__m128i*)(src2 + x + 4))); + _mm_storeu_si128((__m128i*)(dst + x), r0); + _mm_storeu_si128((__m128i*)(dst + x + 16), r1); + } + } +#endif + + for( ; x <= sz.width - 4; x += 4 ) + { + int v0 = op(src1[x], src2[x]); + int v1 = op(src1[x+1], src2[x+1]); + dst[x] = v0; dst[x+1] = v1; + v0 = op(src1[x+2], src2[x+2]); + v1 = op(src1[x+3], src2[x+3]); + dst[x+2] = v0; dst[x+3] = v1; + } + + for( ; x < sz.width; x++ ) + dst[x] = op(src1[x], src2[x]); + } +} + + +template +void vBinOp32f(const float* src1, size_t step1, const float* src2, size_t step2, + float* dst, size_t step, Size sz) +{ +#if CV_SSE2 + Op32 op32; +#endif + Op op; + + for( ; sz.height--; src1 += step1/sizeof(src1[0]), + src2 += step2/sizeof(src2[0]), + dst += step/sizeof(dst[0]) ) + { + int x = 0; + + #if CV_SSE2 + if( USE_SSE2 ) + { + if( (((size_t)src1|(size_t)src2|(size_t)dst)&15) == 0 ) + for( ; x <= sz.width - 8; x += 8 ) + { + __m128 r0 = _mm_load_ps(src1 + x); + __m128 r1 = _mm_load_ps(src1 + x + 4); + r0 = op32(r0,_mm_load_ps(src2 + x)); + r1 = op32(r1,_mm_load_ps(src2 + x + 4)); + _mm_store_ps(dst + x, r0); + _mm_store_ps(dst + x + 4, r1); + } + else + for( ; x <= sz.width - 8; x += 8 ) + { + __m128 r0 = _mm_loadu_ps(src1 + x); + __m128 r1 = _mm_loadu_ps(src1 + x + 4); + r0 = op32(r0,_mm_loadu_ps(src2 + x)); + r1 = op32(r1,_mm_loadu_ps(src2 + x + 4)); + _mm_storeu_ps(dst + x, r0); + _mm_storeu_ps(dst + x + 4, r1); + } + } + #endif + for( ; x <= sz.width - 4; x += 4 ) + { + float v0 = op(src1[x], src2[x]); + float v1 = op(src1[x+1], src2[x+1]); + dst[x] = v0; dst[x+1] = v1; + v0 = op(src1[x+2], src2[x+2]); + v1 = op(src1[x+3], src2[x+3]); + dst[x+2] = v0; dst[x+3] = v1; + } + + for( ; x < sz.width; x++ ) + dst[x] = op(src1[x], src2[x]); + } +} + +template +void vBinOp64f(const double* src1, size_t step1, const double* src2, size_t step2, + double* dst, size_t step, Size sz) +{ +#if CV_SSE2 + Op64 op64; +#endif + Op op; + + for( ; sz.height--; src1 += step1/sizeof(src1[0]), + src2 += 
step2/sizeof(src2[0]), + dst += step/sizeof(dst[0]) ) + { + int x = 0; + + #if CV_SSE2 + if( USE_SSE2 && (((size_t)src1|(size_t)src2|(size_t)dst)&15) == 0 ) + for( ; x <= sz.width - 4; x += 4 ) + { + __m128d r0 = _mm_load_pd(src1 + x); + __m128d r1 = _mm_load_pd(src1 + x + 2); + r0 = op64(r0,_mm_load_pd(src2 + x)); + r1 = op64(r1,_mm_load_pd(src2 + x + 2)); + _mm_store_pd(dst + x, r0); + _mm_store_pd(dst + x + 2, r1); + } + else + #endif + for( ; x <= sz.width - 4; x += 4 ) + { + double v0 = op(src1[x], src2[x]); + double v1 = op(src1[x+1], src2[x+1]); + dst[x] = v0; dst[x+1] = v1; + v0 = op(src1[x+2], src2[x+2]); + v1 = op(src1[x+3], src2[x+3]); + dst[x+2] = v0; dst[x+3] = v1; + } + + for( ; x < sz.width; x++ ) + dst[x] = op(src1[x], src2[x]); + } +} + +#if CV_SSE2 + +struct _VAdd8u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_adds_epu8(a,b); }}; +struct _VSub8u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_subs_epu8(a,b); }}; +struct _VMin8u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_min_epu8(a,b); }}; +struct _VMax8u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_max_epu8(a,b); }}; +struct _VAbsDiff8u +{ + __m128i operator()(const __m128i& a, const __m128i& b) const + { return _mm_add_epi8(_mm_subs_epu8(a,b),_mm_subs_epu8(b,a)); } +}; + +struct _VAdd8s { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_adds_epi8(a,b); }}; +struct _VSub8s { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_subs_epi8(a,b); }}; +struct _VMin8s +{ + __m128i operator()(const __m128i& a, const __m128i& b) const + { + __m128i m = _mm_cmpgt_epi8(a, b); + return _mm_xor_si128(a, _mm_and_si128(_mm_xor_si128(a, b), m)); + } +}; +struct _VMax8s +{ + __m128i operator()(const __m128i& a, const __m128i& b) const + { + __m128i m = _mm_cmpgt_epi8(b, a); + return _mm_xor_si128(a, _mm_and_si128(_mm_xor_si128(a, b), m)); + } +}; 
+struct _VAbsDiff8s +{ + __m128i operator()(const __m128i& a, const __m128i& b) const + { + __m128i d = _mm_subs_epi8(a, b); + __m128i m = _mm_cmpgt_epi8(b, a); + return _mm_subs_epi8(_mm_xor_si128(d, m), m); + } +}; + +struct _VAdd16u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_adds_epu16(a,b); }}; +struct _VSub16u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_subs_epu16(a,b); }}; +struct _VMin16u +{ + __m128i operator()(const __m128i& a, const __m128i& b) const + { return _mm_subs_epu16(a,_mm_subs_epu16(a,b)); } +}; +struct _VMax16u +{ + __m128i operator()(const __m128i& a, const __m128i& b) const + { return _mm_adds_epu16(_mm_subs_epu16(a,b),b); } +}; +struct _VAbsDiff16u +{ + __m128i operator()(const __m128i& a, const __m128i& b) const + { return _mm_add_epi16(_mm_subs_epu16(a,b),_mm_subs_epu16(b,a)); } +}; + +struct _VAdd16s { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_adds_epi16(a,b); }}; +struct _VSub16s { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_subs_epi16(a,b); }}; +struct _VMin16s { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_min_epi16(a,b); }}; +struct _VMax16s { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_max_epi16(a,b); }}; +struct _VAbsDiff16s +{ + __m128i operator()(const __m128i& a, const __m128i& b) const + { + __m128i M = _mm_max_epi16(a,b), m = _mm_min_epi16(a,b); + return _mm_subs_epi16(M, m); + } +}; + +struct _VAdd32s { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_add_epi32(a,b); }}; +struct _VSub32s { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_sub_epi32(a,b); }}; +struct _VMin32s +{ + __m128i operator()(const __m128i& a, const __m128i& b) const + { + __m128i m = _mm_cmpgt_epi32(a, b); + return _mm_xor_si128(a, _mm_and_si128(_mm_xor_si128(a, b), m)); + } +}; +struct _VMax32s +{ + __m128i 
operator()(const __m128i& a, const __m128i& b) const + { + __m128i m = _mm_cmpgt_epi32(b, a); + return _mm_xor_si128(a, _mm_and_si128(_mm_xor_si128(a, b), m)); + } +}; +struct _VAbsDiff32s +{ + __m128i operator()(const __m128i& a, const __m128i& b) const + { + __m128i d = _mm_sub_epi32(a, b); + __m128i m = _mm_cmpgt_epi32(b, a); + return _mm_sub_epi32(_mm_xor_si128(d, m), m); + } +}; + +struct _VAdd32f { __m128 operator()(const __m128& a, const __m128& b) const { return _mm_add_ps(a,b); }}; +struct _VSub32f { __m128 operator()(const __m128& a, const __m128& b) const { return _mm_sub_ps(a,b); }}; +struct _VMin32f { __m128 operator()(const __m128& a, const __m128& b) const { return _mm_min_ps(a,b); }}; +struct _VMax32f { __m128 operator()(const __m128& a, const __m128& b) const { return _mm_max_ps(a,b); }}; +static int CV_DECL_ALIGNED(16) v32f_absmask[] = { 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff }; +struct _VAbsDiff32f +{ + __m128 operator()(const __m128& a, const __m128& b) const + { + return _mm_and_ps(_mm_sub_ps(a,b), *(const __m128*)v32f_absmask); + } +}; + +struct _VAdd64f { __m128d operator()(const __m128d& a, const __m128d& b) const { return _mm_add_pd(a,b); }}; +struct _VSub64f { __m128d operator()(const __m128d& a, const __m128d& b) const { return _mm_sub_pd(a,b); }}; +struct _VMin64f { __m128d operator()(const __m128d& a, const __m128d& b) const { return _mm_min_pd(a,b); }}; +struct _VMax64f { __m128d operator()(const __m128d& a, const __m128d& b) const { return _mm_max_pd(a,b); }}; + +static int CV_DECL_ALIGNED(16) v64f_absmask[] = { 0xffffffff, 0x7fffffff, 0xffffffff, 0x7fffffff }; +struct _VAbsDiff64f +{ + __m128d operator()(const __m128d& a, const __m128d& b) const + { + return _mm_and_pd(_mm_sub_pd(a,b), *(const __m128d*)v64f_absmask); + } +}; + +struct _VAnd8u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_and_si128(a,b); }}; +struct _VOr8u { __m128i operator()(const __m128i& a, const __m128i& b) const { return 
_mm_or_si128(a,b); }}; +struct _VXor8u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_xor_si128(a,b); }}; +struct _VNot8u { __m128i operator()(const __m128i& a, const __m128i&) const { return _mm_andnot_si128(_mm_setzero_si128(),a); }}; + +#endif + +#if CV_SSE2 +#define IF_SIMD(op) op +#else +#define IF_SIMD(op) NOP +#endif + +template<> inline uchar OpAdd::operator ()(uchar a, uchar b) const +{ return CV_FAST_CAST_8U(a + b); } +template<> inline uchar OpSub::operator ()(uchar a, uchar b) const +{ return CV_FAST_CAST_8U(a - b); } + +template struct OpAbsDiff +{ + typedef T type1; + typedef T type2; + typedef T rtype; + T operator()(T a, T b) const { return (T)std::abs(a - b); } +}; + +template<> inline short OpAbsDiff::operator ()(short a, short b) const +{ return saturate_cast(std::abs(a - b)); } + +template<> inline schar OpAbsDiff::operator ()(schar a, schar b) const +{ return saturate_cast(std::abs(a - b)); } + +template struct OpAbsDiffS +{ + typedef T type1; + typedef WT type2; + typedef T rtype; + T operator()(T a, WT b) const { return saturate_cast(std::abs(a - b)); } +}; + +template struct OpAnd +{ + typedef T type1; + typedef T type2; + typedef T rtype; + T operator()( T a, T b ) const { return a & b; } +}; + +template struct OpOr +{ + typedef T type1; + typedef T type2; + typedef T rtype; + T operator()( T a, T b ) const { return a | b; } +}; + +template struct OpXor +{ + typedef T type1; + typedef T type2; + typedef T rtype; + T operator()( T a, T b ) const { return a ^ b; } +}; + +template struct OpNot +{ + typedef T type1; + typedef T type2; + typedef T rtype; + T operator()( T a, T ) const { return ~a; } +}; + +static inline void fixSteps(Size sz, size_t elemSize, size_t& step1, size_t& step2, size_t& step) +{ + if( sz.height == 1 ) + step1 = step2 = step = sz.width*elemSize; +} + +static void add8u( const uchar* src1, size_t step1, + const uchar* src2, size_t step2, + uchar* dst, size_t step, Size sz, void* ) +{ + 
IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step); + ippiAdd_8u_C1RSfs(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz, 0), + (vBinOp8, IF_SIMD(_VAdd8u)>(src1, step1, src2, step2, dst, step, sz))); +} + +static void add8s( const schar* src1, size_t step1, + const schar* src2, size_t step2, + schar* dst, size_t step, Size sz, void* ) +{ + vBinOp8, IF_SIMD(_VAdd8s)>(src1, step1, src2, step2, dst, step, sz); +} + +static void add16u( const ushort* src1, size_t step1, + const ushort* src2, size_t step2, + ushort* dst, size_t step, Size sz, void* ) +{ + IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step); + ippiAdd_16u_C1RSfs(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz, 0), + (vBinOp16, IF_SIMD(_VAdd16u)>(src1, step1, src2, step2, dst, step, sz))); +} + +static void add16s( const short* src1, size_t step1, + const short* src2, size_t step2, + short* dst, size_t step, Size sz, void* ) +{ + IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step); + ippiAdd_16s_C1RSfs(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz, 0), + (vBinOp16, IF_SIMD(_VAdd16s)>(src1, step1, src2, step2, dst, step, sz))); +} + +static void add32s( const int* src1, size_t step1, + const int* src2, size_t step2, + int* dst, size_t step, Size sz, void* ) +{ + vBinOp32s, IF_SIMD(_VAdd32s)>(src1, step1, src2, step2, dst, step, sz); +} + +static void add32f( const float* src1, size_t step1, + const float* src2, size_t step2, + float* dst, size_t step, Size sz, void* ) +{ + IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step); + ippiAdd_32f_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz), + (vBinOp32f, IF_SIMD(_VAdd32f)>(src1, step1, src2, step2, dst, step, sz))); +} + +static void add64f( const double* src1, size_t step1, + const double* src2, size_t step2, + double* dst, size_t step, Size sz, void* ) +{ + vBinOp64f, IF_SIMD(_VAdd64f)>(src1, step1, src2, step2, dst, step, sz); +} + +static void sub8u( const uchar* 
src1, size_t step1, + const uchar* src2, size_t step2, + uchar* dst, size_t step, Size sz, void* ) +{ + IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step); + ippiSub_8u_C1RSfs(src2, (int)step2, src1, (int)step1, dst, (int)step, (IppiSize&)sz, 0), + (vBinOp8, IF_SIMD(_VSub8u)>(src1, step1, src2, step2, dst, step, sz))); +} + +static void sub8s( const schar* src1, size_t step1, + const schar* src2, size_t step2, + schar* dst, size_t step, Size sz, void* ) +{ + vBinOp8, IF_SIMD(_VSub8s)>(src1, step1, src2, step2, dst, step, sz); +} + +static void sub16u( const ushort* src1, size_t step1, + const ushort* src2, size_t step2, + ushort* dst, size_t step, Size sz, void* ) +{ + IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step); + ippiSub_16u_C1RSfs(src2, (int)step2, src1, (int)step1, dst, (int)step, (IppiSize&)sz, 0), + (vBinOp16, IF_SIMD(_VSub16u)>(src1, step1, src2, step2, dst, step, sz))); +} + +static void sub16s( const short* src1, size_t step1, + const short* src2, size_t step2, + short* dst, size_t step, Size sz, void* ) +{ + IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step); + ippiSub_16s_C1RSfs(src2, (int)step2, src1, (int)step1, dst, (int)step, (IppiSize&)sz, 0), + (vBinOp16, IF_SIMD(_VSub16s)>(src1, step1, src2, step2, dst, step, sz))); +} + +static void sub32s( const int* src1, size_t step1, + const int* src2, size_t step2, + int* dst, size_t step, Size sz, void* ) +{ + vBinOp32s, IF_SIMD(_VSub32s)>(src1, step1, src2, step2, dst, step, sz); +} + +static void sub32f( const float* src1, size_t step1, + const float* src2, size_t step2, + float* dst, size_t step, Size sz, void* ) +{ + IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step); + ippiSub_32f_C1R(src2, (int)step2, src1, (int)step1, dst, (int)step, (IppiSize&)sz), + (vBinOp32f, IF_SIMD(_VSub32f)>(src1, step1, src2, step2, dst, step, sz))); +} + +static void sub64f( const double* src1, size_t step1, + const double* src2, size_t step2, + double* dst, size_t step, Size sz, void* ) +{ + 
vBinOp64f, IF_SIMD(_VSub64f)>(src1, step1, src2, step2, dst, step, sz); +} + +template<> inline uchar OpMin::operator ()(uchar a, uchar b) const { return CV_MIN_8U(a, b); } +template<> inline uchar OpMax::operator ()(uchar a, uchar b) const { return CV_MAX_8U(a, b); } + +static void max8u( const uchar* src1, size_t step1, + const uchar* src2, size_t step2, + uchar* dst, size_t step, Size sz, void* ) +{ +#if (ARITHM_USE_IPP == 1) + { + uchar* s1 = (uchar*)src1; + uchar* s2 = (uchar*)src2; + uchar* d = dst; + fixSteps(sz, sizeof(dst[0]), step1, step2, step); + for(int i = 0; i < sz.height; i++) + { + ippsMaxEvery_8u(s1, s2, d, sz.width); + s1 += step1; + s2 += step2; + d += step; + } + } +#else + vBinOp8, IF_SIMD(_VMax8u)>(src1, step1, src2, step2, dst, step, sz); +#endif + +// IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step); +// ippiMaxEvery_8u_C1R(src1, (int)step1, src2, (int)step2, dst, (IppiSize&)sz), +// (vBinOp8, IF_SIMD(_VMax8u)>(src1, step1, src2, step2, dst, step, sz))); +} + +static void max8s( const schar* src1, size_t step1, + const schar* src2, size_t step2, + schar* dst, size_t step, Size sz, void* ) +{ + vBinOp8, IF_SIMD(_VMax8s)>(src1, step1, src2, step2, dst, step, sz); +} + +static void max16u( const ushort* src1, size_t step1, + const ushort* src2, size_t step2, + ushort* dst, size_t step, Size sz, void* ) +{ +#if (ARITHM_USE_IPP == 1) + { + ushort* s1 = (ushort*)src1; + ushort* s2 = (ushort*)src2; + ushort* d = dst; + fixSteps(sz, sizeof(dst[0]), step1, step2, step); + for(int i = 0; i < sz.height; i++) + { + ippsMaxEvery_16u(s1, s2, d, sz.width); + s1 = (ushort*)((uchar*)s1 + step1); + s2 = (ushort*)((uchar*)s2 + step2); + d = (ushort*)((uchar*)d + step); + } + } +#else + vBinOp16, IF_SIMD(_VMax16u)>(src1, step1, src2, step2, dst, step, sz); +#endif + +// IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step); +// ippiMaxEvery_16u_C1R(src1, (int)step1, src2, (int)step2, dst, (IppiSize&)sz), +// (vBinOp16, IF_SIMD(_VMax16u)>(src1, step1, 
src2, step2, dst, step, sz))); +} + +static void max16s( const short* src1, size_t step1, + const short* src2, size_t step2, + short* dst, size_t step, Size sz, void* ) +{ + vBinOp16, IF_SIMD(_VMax16s)>(src1, step1, src2, step2, dst, step, sz); +} + +static void max32s( const int* src1, size_t step1, + const int* src2, size_t step2, + int* dst, size_t step, Size sz, void* ) +{ + vBinOp32s, IF_SIMD(_VMax32s)>(src1, step1, src2, step2, dst, step, sz); +} + +static void max32f( const float* src1, size_t step1, + const float* src2, size_t step2, + float* dst, size_t step, Size sz, void* ) +{ +#if (ARITHM_USE_IPP == 1) + { + float* s1 = (float*)src1; + float* s2 = (float*)src2; + float* d = dst; + fixSteps(sz, sizeof(dst[0]), step1, step2, step); + for(int i = 0; i < sz.height; i++) + { + ippsMaxEvery_32f(s1, s2, d, sz.width); + s1 = (float*)((uchar*)s1 + step1); + s2 = (float*)((uchar*)s2 + step2); + d = (float*)((uchar*)d + step); + } + } +#else + vBinOp32f, IF_SIMD(_VMax32f)>(src1, step1, src2, step2, dst, step, sz); +#endif +// IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step); +// ippiMaxEvery_32f_C1R(src1, (int)step1, src2, (int)step2, dst, (IppiSize&)sz), +// (vBinOp32f, IF_SIMD(_VMax32f)>(src1, step1, src2, step2, dst, step, sz))); +} + +static void max64f( const double* src1, size_t step1, + const double* src2, size_t step2, + double* dst, size_t step, Size sz, void* ) +{ + vBinOp64f, IF_SIMD(_VMax64f)>(src1, step1, src2, step2, dst, step, sz); +} + +static void min8u( const uchar* src1, size_t step1, + const uchar* src2, size_t step2, + uchar* dst, size_t step, Size sz, void* ) +{ +#if (ARITHM_USE_IPP == 1) + { + uchar* s1 = (uchar*)src1; + uchar* s2 = (uchar*)src2; + uchar* d = dst; + fixSteps(sz, sizeof(dst[0]), step1, step2, step); + for(int i = 0; i < sz.height; i++) + { + ippsMinEvery_8u(s1, s2, d, sz.width); + s1 += step1; + s2 += step2; + d += step; + } + } +#else + vBinOp8, IF_SIMD(_VMin8u)>(src1, step1, src2, step2, dst, step, sz); +#endif + +// 
IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step); +// ippiMinEvery_8u_C1R(src1, (int)step1, src2, (int)step2, dst, (IppiSize&)sz), +// (vBinOp8, IF_SIMD(_VMin8u)>(src1, step1, src2, step2, dst, step, sz))); +} + +static void min8s( const schar* src1, size_t step1, + const schar* src2, size_t step2, + schar* dst, size_t step, Size sz, void* ) +{ + vBinOp8, IF_SIMD(_VMin8s)>(src1, step1, src2, step2, dst, step, sz); +} + +static void min16u( const ushort* src1, size_t step1, + const ushort* src2, size_t step2, + ushort* dst, size_t step, Size sz, void* ) +{ +#if (ARITHM_USE_IPP == 1) + { + ushort* s1 = (ushort*)src1; + ushort* s2 = (ushort*)src2; + ushort* d = dst; + fixSteps(sz, sizeof(dst[0]), step1, step2, step); + for(int i = 0; i < sz.height; i++) + { + ippsMinEvery_16u(s1, s2, d, sz.width); + s1 = (ushort*)((uchar*)s1 + step1); + s2 = (ushort*)((uchar*)s2 + step2); + d = (ushort*)((uchar*)d + step); + } + } +#else + vBinOp16, IF_SIMD(_VMin16u)>(src1, step1, src2, step2, dst, step, sz); +#endif + +// IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step); +// ippiMinEvery_16u_C1R(src1, (int)step1, src2, (int)step2, dst, (IppiSize&)sz), +// (vBinOp16, IF_SIMD(_VMin16u)>(src1, step1, src2, step2, dst, step, sz))); +} + +static void min16s( const short* src1, size_t step1, + const short* src2, size_t step2, + short* dst, size_t step, Size sz, void* ) +{ + vBinOp16, IF_SIMD(_VMin16s)>(src1, step1, src2, step2, dst, step, sz); +} + +static void min32s( const int* src1, size_t step1, + const int* src2, size_t step2, + int* dst, size_t step, Size sz, void* ) +{ + vBinOp32s, IF_SIMD(_VMin32s)>(src1, step1, src2, step2, dst, step, sz); +} + +static void min32f( const float* src1, size_t step1, + const float* src2, size_t step2, + float* dst, size_t step, Size sz, void* ) +{ +#if (ARITHM_USE_IPP == 1) + { + float* s1 = (float*)src1; + float* s2 = (float*)src2; + float* d = dst; + fixSteps(sz, sizeof(dst[0]), step1, step2, step); + for(int i = 0; i < sz.height; 
i++) + { + ippsMinEvery_32f(s1, s2, d, sz.width); + s1 = (float*)((uchar*)s1 + step1); + s2 = (float*)((uchar*)s2 + step2); + d = (float*)((uchar*)d + step); + } + } +#else + vBinOp32f, IF_SIMD(_VMin32f)>(src1, step1, src2, step2, dst, step, sz); +#endif +// IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step); +// ippiMinEvery_32f_C1R(src1, (int)step1, src2, (int)step2, dst, (IppiSize&)sz), +// (vBinOp32f, IF_SIMD(_VMin32f)>(src1, step1, src2, step2, dst, step, sz))); +} + +static void min64f( const double* src1, size_t step1, + const double* src2, size_t step2, + double* dst, size_t step, Size sz, void* ) +{ + vBinOp64f, IF_SIMD(_VMin64f)>(src1, step1, src2, step2, dst, step, sz); +} + +static void absdiff8u( const uchar* src1, size_t step1, + const uchar* src2, size_t step2, + uchar* dst, size_t step, Size sz, void* ) +{ + IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step); + ippiAbsDiff_8u_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz), + (vBinOp8, IF_SIMD(_VAbsDiff8u)>(src1, step1, src2, step2, dst, step, sz))); +} + +static void absdiff8s( const schar* src1, size_t step1, + const schar* src2, size_t step2, + schar* dst, size_t step, Size sz, void* ) +{ + vBinOp8, IF_SIMD(_VAbsDiff8s)>(src1, step1, src2, step2, dst, step, sz); +} + +static void absdiff16u( const ushort* src1, size_t step1, + const ushort* src2, size_t step2, + ushort* dst, size_t step, Size sz, void* ) +{ + IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step); + ippiAbsDiff_16u_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz), + (vBinOp16, IF_SIMD(_VAbsDiff16u)>(src1, step1, src2, step2, dst, step, sz))); +} + +static void absdiff16s( const short* src1, size_t step1, + const short* src2, size_t step2, + short* dst, size_t step, Size sz, void* ) +{ + vBinOp16, IF_SIMD(_VAbsDiff16s)>(src1, step1, src2, step2, dst, step, sz); +} + +static void absdiff32s( const int* src1, size_t step1, + const int* src2, size_t step2, + int* dst, size_t 
step, Size sz, void* ) +{ + vBinOp32s, IF_SIMD(_VAbsDiff32s)>(src1, step1, src2, step2, dst, step, sz); +} + +static void absdiff32f( const float* src1, size_t step1, + const float* src2, size_t step2, + float* dst, size_t step, Size sz, void* ) +{ + IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step); + ippiAbsDiff_32f_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz), + (vBinOp32f, IF_SIMD(_VAbsDiff32f)>(src1, step1, src2, step2, dst, step, sz))); +} + +static void absdiff64f( const double* src1, size_t step1, + const double* src2, size_t step2, + double* dst, size_t step, Size sz, void* ) +{ + vBinOp64f, IF_SIMD(_VAbsDiff64f)>(src1, step1, src2, step2, dst, step, sz); +} + + +static void and8u( const uchar* src1, size_t step1, + const uchar* src2, size_t step2, + uchar* dst, size_t step, Size sz, void* ) +{ + IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step); + ippiAnd_8u_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz), + (vBinOp8, IF_SIMD(_VAnd8u)>(src1, step1, src2, step2, dst, step, sz))); +} + +static void or8u( const uchar* src1, size_t step1, + const uchar* src2, size_t step2, + uchar* dst, size_t step, Size sz, void* ) +{ + IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step); + ippiOr_8u_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz), + (vBinOp8, IF_SIMD(_VOr8u)>(src1, step1, src2, step2, dst, step, sz))); +} + +static void xor8u( const uchar* src1, size_t step1, + const uchar* src2, size_t step2, + uchar* dst, size_t step, Size sz, void* ) +{ + IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step); + ippiXor_8u_C1R(src1, (int)step1, src2, (int)step2, dst, (int)step, (IppiSize&)sz), + (vBinOp8, IF_SIMD(_VXor8u)>(src1, step1, src2, step2, dst, step, sz))); +} + +static void not8u( const uchar* src1, size_t step1, + const uchar* src2, size_t step2, + uchar* dst, size_t step, Size sz, void* ) +{ + IF_IPP(fixSteps(sz, sizeof(dst[0]), step1, step2, step); + ippiNot_8u_C1R(src1, 
(int)step1, dst, (int)step, (IppiSize&)sz), + (vBinOp8, IF_SIMD(_VNot8u)>(src1, step1, src2, step2, dst, step, sz))); +} + +/****************************************************************************************\ +* logical operations * +\****************************************************************************************/ + +void convertAndUnrollScalar( const Mat& sc, int buftype, uchar* scbuf, size_t blocksize ) +{ + int scn = (int)sc.total(), cn = CV_MAT_CN(buftype); + size_t esz = CV_ELEM_SIZE(buftype); + getConvertFunc(sc.depth(), buftype)(sc.data, 0, 0, 0, scbuf, 0, Size(std::min(cn, scn), 1), 0); + // unroll the scalar + if( scn < cn ) + { + CV_Assert( scn == 1 ); + size_t esz1 = CV_ELEM_SIZE1(buftype); + for( size_t i = esz1; i < esz; i++ ) + scbuf[i] = scbuf[i - esz1]; + } + for( size_t i = esz; i < blocksize*esz; i++ ) + scbuf[i] = scbuf[i - esz]; +} + +void binary_op(InputArray _src1, InputArray _src2, OutputArray _dst, + InputArray _mask, const BinaryFunc* tab, bool bitwise) +{ + int kind1 = _src1.kind(), kind2 = _src2.kind(); + Mat src1 = _src1.getMat(), src2 = _src2.getMat(); + bool haveMask = !_mask.empty(), haveScalar = false; + BinaryFunc func; + int c; + + if( src1.dims <= 2 && src2.dims <= 2 && kind1 == kind2 && + src1.size() == src2.size() && src1.type() == src2.type() && !haveMask ) + { + _dst.create(src1.size(), src1.type()); + Mat dst = _dst.getMat(); + if( bitwise ) + { + func = *tab; + c = (int)src1.elemSize(); + } + else + { + func = tab[src1.depth()]; + c = src1.channels(); + } + + Size sz = getContinuousSize(src1, src2, dst); + size_t len = sz.width*(size_t)c; + if( len == (size_t)(int)len ) + { + sz.width = (int)len; + func(src1.data, src1.step, src2.data, src2.step, dst.data, dst.step, sz, 0); + return; + } + } + + if( (kind1 == _InputArray::MATX) + (kind2 == _InputArray::MATX) == 1 || + src1.size != src2.size || src1.type() != src2.type() ) + { + if( checkScalar(src1, src2.type(), kind1, kind2) ) + // src1 is a scalar; swap it 
with src2 + swap(src1, src2); + else if( !checkScalar(src2, src1.type(), kind2, kind1) ) + CV_Error( CV_StsUnmatchedSizes, + "The operation is neither 'array op array' (where arrays have the same size and type), " + "nor 'array op scalar', nor 'scalar op array'" ); + haveScalar = true; + } + + size_t esz = src1.elemSize(); + size_t blocksize0 = (BLOCK_SIZE + esz-1)/esz; + int cn = src1.channels(); + BinaryFunc copymask = 0; + Mat mask; + + if( haveMask ) + { + mask = _mask.getMat(); + CV_Assert( (mask.type() == CV_8UC1 || mask.type() == CV_8SC1) ); + CV_Assert( mask.size == src1.size ); + copymask = getCopyMaskFunc(esz); + } + + AutoBuffer _buf; + uchar *scbuf = 0, *maskbuf = 0; + + _dst.create(src1.dims, src1.size, src1.type()); + Mat dst = _dst.getMat(); + + if( bitwise ) + { + func = *tab; + c = (int)esz; + } + else + { + func = tab[src1.depth()]; + c = cn; + } + + if( !haveScalar ) + { + const Mat* arrays[] = { &src1, &src2, &dst, &mask, 0 }; + uchar* ptrs[4]; + + NAryMatIterator it(arrays, ptrs); + size_t total = it.size, blocksize = total; + + if( blocksize*c > INT_MAX ) + blocksize = INT_MAX/c; + + if( haveMask ) + { + blocksize = std::min(blocksize, blocksize0); + _buf.allocate(blocksize*esz); + maskbuf = _buf; + } + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + { + for( size_t j = 0; j < total; j += blocksize ) + { + int bsz = (int)MIN(total - j, blocksize); + + func( ptrs[0], 0, ptrs[1], 0, haveMask ? maskbuf : ptrs[2], 0, Size(bsz*c, 1), 0 ); + if( haveMask ) + { + copymask( maskbuf, 0, ptrs[3], 0, ptrs[2], 0, Size(bsz, 1), &esz ); + ptrs[3] += bsz; + } + + bsz *= (int)esz; + ptrs[0] += bsz; ptrs[1] += bsz; ptrs[2] += bsz; + } + } + } + else + { + const Mat* arrays[] = { &src1, &dst, &mask, 0 }; + uchar* ptrs[3]; + + NAryMatIterator it(arrays, ptrs); + size_t total = it.size, blocksize = std::min(total, blocksize0); + + _buf.allocate(blocksize*(haveMask ? 
2 : 1)*esz + 32); + scbuf = _buf; + maskbuf = alignPtr(scbuf + blocksize*esz, 16); + + convertAndUnrollScalar( src2, src1.type(), scbuf, blocksize); + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + { + for( size_t j = 0; j < total; j += blocksize ) + { + int bsz = (int)MIN(total - j, blocksize); + + func( ptrs[0], 0, scbuf, 0, haveMask ? maskbuf : ptrs[1], 0, Size(bsz*c, 1), 0 ); + if( haveMask ) + { + copymask( maskbuf, 0, ptrs[2], 0, ptrs[1], 0, Size(bsz, 1), &esz ); + ptrs[2] += bsz; + } + + bsz *= (int)esz; + ptrs[0] += bsz; ptrs[1] += bsz; + } + } + } +} + +static BinaryFunc maxTab[] = +{ + (BinaryFunc)max8u, (BinaryFunc)max8s, (BinaryFunc)max16u, (BinaryFunc)max16s, + (BinaryFunc)max32s, (BinaryFunc)max32f, (BinaryFunc)max64f, 0 +}; + +static BinaryFunc minTab[] = +{ + (BinaryFunc)min8u, (BinaryFunc)min8s, (BinaryFunc)min16u, (BinaryFunc)min16s, + (BinaryFunc)min32s, (BinaryFunc)min32f, (BinaryFunc)min64f, 0 +}; + +} + +void cv::bitwise_and(InputArray a, InputArray b, OutputArray c, InputArray mask) +{ + BinaryFunc f = +#ifdef HAVE_TEGRA_OPTIMIZATION + (BinaryFunc)tegra:: +#endif + and8u; + binary_op(a, b, c, mask, &f, true); +} + +void cv::bitwise_or(InputArray a, InputArray b, OutputArray c, InputArray mask) +{ + BinaryFunc f = +#ifdef HAVE_TEGRA_OPTIMIZATION + (BinaryFunc)tegra:: +#endif + or8u; + binary_op(a, b, c, mask, &f, true); +} + +void cv::bitwise_xor(InputArray a, InputArray b, OutputArray c, InputArray mask) +{ + BinaryFunc f = +#ifdef HAVE_TEGRA_OPTIMIZATION + (BinaryFunc)tegra:: +#endif + xor8u; + binary_op(a, b, c, mask, &f, true); +} + +void cv::bitwise_not(InputArray a, OutputArray c, InputArray mask) +{ + BinaryFunc f = +#ifdef HAVE_TEGRA_OPTIMIZATION + (BinaryFunc)tegra:: +#endif + not8u; + binary_op(a, a, c, mask, &f, true); +} + +void cv::max( InputArray src1, InputArray src2, OutputArray dst ) +{ + binary_op(src1, src2, dst, noArray(), maxTab, false ); +} + +void cv::min( InputArray src1, InputArray src2, OutputArray dst ) +{ + 
binary_op(src1, src2, dst, noArray(), minTab, false ); +} + +void cv::max(const Mat& src1, const Mat& src2, Mat& dst) +{ + OutputArray _dst(dst); + binary_op(src1, src2, _dst, noArray(), maxTab, false ); +} + +void cv::min(const Mat& src1, const Mat& src2, Mat& dst) +{ + OutputArray _dst(dst); + binary_op(src1, src2, _dst, noArray(), minTab, false ); +} + +void cv::max(const Mat& src1, double src2, Mat& dst) +{ + OutputArray _dst(dst); + binary_op(src1, src2, _dst, noArray(), maxTab, false ); +} + +void cv::min(const Mat& src1, double src2, Mat& dst) +{ + OutputArray _dst(dst); + binary_op(src1, src2, _dst, noArray(), minTab, false ); +} + +/****************************************************************************************\ +* add/subtract * +\****************************************************************************************/ + +namespace cv +{ + +void arithm_op(InputArray _src1, InputArray _src2, OutputArray _dst, + InputArray _mask, int dtype, BinaryFunc* tab, bool muldiv=false, void* usrdata=0) +{ + int kind1 = _src1.kind(), kind2 = _src2.kind(); + Mat src1 = _src1.getMat(), src2 = _src2.getMat(); + bool haveMask = !_mask.empty(); + + if( kind1 == kind2 && src1.dims <= 2 && src2.dims <= 2 && + src1.size() == src2.size() && src1.type() == src2.type() && + !haveMask && ((!_dst.fixedType() && (dtype < 0 || CV_MAT_DEPTH(dtype) == src1.depth())) || + (_dst.fixedType() && _dst.type() == _src1.type())) ) + { + _dst.create(src1.size(), src1.type()); + Mat dst = _dst.getMat(); + Size sz = getContinuousSize(src1, src2, dst, src1.channels()); + tab[src1.depth()](src1.data, src1.step, src2.data, src2.step, dst.data, dst.step, sz, usrdata); + return; + } + + bool haveScalar = false, swapped12 = false; + + if( (kind1 == _InputArray::MATX) + (kind2 == _InputArray::MATX) == 1 || + src1.size != src2.size || src1.channels() != src2.channels() ) + { + if( checkScalar(src1, src2.type(), kind1, kind2) ) + { + // src1 is a scalar; swap it with src2 + swap(src1, src2); + 
swapped12 = true; + } + else if( !checkScalar(src2, src1.type(), kind2, kind1) ) + CV_Error( CV_StsUnmatchedSizes, + "The operation is neither 'array op array' (where arrays have the same size and the same number of channels), " + "nor 'array op scalar', nor 'scalar op array'" ); + haveScalar = true; + } + + int cn = src1.channels(), depth1 = src1.depth(), depth2 = src2.depth(), wtype; + BinaryFunc cvtsrc1 = 0, cvtsrc2 = 0, cvtdst = 0; + + if( dtype < 0 ) + { + if( _dst.fixedType() ) + dtype = _dst.type(); + else + { + if( !haveScalar && src1.type() != src2.type() ) + CV_Error(CV_StsBadArg, + "When the input arrays in add/subtract/multiply/divide functions have different types, " + "the output array type must be explicitly specified"); + dtype = src1.type(); + } + } + dtype = CV_MAT_DEPTH(dtype); + + if( depth1 == depth2 && dtype == depth1 ) + wtype = dtype; + else if( !muldiv ) + { + wtype = depth1 <= CV_8S && depth2 <= CV_8S ? CV_16S : + depth1 <= CV_32S && depth2 <= CV_32S ? CV_32S : std::max(depth1, depth2); + wtype = std::max(wtype, dtype); + + // when the result of addition should be converted to an integer type, + // and just one of the input arrays is floating-point, it makes sense to convert that input to integer type before the operation, + // instead of converting the other input to floating-point and then converting the operation result back to integers. + if( dtype < CV_32F && (depth1 < CV_32F || depth2 < CV_32F) ) + wtype = CV_32S; + } + else + { + wtype = std::max(depth1, std::max(depth2, CV_32F)); + wtype = std::max(wtype, dtype); + } + + cvtsrc1 = depth1 == wtype ? 0 : getConvertFunc(depth1, wtype); + cvtsrc2 = depth2 == depth1 ? cvtsrc1 : depth2 == wtype ? 0 : getConvertFunc(depth2, wtype); + cvtdst = dtype == wtype ? 
0 : getConvertFunc(wtype, dtype); + + dtype = CV_MAKETYPE(dtype, cn); + wtype = CV_MAKETYPE(wtype, cn); + + size_t esz1 = src1.elemSize(), esz2 = src2.elemSize(); + size_t dsz = CV_ELEM_SIZE(dtype), wsz = CV_ELEM_SIZE(wtype); + size_t blocksize0 = (size_t)(BLOCK_SIZE + wsz-1)/wsz; + BinaryFunc copymask = 0; + Mat mask; + + if( haveMask ) + { + mask = _mask.getMat(); + CV_Assert( (mask.type() == CV_8UC1 || mask.type() == CV_8SC1) ); + CV_Assert( mask.size == src1.size ); + copymask = getCopyMaskFunc(dsz); + } + + AutoBuffer _buf; + uchar *buf, *maskbuf = 0, *buf1 = 0, *buf2 = 0, *wbuf = 0; + size_t bufesz = (cvtsrc1 ? wsz : 0) + (cvtsrc2 || haveScalar ? wsz : 0) + (cvtdst ? wsz : 0) + (haveMask ? dsz : 0); + + _dst.create(src1.dims, src1.size, dtype); + Mat dst = _dst.getMat(); + BinaryFunc func = tab[CV_MAT_DEPTH(wtype)]; + + if( !haveScalar ) + { + const Mat* arrays[] = { &src1, &src2, &dst, &mask, 0 }; + uchar* ptrs[4]; + + NAryMatIterator it(arrays, ptrs); + size_t total = it.size, blocksize = total; + + if( haveMask || cvtsrc1 || cvtsrc2 || cvtdst ) + blocksize = std::min(blocksize, blocksize0); + + _buf.allocate(bufesz*blocksize + 64); + buf = _buf; + if( cvtsrc1 ) + buf1 = buf, buf = alignPtr(buf + blocksize*wsz, 16); + if( cvtsrc2 ) + buf2 = buf, buf = alignPtr(buf + blocksize*wsz, 16); + wbuf = maskbuf = buf; + if( cvtdst ) + buf = alignPtr(buf + blocksize*wsz, 16); + if( haveMask ) + maskbuf = buf; + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + { + for( size_t j = 0; j < total; j += blocksize ) + { + int bsz = (int)MIN(total - j, blocksize); + Size bszn(bsz*cn, 1); + const uchar *sptr1 = ptrs[0], *sptr2 = ptrs[1]; + uchar* dptr = ptrs[2]; + if( cvtsrc1 ) + { + cvtsrc1( sptr1, 0, 0, 0, buf1, 0, bszn, 0 ); + sptr1 = buf1; + } + if( ptrs[0] == ptrs[1] ) + sptr2 = sptr1; + else if( cvtsrc2 ) + { + cvtsrc2( sptr2, 0, 0, 0, buf2, 0, bszn, 0 ); + sptr2 = buf2; + } + + if( !haveMask && !cvtdst ) + func( sptr1, 0, sptr2, 0, dptr, 0, bszn, usrdata ); + else + 
{ + func( sptr1, 0, sptr2, 0, wbuf, 0, bszn, usrdata ); + if( !haveMask ) + cvtdst( wbuf, 0, 0, 0, dptr, 0, bszn, 0 ); + else if( !cvtdst ) + { + copymask( wbuf, 0, ptrs[3], 0, dptr, 0, Size(bsz, 1), &dsz ); + ptrs[3] += bsz; + } + else + { + cvtdst( wbuf, 0, 0, 0, maskbuf, 0, bszn, 0 ); + copymask( maskbuf, 0, ptrs[3], 0, dptr, 0, Size(bsz, 1), &dsz ); + ptrs[3] += bsz; + } + } + ptrs[0] += bsz*esz1; ptrs[1] += bsz*esz2; ptrs[2] += bsz*dsz; + } + } + } + else + { + const Mat* arrays[] = { &src1, &dst, &mask, 0 }; + uchar* ptrs[3]; + + NAryMatIterator it(arrays, ptrs); + size_t total = it.size, blocksize = std::min(total, blocksize0); + + _buf.allocate(bufesz*blocksize + 64); + buf = _buf; + if( cvtsrc1 ) + buf1 = buf, buf = alignPtr(buf + blocksize*wsz, 16); + buf2 = buf; buf = alignPtr(buf + blocksize*wsz, 16); + wbuf = maskbuf = buf; + if( cvtdst ) + buf = alignPtr(buf + blocksize*wsz, 16); + if( haveMask ) + maskbuf = buf; + + convertAndUnrollScalar( src2, wtype, buf2, blocksize); + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + { + for( size_t j = 0; j < total; j += blocksize ) + { + int bsz = (int)MIN(total - j, blocksize); + Size bszn(bsz*cn, 1); + const uchar *sptr1 = ptrs[0]; + const uchar* sptr2 = buf2; + uchar* dptr = ptrs[1]; + + if( cvtsrc1 ) + { + cvtsrc1( sptr1, 0, 0, 0, buf1, 0, bszn, 0 ); + sptr1 = buf1; + } + + if( swapped12 ) + std::swap(sptr1, sptr2); + + if( !haveMask && !cvtdst ) + func( sptr1, 0, sptr2, 0, dptr, 0, bszn, usrdata ); + else + { + func( sptr1, 0, sptr2, 0, wbuf, 0, bszn, usrdata ); + if( !haveMask ) + cvtdst( wbuf, 0, 0, 0, dptr, 0, bszn, 0 ); + else if( !cvtdst ) + { + copymask( wbuf, 0, ptrs[2], 0, dptr, 0, Size(bsz, 1), &dsz ); + ptrs[2] += bsz; + } + else + { + cvtdst( wbuf, 0, 0, 0, maskbuf, 0, bszn, 0 ); + copymask( maskbuf, 0, ptrs[2], 0, dptr, 0, Size(bsz, 1), &dsz ); + ptrs[2] += bsz; + } + } + ptrs[0] += bsz*esz1; ptrs[1] += bsz*dsz; + } + } + } +} + +#ifdef HAVE_TEGRA_OPTIMIZATION +static BinaryFunc addTab[] = +{ 
+ (BinaryFunc)tegra::add8u, (BinaryFunc)add8s, (BinaryFunc)add16u, (BinaryFunc)add16s, + (BinaryFunc)add32s, (BinaryFunc)add32f, (BinaryFunc)add64f, 0 +}; + +static BinaryFunc subTab[] = +{ + (BinaryFunc)tegra::sub8u, (BinaryFunc)sub8s, (BinaryFunc)sub16u, (BinaryFunc)sub16s, + (BinaryFunc)sub32s, (BinaryFunc)sub32f, (BinaryFunc)sub64f, 0 +}; + +#else +static BinaryFunc addTab[] = +{ + (BinaryFunc)add8u, (BinaryFunc)add8s, (BinaryFunc)add16u, (BinaryFunc)add16s, + (BinaryFunc)add32s, (BinaryFunc)add32f, (BinaryFunc)add64f, 0 +}; + +static BinaryFunc subTab[] = +{ + (BinaryFunc)sub8u, (BinaryFunc)sub8s, (BinaryFunc)sub16u, (BinaryFunc)sub16s, + (BinaryFunc)sub32s, (BinaryFunc)sub32f, (BinaryFunc)sub64f, 0 +}; +#endif + +static BinaryFunc absdiffTab[] = +{ + (BinaryFunc)absdiff8u, (BinaryFunc)absdiff8s, (BinaryFunc)absdiff16u, + (BinaryFunc)absdiff16s, (BinaryFunc)absdiff32s, (BinaryFunc)absdiff32f, + (BinaryFunc)absdiff64f, 0 +}; + +} + +void cv::add( InputArray src1, InputArray src2, OutputArray dst, + InputArray mask, int dtype ) +{ + arithm_op(src1, src2, dst, mask, dtype, addTab ); +} + +void cv::subtract( InputArray src1, InputArray src2, OutputArray dst, + InputArray mask, int dtype ) +{ + arithm_op(src1, src2, dst, mask, dtype, subTab ); +} + +void cv::absdiff( InputArray src1, InputArray src2, OutputArray dst ) +{ + arithm_op(src1, src2, dst, noArray(), -1, absdiffTab); +} + +/****************************************************************************************\ +* multiply/divide * +\****************************************************************************************/ + +namespace cv +{ + +template static void +mul_( const T* src1, size_t step1, const T* src2, size_t step2, + T* dst, size_t step, Size size, WT scale ) +{ + step1 /= sizeof(src1[0]); + step2 /= sizeof(src2[0]); + step /= sizeof(dst[0]); + + if( scale == (WT)1. 
) + { + for( ; size.height--; src1 += step1, src2 += step2, dst += step ) + { + int i; + for( i = 0; i <= size.width - 4; i += 4 ) + { + T t0; + T t1; + t0 = saturate_cast(src1[i ] * src2[i ]); + t1 = saturate_cast(src1[i+1] * src2[i+1]); + dst[i ] = t0; + dst[i+1] = t1; + + t0 = saturate_cast(src1[i+2] * src2[i+2]); + t1 = saturate_cast(src1[i+3] * src2[i+3]); + dst[i+2] = t0; + dst[i+3] = t1; + } + + for( ; i < size.width; i++ ) + dst[i] = saturate_cast(src1[i] * src2[i]); + } + } + else + { + for( ; size.height--; src1 += step1, src2 += step2, dst += step ) + { + int i; + for( i = 0; i <= size.width - 4; i += 4 ) + { + T t0 = saturate_cast(scale*(WT)src1[i]*src2[i]); + T t1 = saturate_cast(scale*(WT)src1[i+1]*src2[i+1]); + dst[i] = t0; dst[i+1] = t1; + + t0 = saturate_cast(scale*(WT)src1[i+2]*src2[i+2]); + t1 = saturate_cast(scale*(WT)src1[i+3]*src2[i+3]); + dst[i+2] = t0; dst[i+3] = t1; + } + + for( ; i < size.width; i++ ) + dst[i] = saturate_cast(scale*(WT)src1[i]*src2[i]); + } + } +} + +template static void +div_( const T* src1, size_t step1, const T* src2, size_t step2, + T* dst, size_t step, Size size, double scale ) +{ + step1 /= sizeof(src1[0]); + step2 /= sizeof(src2[0]); + step /= sizeof(dst[0]); + + for( ; size.height--; src1 += step1, src2 += step2, dst += step ) + { + int i = 0; + for( ; i <= size.width - 4; i += 4 ) + { + if( src2[i] != 0 && src2[i+1] != 0 && src2[i+2] != 0 && src2[i+3] != 0 ) + { + double a = (double)src2[i] * src2[i+1]; + double b = (double)src2[i+2] * src2[i+3]; + double d = scale/(a * b); + b *= d; + a *= d; + + T z0 = saturate_cast(src2[i+1] * ((double)src1[i] * b)); + T z1 = saturate_cast(src2[i] * ((double)src1[i+1] * b)); + T z2 = saturate_cast(src2[i+3] * ((double)src1[i+2] * a)); + T z3 = saturate_cast(src2[i+2] * ((double)src1[i+3] * a)); + + dst[i] = z0; dst[i+1] = z1; + dst[i+2] = z2; dst[i+3] = z3; + } + else + { + T z0 = src2[i] != 0 ? saturate_cast(src1[i]*scale/src2[i]) : 0; + T z1 = src2[i+1] != 0 ? 
saturate_cast(src1[i+1]*scale/src2[i+1]) : 0; + T z2 = src2[i+2] != 0 ? saturate_cast(src1[i+2]*scale/src2[i+2]) : 0; + T z3 = src2[i+3] != 0 ? saturate_cast(src1[i+3]*scale/src2[i+3]) : 0; + + dst[i] = z0; dst[i+1] = z1; + dst[i+2] = z2; dst[i+3] = z3; + } + } + + for( ; i < size.width; i++ ) + dst[i] = src2[i] != 0 ? saturate_cast(src1[i]*scale/src2[i]) : 0; + } +} + +template static void +recip_( const T*, size_t, const T* src2, size_t step2, + T* dst, size_t step, Size size, double scale ) +{ + step2 /= sizeof(src2[0]); + step /= sizeof(dst[0]); + + for( ; size.height--; src2 += step2, dst += step ) + { + int i = 0; + for( ; i <= size.width - 4; i += 4 ) + { + if( src2[i] != 0 && src2[i+1] != 0 && src2[i+2] != 0 && src2[i+3] != 0 ) + { + double a = (double)src2[i] * src2[i+1]; + double b = (double)src2[i+2] * src2[i+3]; + double d = scale/(a * b); + b *= d; + a *= d; + + T z0 = saturate_cast(src2[i+1] * b); + T z1 = saturate_cast(src2[i] * b); + T z2 = saturate_cast(src2[i+3] * a); + T z3 = saturate_cast(src2[i+2] * a); + + dst[i] = z0; dst[i+1] = z1; + dst[i+2] = z2; dst[i+3] = z3; + } + else + { + T z0 = src2[i] != 0 ? saturate_cast(scale/src2[i]) : 0; + T z1 = src2[i+1] != 0 ? saturate_cast(scale/src2[i+1]) : 0; + T z2 = src2[i+2] != 0 ? saturate_cast(scale/src2[i+2]) : 0; + T z3 = src2[i+3] != 0 ? saturate_cast(scale/src2[i+3]) : 0; + + dst[i] = z0; dst[i+1] = z1; + dst[i+2] = z2; dst[i+3] = z3; + } + } + + for( ; i < size.width; i++ ) + dst[i] = src2[i] != 0 ? 
saturate_cast(scale/src2[i]) : 0; + } +} + + +static void mul8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, + uchar* dst, size_t step, Size sz, void* scale) +{ + mul_(src1, step1, src2, step2, dst, step, sz, (float)*(const double*)scale); +} + +static void mul8s( const schar* src1, size_t step1, const schar* src2, size_t step2, + schar* dst, size_t step, Size sz, void* scale) +{ + mul_(src1, step1, src2, step2, dst, step, sz, (float)*(const double*)scale); +} + +static void mul16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2, + ushort* dst, size_t step, Size sz, void* scale) +{ + mul_(src1, step1, src2, step2, dst, step, sz, (float)*(const double*)scale); +} + +static void mul16s( const short* src1, size_t step1, const short* src2, size_t step2, + short* dst, size_t step, Size sz, void* scale) +{ + mul_(src1, step1, src2, step2, dst, step, sz, (float)*(const double*)scale); +} + +static void mul32s( const int* src1, size_t step1, const int* src2, size_t step2, + int* dst, size_t step, Size sz, void* scale) +{ + mul_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale); +} + +static void mul32f( const float* src1, size_t step1, const float* src2, size_t step2, + float* dst, size_t step, Size sz, void* scale) +{ + mul_(src1, step1, src2, step2, dst, step, sz, (float)*(const double*)scale); +} + +static void mul64f( const double* src1, size_t step1, const double* src2, size_t step2, + double* dst, size_t step, Size sz, void* scale) +{ + mul_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale); +} + +static void div8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, + uchar* dst, size_t step, Size sz, void* scale) +{ + if( src1 ) + div_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale); + else + recip_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale); +} + +static void div8s( const schar* src1, size_t step1, const schar* src2, size_t step2, + schar* dst, 
size_t step, Size sz, void* scale) +{ + div_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale); +} + +static void div16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2, + ushort* dst, size_t step, Size sz, void* scale) +{ + div_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale); +} + +static void div16s( const short* src1, size_t step1, const short* src2, size_t step2, + short* dst, size_t step, Size sz, void* scale) +{ + div_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale); +} + +static void div32s( const int* src1, size_t step1, const int* src2, size_t step2, + int* dst, size_t step, Size sz, void* scale) +{ + div_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale); +} + +static void div32f( const float* src1, size_t step1, const float* src2, size_t step2, + float* dst, size_t step, Size sz, void* scale) +{ + div_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale); +} + +static void div64f( const double* src1, size_t step1, const double* src2, size_t step2, + double* dst, size_t step, Size sz, void* scale) +{ + div_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale); +} + +static void recip8u( const uchar* src1, size_t step1, const uchar* src2, size_t step2, + uchar* dst, size_t step, Size sz, void* scale) +{ + recip_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale); +} + +static void recip8s( const schar* src1, size_t step1, const schar* src2, size_t step2, + schar* dst, size_t step, Size sz, void* scale) +{ + recip_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale); +} + +static void recip16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2, + ushort* dst, size_t step, Size sz, void* scale) +{ + recip_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale); +} + +static void recip16s( const short* src1, size_t step1, const short* src2, size_t step2, + short* dst, size_t step, Size sz, void* scale) +{ + 
recip_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale); +} + +static void recip32s( const int* src1, size_t step1, const int* src2, size_t step2, + int* dst, size_t step, Size sz, void* scale) +{ + recip_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale); +} + +static void recip32f( const float* src1, size_t step1, const float* src2, size_t step2, + float* dst, size_t step, Size sz, void* scale) +{ + recip_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale); +} + +static void recip64f( const double* src1, size_t step1, const double* src2, size_t step2, + double* dst, size_t step, Size sz, void* scale) +{ + recip_(src1, step1, src2, step2, dst, step, sz, *(const double*)scale); +} + + +static BinaryFunc mulTab[] = +{ + (BinaryFunc)mul8u, (BinaryFunc)mul8s, (BinaryFunc)mul16u, + (BinaryFunc)mul16s, (BinaryFunc)mul32s, (BinaryFunc)mul32f, + (BinaryFunc)mul64f, 0 +}; + +static BinaryFunc divTab[] = +{ + (BinaryFunc)div8u, (BinaryFunc)div8s, (BinaryFunc)div16u, + (BinaryFunc)div16s, (BinaryFunc)div32s, (BinaryFunc)div32f, + (BinaryFunc)div64f, 0 +}; + +static BinaryFunc recipTab[] = +{ + (BinaryFunc)recip8u, (BinaryFunc)recip8s, (BinaryFunc)recip16u, + (BinaryFunc)recip16s, (BinaryFunc)recip32s, (BinaryFunc)recip32f, + (BinaryFunc)recip64f, 0 +}; + + +} + +void cv::multiply(InputArray src1, InputArray src2, + OutputArray dst, double scale, int dtype) +{ + arithm_op(src1, src2, dst, noArray(), dtype, mulTab, true, &scale); +} + +void cv::divide(InputArray src1, InputArray src2, + OutputArray dst, double scale, int dtype) +{ + arithm_op(src1, src2, dst, noArray(), dtype, divTab, true, &scale); +} + +void cv::divide(double scale, InputArray src2, + OutputArray dst, int dtype) +{ + arithm_op(src2, src2, dst, noArray(), dtype, recipTab, true, &scale); +} + +/****************************************************************************************\ +* addWeighted * 
+\****************************************************************************************/ + +namespace cv +{ + +template static void +addWeighted_( const T* src1, size_t step1, const T* src2, size_t step2, + T* dst, size_t step, Size size, void* _scalars ) +{ + const double* scalars = (const double*)_scalars; + WT alpha = (WT)scalars[0], beta = (WT)scalars[1], gamma = (WT)scalars[2]; + step1 /= sizeof(src1[0]); + step2 /= sizeof(src2[0]); + step /= sizeof(dst[0]); + + for( ; size.height--; src1 += step1, src2 += step2, dst += step ) + { + int x = 0; + for( ; x <= size.width - 4; x += 4 ) + { + T t0 = saturate_cast(src1[x]*alpha + src2[x]*beta + gamma); + T t1 = saturate_cast(src1[x+1]*alpha + src2[x+1]*beta + gamma); + dst[x] = t0; dst[x+1] = t1; + + t0 = saturate_cast(src1[x+2]*alpha + src2[x+2]*beta + gamma); + t1 = saturate_cast(src1[x+3]*alpha + src2[x+3]*beta + gamma); + dst[x+2] = t0; dst[x+3] = t1; + } + + for( ; x < size.width; x++ ) + dst[x] = saturate_cast(src1[x]*alpha + src2[x]*beta + gamma); + } +} + + +static void +addWeighted8u( const uchar* src1, size_t step1, + const uchar* src2, size_t step2, + uchar* dst, size_t step, Size size, + void* _scalars ) +{ + const double* scalars = (const double*)_scalars; + float alpha = (float)scalars[0], beta = (float)scalars[1], gamma = (float)scalars[2]; + + for( ; size.height--; src1 += step1, src2 += step2, dst += step ) + { + int x = 0; + +#if CV_SSE2 + if( USE_SSE2 ) + { + __m128 a4 = _mm_set1_ps(alpha), b4 = _mm_set1_ps(beta), g4 = _mm_set1_ps(gamma); + __m128i z = _mm_setzero_si128(); + + for( ; x <= size.width - 8; x += 8 ) + { + __m128i u = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(src1 + x)), z); + __m128i v = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(src2 + x)), z); + + __m128 u0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(u, z)); + __m128 u1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(u, z)); + __m128 v0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v, z)); + __m128 v1 = 
_mm_cvtepi32_ps(_mm_unpackhi_epi16(v, z)); + + u0 = _mm_add_ps(_mm_mul_ps(u0, a4), _mm_mul_ps(v0, b4)); + u1 = _mm_add_ps(_mm_mul_ps(u1, a4), _mm_mul_ps(v1, b4)); + u0 = _mm_add_ps(u0, g4); u1 = _mm_add_ps(u1, g4); + + u = _mm_packs_epi32(_mm_cvtps_epi32(u0), _mm_cvtps_epi32(u1)); + u = _mm_packus_epi16(u, u); + + _mm_storel_epi64((__m128i*)(dst + x), u); + } + } +#endif + for( ; x <= size.width - 4; x += 4 ) + { + float t0, t1; + t0 = CV_8TO32F(src1[x])*alpha + CV_8TO32F(src2[x])*beta + gamma; + t1 = CV_8TO32F(src1[x+1])*alpha + CV_8TO32F(src2[x+1])*beta + gamma; + + dst[x] = saturate_cast(t0); + dst[x+1] = saturate_cast(t1); + + t0 = CV_8TO32F(src1[x+2])*alpha + CV_8TO32F(src2[x+2])*beta + gamma; + t1 = CV_8TO32F(src1[x+3])*alpha + CV_8TO32F(src2[x+3])*beta + gamma; + + dst[x+2] = saturate_cast(t0); + dst[x+3] = saturate_cast(t1); + } + + for( ; x < size.width; x++ ) + { + float t0 = CV_8TO32F(src1[x])*alpha + CV_8TO32F(src2[x])*beta + gamma; + dst[x] = saturate_cast(t0); + } + } +} + +static void addWeighted8s( const schar* src1, size_t step1, const schar* src2, size_t step2, + schar* dst, size_t step, Size sz, void* scalars ) +{ + addWeighted_(src1, step1, src2, step2, dst, step, sz, scalars); +} + +static void addWeighted16u( const ushort* src1, size_t step1, const ushort* src2, size_t step2, + ushort* dst, size_t step, Size sz, void* scalars ) +{ + addWeighted_(src1, step1, src2, step2, dst, step, sz, scalars); +} + +static void addWeighted16s( const short* src1, size_t step1, const short* src2, size_t step2, + short* dst, size_t step, Size sz, void* scalars ) +{ + addWeighted_(src1, step1, src2, step2, dst, step, sz, scalars); +} + +static void addWeighted32s( const int* src1, size_t step1, const int* src2, size_t step2, + int* dst, size_t step, Size sz, void* scalars ) +{ + addWeighted_(src1, step1, src2, step2, dst, step, sz, scalars); +} + +static void addWeighted32f( const float* src1, size_t step1, const float* src2, size_t step2, + float* dst, size_t 
step, Size sz, void* scalars ) +{ + addWeighted_(src1, step1, src2, step2, dst, step, sz, scalars); +} + +static void addWeighted64f( const double* src1, size_t step1, const double* src2, size_t step2, + double* dst, size_t step, Size sz, void* scalars ) +{ + addWeighted_(src1, step1, src2, step2, dst, step, sz, scalars); +} + +static BinaryFunc addWeightedTab[] = +{ + (BinaryFunc)addWeighted8u, (BinaryFunc)addWeighted8s, (BinaryFunc)addWeighted16u, + (BinaryFunc)addWeighted16s, (BinaryFunc)addWeighted32s, (BinaryFunc)addWeighted32f, + (BinaryFunc)addWeighted64f, 0 +}; + +} + +void cv::addWeighted( InputArray src1, double alpha, InputArray src2, + double beta, double gamma, OutputArray dst, int dtype ) +{ + double scalars[] = {alpha, beta, gamma}; + arithm_op(src1, src2, dst, noArray(), dtype, addWeightedTab, true, scalars); +} + + +/****************************************************************************************\ +* compare * +\****************************************************************************************/ + +namespace cv +{ + +template static void +cmp_(const T* src1, size_t step1, const T* src2, size_t step2, + uchar* dst, size_t step, Size size, int code) +{ + step1 /= sizeof(src1[0]); + step2 /= sizeof(src2[0]); + if( code == CMP_GE || code == CMP_LT ) + { + std::swap(src1, src2); + std::swap(step1, step2); + code = code == CMP_GE ? CMP_LE : CMP_GT; + } + + if( code == CMP_GT || code == CMP_LE ) + { + int m = code == CMP_GT ? 
0 : 255; + for( ; size.height--; src1 += step1, src2 += step2, dst += step ) + { + int x = 0; + for( ; x <= size.width - 4; x += 4 ) + { + int t0, t1; + t0 = -(src1[x] > src2[x]) ^ m; + t1 = -(src1[x+1] > src2[x+1]) ^ m; + dst[x] = (uchar)t0; dst[x+1] = (uchar)t1; + t0 = -(src1[x+2] > src2[x+2]) ^ m; + t1 = -(src1[x+3] > src2[x+3]) ^ m; + dst[x+2] = (uchar)t0; dst[x+3] = (uchar)t1; + } + + for( ; x < size.width; x++ ) + dst[x] = (uchar)(-(src1[x] > src2[x]) ^ m); + } + } + else if( code == CMP_EQ || code == CMP_NE ) + { + int m = code == CMP_EQ ? 0 : 255; + for( ; size.height--; src1 += step1, src2 += step2, dst += step ) + { + int x = 0; + for( ; x <= size.width - 4; x += 4 ) + { + int t0, t1; + t0 = -(src1[x] == src2[x]) ^ m; + t1 = -(src1[x+1] == src2[x+1]) ^ m; + dst[x] = (uchar)t0; dst[x+1] = (uchar)t1; + t0 = -(src1[x+2] == src2[x+2]) ^ m; + t1 = -(src1[x+3] == src2[x+3]) ^ m; + dst[x+2] = (uchar)t0; dst[x+3] = (uchar)t1; + } + + for( ; x < size.width; x++ ) + dst[x] = (uchar)(-(src1[x] == src2[x]) ^ m); + } + } +} + + +static void cmp8u(const uchar* src1, size_t step1, const uchar* src2, size_t step2, + uchar* dst, size_t step, Size size, void* _cmpop) +{ + cmp_(src1, step1, src2, step2, dst, step, size, *(int*)_cmpop); +} + +static void cmp8s(const schar* src1, size_t step1, const schar* src2, size_t step2, + uchar* dst, size_t step, Size size, void* _cmpop) +{ + cmp_(src1, step1, src2, step2, dst, step, size, *(int*)_cmpop); +} + +static void cmp16u(const ushort* src1, size_t step1, const ushort* src2, size_t step2, + uchar* dst, size_t step, Size size, void* _cmpop) +{ + cmp_(src1, step1, src2, step2, dst, step, size, *(int*)_cmpop); +} + +static void cmp16s(const short* src1, size_t step1, const short* src2, size_t step2, + uchar* dst, size_t step, Size size, void* _cmpop) +{ + cmp_(src1, step1, src2, step2, dst, step, size, *(int*)_cmpop); +} + +static void cmp32s(const int* src1, size_t step1, const int* src2, size_t step2, + uchar* dst, size_t step, 
Size size, void* _cmpop) +{ + cmp_(src1, step1, src2, step2, dst, step, size, *(int*)_cmpop); +} + +static void cmp32f(const float* src1, size_t step1, const float* src2, size_t step2, + uchar* dst, size_t step, Size size, void* _cmpop) +{ + cmp_(src1, step1, src2, step2, dst, step, size, *(int*)_cmpop); +} + +static void cmp64f(const double* src1, size_t step1, const double* src2, size_t step2, + uchar* dst, size_t step, Size size, void* _cmpop) +{ + cmp_(src1, step1, src2, step2, dst, step, size, *(int*)_cmpop); +} + +static BinaryFunc cmpTab[] = +{ + (BinaryFunc)cmp8u, (BinaryFunc)cmp8s, (BinaryFunc)cmp16u, + (BinaryFunc)cmp16s, (BinaryFunc)cmp32s, (BinaryFunc)cmp32f, + (BinaryFunc)cmp64f, 0 +}; + + +static double getMinVal(int depth) +{ + static const double tab[] = {0, -128, 0, -32768, INT_MIN, -FLT_MAX, -DBL_MAX, 0}; + return tab[depth]; +} + +static double getMaxVal(int depth) +{ + static const double tab[] = {255, 127, 65535, 32767, INT_MAX, FLT_MAX, DBL_MAX, 0}; + return tab[depth]; +} + +} + +void cv::compare(InputArray _src1, InputArray _src2, OutputArray _dst, int op) +{ + CV_Assert( op == CMP_LT || op == CMP_LE || op == CMP_EQ || + op == CMP_NE || op == CMP_GE || op == CMP_GT ); + + int kind1 = _src1.kind(), kind2 = _src2.kind(); + Mat src1 = _src1.getMat(), src2 = _src2.getMat(); + + if( kind1 == kind2 && src1.dims <= 2 && src2.dims <= 2 && src1.size() == src2.size() && src1.type() == src2.type() ) + { + int cn = src1.channels(); + _dst.create(src1.size(), CV_8UC(cn)); + Mat dst = _dst.getMat(); + Size sz = getContinuousSize(src1, src2, dst, src1.channels()); + cmpTab[src1.depth()](src1.data, src1.step, src2.data, src2.step, dst.data, dst.step, sz, &op); + return; + } + + bool haveScalar = false; + + if( (kind1 == _InputArray::MATX) + (kind2 == _InputArray::MATX) == 1 || + src1.size != src2.size || src1.type() != src2.type() ) + { + if( checkScalar(src1, src2.type(), kind1, kind2) ) + { + // src1 is a scalar; swap it with src2 + swap(src1, src2); + op 
= op == CMP_LT ? CMP_GT : op == CMP_LE ? CMP_GE : + op == CMP_GE ? CMP_LE : op == CMP_GT ? CMP_LT : op; + } + else if( !checkScalar(src2, src1.type(), kind2, kind1) ) + CV_Error( CV_StsUnmatchedSizes, + "The operation is neither 'array op array' (where arrays have the same size and the same type), " + "nor 'array op scalar', nor 'scalar op array'" ); + haveScalar = true; + } + + + int cn = src1.channels(), depth1 = src1.depth(), depth2 = src2.depth(); + + _dst.create(src1.dims, src1.size, CV_8UC(cn)); + src1 = src1.reshape(1); src2 = src2.reshape(1); + Mat dst = _dst.getMat().reshape(1); + + size_t esz = src1.elemSize(); + size_t blocksize0 = (size_t)(BLOCK_SIZE + esz-1)/esz; + BinaryFunc func = cmpTab[depth1]; + + if( !haveScalar ) + { + const Mat* arrays[] = { &src1, &src2, &dst, 0 }; + uchar* ptrs[3]; + + NAryMatIterator it(arrays, ptrs); + size_t total = it.size; + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + func( ptrs[0], 0, ptrs[1], 0, ptrs[2], 0, Size((int)total, 1), &op ); + } + else + { + const Mat* arrays[] = { &src1, &dst, 0 }; + uchar* ptrs[2]; + + NAryMatIterator it(arrays, ptrs); + size_t total = it.size, blocksize = std::min(total, blocksize0); + + AutoBuffer _buf(blocksize*esz); + uchar *buf = _buf; + + if( depth1 > CV_32S ) + convertAndUnrollScalar( src2, depth1, buf, blocksize ); + else + { + double fval=0; + getConvertFunc(depth2, CV_64F)(src2.data, 0, 0, 0, (uchar*)&fval, 0, Size(1,1), 0); + if( fval < getMinVal(depth1) ) + { + dst = Scalar::all(op == CMP_GT || op == CMP_GE || op == CMP_NE ? 255 : 0); + return; + } + + if( fval > getMaxVal(depth1) ) + { + dst = Scalar::all(op == CMP_LT || op == CMP_LE || op == CMP_NE ? 255 : 0); + return; + } + + int ival = cvRound(fval); + if( fval != ival ) + { + if( op == CMP_LT || op == CMP_GE ) + ival = cvCeil(fval); + else if( op == CMP_LE || op == CMP_GT ) + ival = cvFloor(fval); + else + { + dst = Scalar::all(op == CMP_NE ? 
255 : 0); + return; + } + } + convertAndUnrollScalar(Mat(1, 1, CV_32S, &ival), depth1, buf, blocksize); + } + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + { + for( size_t j = 0; j < total; j += blocksize ) + { + int bsz = (int)MIN(total - j, blocksize); + func( ptrs[0], 0, buf, 0, ptrs[1], 0, Size(bsz, 1), &op); + ptrs[0] += bsz*esz; + ptrs[1] += bsz; + } + } + } +} + +/****************************************************************************************\ +* inRange * +\****************************************************************************************/ + +namespace cv +{ + +template static void +inRange_(const T* src1, size_t step1, const T* src2, size_t step2, + const T* src3, size_t step3, uchar* dst, size_t step, + Size size) +{ + step1 /= sizeof(src1[0]); + step2 /= sizeof(src2[0]); + step3 /= sizeof(src3[0]); + + for( ; size.height--; src1 += step1, src2 += step2, src3 += step3, dst += step ) + { + int x = 0; + for( ; x <= size.width - 4; x += 4 ) + { + int t0, t1; + t0 = src2[x] <= src1[x] && src1[x] <= src3[x]; + t1 = src2[x+1] <= src1[x+1] && src1[x+1] <= src3[x+1]; + dst[x] = (uchar)-t0; dst[x+1] = (uchar)-t1; + t0 = src2[x+2] <= src1[x+2] && src1[x+2] <= src3[x+2]; + t1 = src2[x+3] <= src1[x+3] && src1[x+3] <= src3[x+3]; + dst[x+2] = (uchar)-t0; dst[x+3] = (uchar)-t1; + } + + for( ; x < size.width; x++ ) + dst[x] = (uchar)-(src2[x] <= src1[x] && src1[x] <= src3[x]); + } +} + + +static void inRange8u(const uchar* src1, size_t step1, const uchar* src2, size_t step2, + const uchar* src3, size_t step3, uchar* dst, size_t step, Size size) +{ + inRange_(src1, step1, src2, step2, src3, step3, dst, step, size); +} + +static void inRange8s(const schar* src1, size_t step1, const schar* src2, size_t step2, + const schar* src3, size_t step3, uchar* dst, size_t step, Size size) +{ + inRange_(src1, step1, src2, step2, src3, step3, dst, step, size); +} + +static void inRange16u(const ushort* src1, size_t step1, const ushort* src2, size_t step2, + const 
ushort* src3, size_t step3, uchar* dst, size_t step, Size size) +{ + inRange_(src1, step1, src2, step2, src3, step3, dst, step, size); +} + +static void inRange16s(const short* src1, size_t step1, const short* src2, size_t step2, + const short* src3, size_t step3, uchar* dst, size_t step, Size size) +{ + inRange_(src1, step1, src2, step2, src3, step3, dst, step, size); +} + +static void inRange32s(const int* src1, size_t step1, const int* src2, size_t step2, + const int* src3, size_t step3, uchar* dst, size_t step, Size size) +{ + inRange_(src1, step1, src2, step2, src3, step3, dst, step, size); +} + +static void inRange32f(const float* src1, size_t step1, const float* src2, size_t step2, + const float* src3, size_t step3, uchar* dst, size_t step, Size size) +{ + inRange_(src1, step1, src2, step2, src3, step3, dst, step, size); +} + +static void inRange64f(const double* src1, size_t step1, const double* src2, size_t step2, + const double* src3, size_t step3, uchar* dst, size_t step, Size size) +{ + inRange_(src1, step1, src2, step2, src3, step3, dst, step, size); +} + +static void inRangeReduce(const uchar* src, uchar* dst, size_t len, int cn) +{ + int k = cn % 4 ? 
cn % 4 : 4; + size_t i, j; + if( k == 1 ) + for( i = j = 0; i < len; i++, j += cn ) + dst[i] = src[j]; + else if( k == 2 ) + for( i = j = 0; i < len; i++, j += cn ) + dst[i] = src[j] & src[j+1]; + else if( k == 3 ) + for( i = j = 0; i < len; i++, j += cn ) + dst[i] = src[j] & src[j+1] & src[j+2]; + else + for( i = j = 0; i < len; i++, j += cn ) + dst[i] = src[j] & src[j+1] & src[j+2] & src[j+3]; + + for( ; k < cn; k += 4 ) + { + for( i = 0, j = k; i < len; i++, j += cn ) + dst[i] &= src[j] & src[j+1] & src[j+2] & src[j+3]; + } +} + +typedef void (*InRangeFunc)( const uchar* src1, size_t step1, const uchar* src2, size_t step2, + const uchar* src3, size_t step3, uchar* dst, size_t step, Size sz ); + +static InRangeFunc inRangeTab[] = +{ + (InRangeFunc)inRange8u, (InRangeFunc)inRange8s, (InRangeFunc)inRange16u, + (InRangeFunc)inRange16s, (InRangeFunc)inRange32s, (InRangeFunc)inRange32f, + (InRangeFunc)inRange64f, 0 +}; + +} + +void cv::inRange(InputArray _src, InputArray _lowerb, + InputArray _upperb, OutputArray _dst) +{ + int skind = _src.kind(), lkind = _lowerb.kind(), ukind = _upperb.kind(); + Mat src = _src.getMat(), lb = _lowerb.getMat(), ub = _upperb.getMat(); + + bool lbScalar = false, ubScalar = false; + + if( (lkind == _InputArray::MATX && skind != _InputArray::MATX) || + src.size != lb.size || src.type() != lb.type() ) + { + if( !checkScalar(lb, src.type(), lkind, skind) ) + CV_Error( CV_StsUnmatchedSizes, + "The lower bounary is neither an array of the same size and same type as src, nor a scalar"); + lbScalar = true; + } + + if( (ukind == _InputArray::MATX && skind != _InputArray::MATX) || + src.size != ub.size || src.type() != ub.type() ) + { + if( !checkScalar(ub, src.type(), ukind, skind) ) + CV_Error( CV_StsUnmatchedSizes, + "The upper bounary is neither an array of the same size and same type as src, nor a scalar"); + ubScalar = true; + } + + CV_Assert( ((int)lbScalar ^ (int)ubScalar) == 0 ); + + int cn = src.channels(), depth = src.depth(); + + 
size_t esz = src.elemSize(); + size_t blocksize0 = (size_t)(BLOCK_SIZE + esz-1)/esz; + + _dst.create(src.dims, src.size, CV_8U); + Mat dst = _dst.getMat(); + InRangeFunc func = inRangeTab[depth]; + + const Mat* arrays_sc[] = { &src, &dst, 0 }; + const Mat* arrays_nosc[] = { &src, &dst, &lb, &ub, 0 }; + uchar* ptrs[4]; + + NAryMatIterator it(lbScalar && ubScalar ? arrays_sc : arrays_nosc, ptrs); + size_t total = it.size, blocksize = std::min(total, blocksize0); + + AutoBuffer _buf(blocksize*(((int)lbScalar + (int)ubScalar)*esz + cn) + 2*cn*sizeof(int) + 128); + uchar *buf = _buf, *mbuf = buf, *lbuf = 0, *ubuf = 0; + buf = alignPtr(buf + blocksize*cn, 16); + + if( lbScalar && ubScalar ) + { + lbuf = buf; + ubuf = buf = alignPtr(buf + blocksize*esz, 16); + + CV_Assert( lb.type() == ub.type() ); + int scdepth = lb.depth(); + + if( scdepth != depth && depth < CV_32S ) + { + int* ilbuf = (int*)alignPtr(buf + blocksize*esz, 16); + int* iubuf = ilbuf + cn; + + BinaryFunc sccvtfunc = getConvertFunc(scdepth, CV_32S); + sccvtfunc(lb.data, 0, 0, 0, (uchar*)ilbuf, 0, Size(cn, 1), 0); + sccvtfunc(ub.data, 0, 0, 0, (uchar*)iubuf, 0, Size(cn, 1), 0); + int minval = cvRound(getMinVal(depth)), maxval = cvRound(getMaxVal(depth)); + + for( int k = 0; k < cn; k++ ) + { + if( ilbuf[k] > iubuf[k] || ilbuf[k] > maxval || iubuf[k] < minval ) + ilbuf[k] = minval+1, iubuf[k] = minval; + } + lb = Mat(cn, 1, CV_32S, ilbuf); + ub = Mat(cn, 1, CV_32S, iubuf); + } + + convertAndUnrollScalar( lb, src.type(), lbuf, blocksize ); + convertAndUnrollScalar( ub, src.type(), ubuf, blocksize ); + } + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + { + for( size_t j = 0; j < total; j += blocksize ) + { + int bsz = (int)MIN(total - j, blocksize); + size_t delta = bsz*esz; + uchar *lptr = lbuf, *uptr = ubuf; + if( !lbScalar ) + { + lptr = ptrs[2]; + ptrs[2] += delta; + } + if( !ubScalar ) + { + int idx = !lbScalar ? 
3 : 2; + uptr = ptrs[idx]; + ptrs[idx] += delta; + } + func( ptrs[0], 0, lptr, 0, uptr, 0, cn == 1 ? ptrs[1] : mbuf, 0, Size(bsz*cn, 1)); + if( cn > 1 ) + inRangeReduce(mbuf, ptrs[1], bsz, cn); + ptrs[0] += delta; + ptrs[1] += bsz; + } + } +} + +/****************************************************************************************\ +* Earlier API: cvAdd etc. * +\****************************************************************************************/ + +CV_IMPL void +cvNot( const CvArr* srcarr, CvArr* dstarr ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + CV_Assert( src.size == dst.size && src.type() == dst.type() ); + cv::bitwise_not( src, dst ); +} + + +CV_IMPL void +cvAnd( const CvArr* srcarr1, const CvArr* srcarr2, CvArr* dstarr, const CvArr* maskarr ) +{ + cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2), + dst = cv::cvarrToMat(dstarr), mask; + CV_Assert( src1.size == dst.size && src1.type() == dst.type() ); + if( maskarr ) + mask = cv::cvarrToMat(maskarr); + cv::bitwise_and( src1, src2, dst, mask ); +} + + +CV_IMPL void +cvOr( const CvArr* srcarr1, const CvArr* srcarr2, CvArr* dstarr, const CvArr* maskarr ) +{ + cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2), + dst = cv::cvarrToMat(dstarr), mask; + CV_Assert( src1.size == dst.size && src1.type() == dst.type() ); + if( maskarr ) + mask = cv::cvarrToMat(maskarr); + cv::bitwise_or( src1, src2, dst, mask ); +} + + +CV_IMPL void +cvXor( const CvArr* srcarr1, const CvArr* srcarr2, CvArr* dstarr, const CvArr* maskarr ) +{ + cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2), + dst = cv::cvarrToMat(dstarr), mask; + CV_Assert( src1.size == dst.size && src1.type() == dst.type() ); + if( maskarr ) + mask = cv::cvarrToMat(maskarr); + cv::bitwise_xor( src1, src2, dst, mask ); +} + + +CV_IMPL void +cvAndS( const CvArr* srcarr, CvScalar s, CvArr* dstarr, const CvArr* maskarr ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = 
cv::cvarrToMat(dstarr), mask; + CV_Assert( src.size == dst.size && src.type() == dst.type() ); + if( maskarr ) + mask = cv::cvarrToMat(maskarr); + cv::bitwise_and( src, (const cv::Scalar&)s, dst, mask ); +} + + +CV_IMPL void +cvOrS( const CvArr* srcarr, CvScalar s, CvArr* dstarr, const CvArr* maskarr ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), mask; + CV_Assert( src.size == dst.size && src.type() == dst.type() ); + if( maskarr ) + mask = cv::cvarrToMat(maskarr); + cv::bitwise_or( src, (const cv::Scalar&)s, dst, mask ); +} + + +CV_IMPL void +cvXorS( const CvArr* srcarr, CvScalar s, CvArr* dstarr, const CvArr* maskarr ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), mask; + CV_Assert( src.size == dst.size && src.type() == dst.type() ); + if( maskarr ) + mask = cv::cvarrToMat(maskarr); + cv::bitwise_xor( src, (const cv::Scalar&)s, dst, mask ); +} + + +CV_IMPL void cvAdd( const CvArr* srcarr1, const CvArr* srcarr2, CvArr* dstarr, const CvArr* maskarr ) +{ + cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2), + dst = cv::cvarrToMat(dstarr), mask; + CV_Assert( src1.size == dst.size && src1.channels() == dst.channels() ); + if( maskarr ) + mask = cv::cvarrToMat(maskarr); + cv::add( src1, src2, dst, mask, dst.type() ); +} + + +CV_IMPL void cvSub( const CvArr* srcarr1, const CvArr* srcarr2, CvArr* dstarr, const CvArr* maskarr ) +{ + cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2), + dst = cv::cvarrToMat(dstarr), mask; + CV_Assert( src1.size == dst.size && src1.channels() == dst.channels() ); + if( maskarr ) + mask = cv::cvarrToMat(maskarr); + cv::subtract( src1, src2, dst, mask, dst.type() ); +} + + +CV_IMPL void cvAddS( const CvArr* srcarr1, CvScalar value, CvArr* dstarr, const CvArr* maskarr ) +{ + cv::Mat src1 = cv::cvarrToMat(srcarr1), + dst = cv::cvarrToMat(dstarr), mask; + CV_Assert( src1.size == dst.size && src1.channels() == dst.channels() ); + if( maskarr ) + mask = 
cv::cvarrToMat(maskarr); + cv::add( src1, (const cv::Scalar&)value, dst, mask, dst.type() ); +} + + +CV_IMPL void cvSubRS( const CvArr* srcarr1, CvScalar value, CvArr* dstarr, const CvArr* maskarr ) +{ + cv::Mat src1 = cv::cvarrToMat(srcarr1), + dst = cv::cvarrToMat(dstarr), mask; + CV_Assert( src1.size == dst.size && src1.channels() == dst.channels() ); + if( maskarr ) + mask = cv::cvarrToMat(maskarr); + cv::subtract( (const cv::Scalar&)value, src1, dst, mask, dst.type() ); +} + + +CV_IMPL void cvMul( const CvArr* srcarr1, const CvArr* srcarr2, + CvArr* dstarr, double scale ) +{ + cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2), + dst = cv::cvarrToMat(dstarr); + CV_Assert( src1.size == dst.size && src1.channels() == dst.channels() ); + cv::multiply( src1, src2, dst, scale, dst.type() ); +} + + +CV_IMPL void cvDiv( const CvArr* srcarr1, const CvArr* srcarr2, + CvArr* dstarr, double scale ) +{ + cv::Mat src2 = cv::cvarrToMat(srcarr2), + dst = cv::cvarrToMat(dstarr), mask; + CV_Assert( src2.size == dst.size && src2.channels() == dst.channels() ); + + if( srcarr1 ) + cv::divide( cv::cvarrToMat(srcarr1), src2, dst, scale, dst.type() ); + else + cv::divide( scale, src2, dst, dst.type() ); +} + + +CV_IMPL void +cvAddWeighted( const CvArr* srcarr1, double alpha, + const CvArr* srcarr2, double beta, + double gamma, CvArr* dstarr ) +{ + cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2), + dst = cv::cvarrToMat(dstarr); + CV_Assert( src1.size == dst.size && src1.channels() == dst.channels() ); + cv::addWeighted( src1, alpha, src2, beta, gamma, dst, dst.type() ); +} + + +CV_IMPL void +cvAbsDiff( const CvArr* srcarr1, const CvArr* srcarr2, CvArr* dstarr ) +{ + cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr); + CV_Assert( src1.size == dst.size && src1.type() == dst.type() ); + + cv::absdiff( src1, cv::cvarrToMat(srcarr2), dst ); +} + + +CV_IMPL void +cvAbsDiffS( const CvArr* srcarr1, CvArr* dstarr, CvScalar scalar 
) +{ + cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr); + CV_Assert( src1.size == dst.size && src1.type() == dst.type() ); + + cv::absdiff( src1, (const cv::Scalar&)scalar, dst ); +} + + +CV_IMPL void +cvInRange( const void* srcarr1, const void* srcarr2, + const void* srcarr3, void* dstarr ) +{ + cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr); + CV_Assert( src1.size == dst.size && dst.type() == CV_8U ); + + cv::inRange( src1, cv::cvarrToMat(srcarr2), cv::cvarrToMat(srcarr3), dst ); +} + + +CV_IMPL void +cvInRangeS( const void* srcarr1, CvScalar lowerb, CvScalar upperb, void* dstarr ) +{ + cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr); + CV_Assert( src1.size == dst.size && dst.type() == CV_8U ); + + cv::inRange( src1, (const cv::Scalar&)lowerb, (const cv::Scalar&)upperb, dst ); +} + + +CV_IMPL void +cvCmp( const void* srcarr1, const void* srcarr2, void* dstarr, int cmp_op ) +{ + cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr); + CV_Assert( src1.size == dst.size && dst.type() == CV_8U ); + + cv::compare( src1, cv::cvarrToMat(srcarr2), dst, cmp_op ); +} + + +CV_IMPL void +cvCmpS( const void* srcarr1, double value, void* dstarr, int cmp_op ) +{ + cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr); + CV_Assert( src1.size == dst.size && dst.type() == CV_8U ); + + cv::compare( src1, value, dst, cmp_op ); +} + + +CV_IMPL void +cvMin( const void* srcarr1, const void* srcarr2, void* dstarr ) +{ + cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr); + CV_Assert( src1.size == dst.size && src1.type() == dst.type() ); + + cv::min( src1, cv::cvarrToMat(srcarr2), dst ); +} + + +CV_IMPL void +cvMax( const void* srcarr1, const void* srcarr2, void* dstarr ) +{ + cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr); + CV_Assert( src1.size == dst.size && src1.type() == dst.type() ); + + cv::max( src1, cv::cvarrToMat(srcarr2), dst ); +} + + +CV_IMPL 
void +cvMinS( const void* srcarr1, double value, void* dstarr ) +{ + cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr); + CV_Assert( src1.size == dst.size && src1.type() == dst.type() ); + + cv::min( src1, value, dst ); +} + + +CV_IMPL void +cvMaxS( const void* srcarr1, double value, void* dstarr ) +{ + cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr); + CV_Assert( src1.size == dst.size && src1.type() == dst.type() ); + + cv::max( src1, value, dst ); +} + +/* End of file. */ diff --git a/opencv/core/array.cpp b/opencv/core/array.cpp new file mode 100644 index 0000000..f065a1e --- /dev/null +++ b/opencv/core/array.cpp @@ -0,0 +1,3206 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* //////////////////////////////////////////////////////////////////// +// +// CvMat, CvMatND, CvSparceMat and IplImage support functions +// (creation, deletion, copying, retrieving and setting elements etc.) 
+// +// */ + +#include "precomp.hpp" + + +static struct +{ + Cv_iplCreateImageHeader createHeader; + Cv_iplAllocateImageData allocateData; + Cv_iplDeallocate deallocate; + Cv_iplCreateROI createROI; + Cv_iplCloneImage cloneImage; +} +CvIPL; + +// Makes the library use native IPL image allocators +CV_IMPL void +cvSetIPLAllocators( Cv_iplCreateImageHeader createHeader, + Cv_iplAllocateImageData allocateData, + Cv_iplDeallocate deallocate, + Cv_iplCreateROI createROI, + Cv_iplCloneImage cloneImage ) +{ + int count = (createHeader != 0) + (allocateData != 0) + (deallocate != 0) + + (createROI != 0) + (cloneImage != 0); + + if( count != 0 && count != 5 ) + CV_Error( CV_StsBadArg, "Either all the pointers should be null or " + "they all should be non-null" ); + + CvIPL.createHeader = createHeader; + CvIPL.allocateData = allocateData; + CvIPL.deallocate = deallocate; + CvIPL.createROI = createROI; + CvIPL.cloneImage = cloneImage; +} + + +/****************************************************************************************\ +* CvMat creation and basic operations * +\****************************************************************************************/ + +// Creates CvMat and underlying data +CV_IMPL CvMat* +cvCreateMat( int height, int width, int type ) +{ + CvMat* arr = cvCreateMatHeader( height, width, type ); + cvCreateData( arr ); + + return arr; +} + + +static void icvCheckHuge( CvMat* arr ) +{ + if( (int64)arr->step*arr->rows > INT_MAX ) + arr->type &= ~CV_MAT_CONT_FLAG; +} + +// Creates CvMat header only +CV_IMPL CvMat* +cvCreateMatHeader( int rows, int cols, int type ) +{ + type = CV_MAT_TYPE(type); + + if( rows < 0 || cols <= 0 ) + CV_Error( CV_StsBadSize, "Non-positive width or height" ); + + int min_step = CV_ELEM_SIZE(type)*cols; + if( min_step <= 0 ) + CV_Error( CV_StsUnsupportedFormat, "Invalid matrix type" ); + + CvMat* arr = (CvMat*)cvAlloc( sizeof(*arr)); + + arr->step = min_step; + arr->type = CV_MAT_MAGIC_VAL | type | CV_MAT_CONT_FLAG; + arr->rows 
= rows; + arr->cols = cols; + arr->data.ptr = 0; + arr->refcount = 0; + arr->hdr_refcount = 1; + + icvCheckHuge( arr ); + return arr; +} + + +// Initializes CvMat header, allocated by the user +CV_IMPL CvMat* +cvInitMatHeader( CvMat* arr, int rows, int cols, + int type, void* data, int step ) +{ + if( !arr ) + CV_Error( CV_StsNullPtr, "" ); + + if( (unsigned)CV_MAT_DEPTH(type) > CV_DEPTH_MAX ) + CV_Error( CV_BadNumChannels, "" ); + + if( rows < 0 || cols <= 0 ) + CV_Error( CV_StsBadSize, "Non-positive cols or rows" ); + + type = CV_MAT_TYPE( type ); + arr->type = type | CV_MAT_MAGIC_VAL; + arr->rows = rows; + arr->cols = cols; + arr->data.ptr = (uchar*)data; + arr->refcount = 0; + arr->hdr_refcount = 0; + + int pix_size = CV_ELEM_SIZE(type); + int min_step = arr->cols*pix_size; + + if( step != CV_AUTOSTEP && step != 0 ) + { + if( step < min_step ) + CV_Error( CV_BadStep, "" ); + arr->step = step; + } + else + { + arr->step = min_step; + } + + arr->type = CV_MAT_MAGIC_VAL | type | + (arr->rows == 1 || arr->step == min_step ? 
CV_MAT_CONT_FLAG : 0); + + icvCheckHuge( arr ); + return arr; +} + + +// Deallocates the CvMat structure and underlying data +CV_IMPL void +cvReleaseMat( CvMat** array ) +{ + if( !array ) + CV_Error( CV_HeaderIsNull, "" ); + + if( *array ) + { + CvMat* arr = *array; + + if( !CV_IS_MAT_HDR_Z(arr) && !CV_IS_MATND_HDR(arr) ) + CV_Error( CV_StsBadFlag, "" ); + + *array = 0; + + cvDecRefData( arr ); + cvFree( &arr ); + } +} + + +// Creates a copy of matrix +CV_IMPL CvMat* +cvCloneMat( const CvMat* src ) +{ + if( !CV_IS_MAT_HDR( src )) + CV_Error( CV_StsBadArg, "Bad CvMat header" ); + + CvMat* dst = cvCreateMatHeader( src->rows, src->cols, src->type ); + + if( src->data.ptr ) + { + cvCreateData( dst ); + cvCopy( src, dst ); + } + + return dst; +} + + +/****************************************************************************************\ +* CvMatND creation and basic operations * +\****************************************************************************************/ + +CV_IMPL CvMatND* +cvInitMatNDHeader( CvMatND* mat, int dims, const int* sizes, + int type, void* data ) +{ + type = CV_MAT_TYPE(type); + int64 step = CV_ELEM_SIZE(type); + + if( !mat ) + CV_Error( CV_StsNullPtr, "NULL matrix header pointer" ); + + if( step == 0 ) + CV_Error( CV_StsUnsupportedFormat, "invalid array data type" ); + + if( !sizes ) + CV_Error( CV_StsNullPtr, "NULL pointer" ); + + if( dims <= 0 || dims > CV_MAX_DIM ) + CV_Error( CV_StsOutOfRange, + "non-positive or too large number of dimensions" ); + + for( int i = dims - 1; i >= 0; i-- ) + { + if( sizes[i] < 0 ) + CV_Error( CV_StsBadSize, "one of dimesion sizes is non-positive" ); + mat->dim[i].size = sizes[i]; + if( step > INT_MAX ) + CV_Error( CV_StsOutOfRange, "The array is too big" ); + mat->dim[i].step = (int)step; + step *= sizes[i]; + } + + mat->type = CV_MATND_MAGIC_VAL | (step <= INT_MAX ? 
CV_MAT_CONT_FLAG : 0) | type; + mat->dims = dims; + mat->data.ptr = (uchar*)data; + mat->refcount = 0; + mat->hdr_refcount = 0; + return mat; +} + + +// Creates CvMatND and underlying data +CV_IMPL CvMatND* +cvCreateMatND( int dims, const int* sizes, int type ) +{ + CvMatND* arr = cvCreateMatNDHeader( dims, sizes, type ); + cvCreateData( arr ); + + return arr; +} + + +// Creates CvMatND header only +CV_IMPL CvMatND* +cvCreateMatNDHeader( int dims, const int* sizes, int type ) +{ + if( dims <= 0 || dims > CV_MAX_DIM ) + CV_Error( CV_StsOutOfRange, + "non-positive or too large number of dimensions" ); + + CvMatND* arr = (CvMatND*)cvAlloc( sizeof(*arr) ); + + cvInitMatNDHeader( arr, dims, sizes, type, 0 ); + arr->hdr_refcount = 1; + return arr; +} + + +// Creates a copy of nD array +CV_IMPL CvMatND* +cvCloneMatND( const CvMatND* src ) +{ + if( !CV_IS_MATND_HDR( src )) + CV_Error( CV_StsBadArg, "Bad CvMatND header" ); + + CV_Assert( src->dims <= CV_MAX_DIM ); + int sizes[CV_MAX_DIM]; + + for( int i = 0; i < src->dims; i++ ) + sizes[i] = src->dim[i].size; + + CvMatND* dst = cvCreateMatNDHeader( src->dims, sizes, src->type ); + + if( src->data.ptr ) + { + cvCreateData( dst ); + cv::Mat _src(src), _dst(dst); + uchar* data0 = dst->data.ptr; + _src.copyTo(_dst); + CV_Assert(_dst.data == data0); + //cvCopy( src, dst ); + } + + return dst; +} + + +static CvMatND* +cvGetMatND( const CvArr* arr, CvMatND* matnd, int* coi ) +{ + CvMatND* result = 0; + + if( coi ) + *coi = 0; + + if( !matnd || !arr ) + CV_Error( CV_StsNullPtr, "NULL array pointer is passed" ); + + if( CV_IS_MATND_HDR(arr)) + { + if( !((CvMatND*)arr)->data.ptr ) + CV_Error( CV_StsNullPtr, "The matrix has NULL data pointer" ); + + result = (CvMatND*)arr; + } + else + { + CvMat stub, *mat = (CvMat*)arr; + + if( CV_IS_IMAGE_HDR( mat )) + mat = cvGetMat( mat, &stub, coi ); + + if( !CV_IS_MAT_HDR( mat )) + CV_Error( CV_StsBadArg, "Unrecognized or unsupported array type" ); + + if( !mat->data.ptr ) + CV_Error( 
CV_StsNullPtr, "Input array has NULL data pointer" ); + + matnd->data.ptr = mat->data.ptr; + matnd->refcount = 0; + matnd->hdr_refcount = 0; + matnd->type = mat->type; + matnd->dims = 2; + matnd->dim[0].size = mat->rows; + matnd->dim[0].step = mat->step; + matnd->dim[1].size = mat->cols; + matnd->dim[1].step = CV_ELEM_SIZE(mat->type); + result = matnd; + } + + return result; +} + + +// returns number of dimensions to iterate. +/* +Checks whether arrays have equal type, sizes (mask is optional array +that needs to have the same size, but 8uC1 or 8sC1 type). +Returns number of dimensions to iterate through: +0 means that all arrays are continuous, +1 means that all arrays are vectors of continuous arrays etc. +and the size of largest common continuous part of the arrays +*/ +CV_IMPL int +cvInitNArrayIterator( int count, CvArr** arrs, + const CvArr* mask, CvMatND* stubs, + CvNArrayIterator* iterator, int flags ) +{ + int dims = -1; + int i, j, size, dim0 = -1; + int64 step; + CvMatND* hdr0 = 0; + + if( count < 1 || count > CV_MAX_ARR ) + CV_Error( CV_StsOutOfRange, "Incorrect number of arrays" ); + + if( !arrs || !stubs ) + CV_Error( CV_StsNullPtr, "Some of required array pointers is NULL" ); + + if( !iterator ) + CV_Error( CV_StsNullPtr, "Iterator pointer is NULL" ); + + for( i = 0; i <= count; i++ ) + { + const CvArr* arr = i < count ? 
arrs[i] : mask; + CvMatND* hdr; + + if( !arr ) + { + if( i < count ) + CV_Error( CV_StsNullPtr, "Some of required array pointers is NULL" ); + break; + } + + if( CV_IS_MATND( arr )) + hdr = (CvMatND*)arr; + else + { + int coi = 0; + hdr = cvGetMatND( arr, stubs + i, &coi ); + if( coi != 0 ) + CV_Error( CV_BadCOI, "COI set is not allowed here" ); + } + + iterator->hdr[i] = hdr; + + if( i > 0 ) + { + if( hdr->dims != hdr0->dims ) + CV_Error( CV_StsUnmatchedSizes, + "Number of dimensions is the same for all arrays" ); + + if( i < count ) + { + switch( flags & (CV_NO_DEPTH_CHECK|CV_NO_CN_CHECK)) + { + case 0: + if( !CV_ARE_TYPES_EQ( hdr, hdr0 )) + CV_Error( CV_StsUnmatchedFormats, + "Data type is not the same for all arrays" ); + break; + case CV_NO_DEPTH_CHECK: + if( !CV_ARE_CNS_EQ( hdr, hdr0 )) + CV_Error( CV_StsUnmatchedFormats, + "Number of channels is not the same for all arrays" ); + break; + case CV_NO_CN_CHECK: + if( !CV_ARE_CNS_EQ( hdr, hdr0 )) + CV_Error( CV_StsUnmatchedFormats, + "Depth is not the same for all arrays" ); + break; + } + } + else + { + if( !CV_IS_MASK_ARR( hdr )) + CV_Error( CV_StsBadMask, "Mask should have 8uC1 or 8sC1 data type" ); + } + + if( !(flags & CV_NO_SIZE_CHECK) ) + { + for( j = 0; j < hdr->dims; j++ ) + if( hdr->dim[j].size != hdr0->dim[j].size ) + CV_Error( CV_StsUnmatchedSizes, + "Dimension sizes are the same for all arrays" ); + } + } + else + hdr0 = hdr; + + step = CV_ELEM_SIZE(hdr->type); + for( j = hdr->dims - 1; j > dim0; j-- ) + { + if( step != hdr->dim[j].step ) + break; + step *= hdr->dim[j].size; + } + + if( j == dim0 && step > INT_MAX ) + j++; + + if( j > dim0 ) + dim0 = j; + + iterator->hdr[i] = (CvMatND*)hdr; + iterator->ptr[i] = (uchar*)hdr->data.ptr; + } + + size = 1; + for( j = hdr0->dims - 1; j > dim0; j-- ) + size *= hdr0->dim[j].size; + + dims = dim0 + 1; + iterator->dims = dims; + iterator->count = count; + iterator->size = cvSize(size,1); + + for( i = 0; i < dims; i++ ) + iterator->stack[i] = 
hdr0->dim[i].size; + + return dims; +} + + +// returns zero value if iteration is finished, non-zero otherwise +CV_IMPL int cvNextNArraySlice( CvNArrayIterator* iterator ) +{ + assert( iterator != 0 ); + int i, dims, size = 0; + + for( dims = iterator->dims; dims > 0; dims-- ) + { + for( i = 0; i < iterator->count; i++ ) + iterator->ptr[i] += iterator->hdr[i]->dim[dims-1].step; + + if( --iterator->stack[dims-1] > 0 ) + break; + + size = iterator->hdr[0]->dim[dims-1].size; + + for( i = 0; i < iterator->count; i++ ) + iterator->ptr[i] -= (size_t)size*iterator->hdr[i]->dim[dims-1].step; + + iterator->stack[dims-1] = size; + } + + return dims > 0; +} + + +/****************************************************************************************\ +* CvSparseMat creation and basic operations * +\****************************************************************************************/ + + +// Creates CvMatND and underlying data +CV_IMPL CvSparseMat* +cvCreateSparseMat( int dims, const int* sizes, int type ) +{ + type = CV_MAT_TYPE( type ); + int pix_size1 = CV_ELEM_SIZE1(type); + int pix_size = pix_size1*CV_MAT_CN(type); + int i, size; + CvMemStorage* storage; + + if( pix_size == 0 ) + CV_Error( CV_StsUnsupportedFormat, "invalid array data type" ); + + if( dims <= 0 || dims > CV_MAX_DIM_HEAP ) + CV_Error( CV_StsOutOfRange, "bad number of dimensions" ); + + if( !sizes ) + CV_Error( CV_StsNullPtr, "NULL pointer" ); + + for( i = 0; i < dims; i++ ) + { + if( sizes[i] <= 0 ) + CV_Error( CV_StsBadSize, "one of dimesion sizes is non-positive" ); + } + + CvSparseMat* arr = (CvSparseMat*)cvAlloc(sizeof(*arr)+MAX(0,dims-CV_MAX_DIM)*sizeof(arr->size[0])); + + arr->type = CV_SPARSE_MAT_MAGIC_VAL | type; + arr->dims = dims; + arr->refcount = 0; + arr->hdr_refcount = 1; + memcpy( arr->size, sizes, dims*sizeof(sizes[0])); + + arr->valoffset = (int)cvAlign(sizeof(CvSparseNode), pix_size1); + arr->idxoffset = (int)cvAlign(arr->valoffset + pix_size, sizeof(int)); + size = 
    (int)cvAlign(arr->idxoffset + dims*sizeof(int), sizeof(CvSetElem));

    storage = cvCreateMemStorage( CV_SPARSE_MAT_BLOCK );
    arr->heap = cvCreateSet( 0, sizeof(CvSet), size, storage );

    arr->hashsize = CV_SPARSE_HASH_SIZE0;
    size = arr->hashsize*sizeof(arr->hashtable[0]);

    arr->hashtable = (void**)cvAlloc( size );
    memset( arr->hashtable, 0, size );

    return arr;
}


// Deallocates a sparse array: releases the node storage and the hash table,
// then the header itself; *array is reset to 0.
CV_IMPL void
cvReleaseSparseMat( CvSparseMat** array )
{
    if( !array )
        CV_Error( CV_HeaderIsNull, "" );

    if( *array )
    {
        CvSparseMat* arr = *array;

        if( !CV_IS_SPARSE_MAT_HDR(arr) )
            CV_Error( CV_StsBadFlag, "" );

        *array = 0;

        CvMemStorage* storage = arr->heap->storage;
        cvReleaseMemStorage( &storage );
        cvFree( &arr->hashtable );
        cvFree( &arr );
    }
}


// Creates a full copy of a sparse array
CV_IMPL CvSparseMat*
cvCloneSparseMat( const CvSparseMat* src )
{
    if( !CV_IS_SPARSE_MAT_HDR(src) )
        CV_Error( CV_StsBadArg, "Invalid sparse array header" );

    CvSparseMat* dst = cvCreateSparseMat( src->dims, src->size, src->type );
    cvCopy( src, dst );
    return dst;
}


// Initializes the iterator and returns the first stored node (0 if the
// matrix has no non-zero elements).
CvSparseNode*
cvInitSparseMatIterator( const CvSparseMat* mat, CvSparseMatIterator* iterator )
{
    CvSparseNode* node = 0;
    int idx;

    if( !CV_IS_SPARSE_MAT( mat ))
        CV_Error( CV_StsBadArg, "Invalid sparse matrix header" );

    if( !iterator )
        CV_Error( CV_StsNullPtr, "NULL iterator pointer" );

    iterator->mat = (CvSparseMat*)mat;
    iterator->node = 0;

    // scan the hash table for the first non-empty bucket
    for( idx = 0; idx < mat->hashsize; idx++ )
        if( mat->hashtable[idx] )
        {
            node = iterator->node = (CvSparseNode*)mat->hashtable[idx];
            break;
        }

    iterator->curidx = idx;
    return node;
}

#define ICV_SPARSE_MAT_HASH_MULTIPLIER cv::SparseMat::HASH_SCALE

// Returns a pointer to the value of the node with the given index tuple.
// The table is searched unless create_node < -1; when the node is absent
// and create_node != 0 a new node is inserted, and its value is
// zero-initialized only when create_node > 0.  The hash table is doubled
// once the load factor exceeds CV_SPARSE_HASH_RATIO.
static uchar*
icvGetNodePtr( CvSparseMat* mat, const int* idx, int* _type,
               int create_node, unsigned* precalc_hashval )
{
    uchar* ptr = 0;
    int i, tabidx;
    unsigned hashval = 0;
    CvSparseNode *node;
    assert( CV_IS_SPARSE_MAT( mat ));

    if( !precalc_hashval )
    {
        // multiplicative hash over the index tuple, with range checking
        for( i = 0; i < mat->dims; i++ )
        {
            int t = idx[i];
            if( (unsigned)t >= (unsigned)mat->size[i] )
                CV_Error( CV_StsOutOfRange, "One of indices is out of range" );
            hashval = hashval*ICV_SPARSE_MAT_HASH_MULTIPLIER + t;
        }
    }
    else
    {
        hashval = *precalc_hashval;
    }

    tabidx = hashval & (mat->hashsize - 1);   // hashsize is a power of two
    hashval &= INT_MAX;

    if( create_node >= -1 )
    {
        for( node = (CvSparseNode*)mat->hashtable[tabidx];
             node != 0; node = node->next )
        {
            if( node->hashval == hashval )
            {
                // equal hashes still require an exact index comparison
                int* nodeidx = CV_NODE_IDX(mat,node);
                for( i = 0; i < mat->dims; i++ )
                    if( idx[i] != nodeidx[i] )
                        break;
                if( i == mat->dims )
                {
                    ptr = (uchar*)CV_NODE_VAL(mat,node);
                    break;
                }
            }
        }
    }

    if( !ptr && create_node )
    {
        if( mat->heap->active_count >= mat->hashsize*CV_SPARSE_HASH_RATIO )
        {
            void** newtable;
            int newsize = MAX( mat->hashsize*2, CV_SPARSE_HASH_SIZE0);
            int newrawsize = newsize*sizeof(newtable[0]);

            CvSparseMatIterator iterator;
            assert( (newsize & (newsize - 1)) == 0 );

            // resize hash table
            newtable = (void**)cvAlloc( newrawsize );
            memset( newtable, 0, newrawsize );

            // rehash all existing nodes into the enlarged table
            node = cvInitSparseMatIterator( mat, &iterator );
            while( node )
            {
                CvSparseNode* next = cvGetNextSparseNode( &iterator );
                int newidx = node->hashval & (newsize - 1);
                node->next = (CvSparseNode*)newtable[newidx];
                newtable[newidx] = node;
                node = next;
            }

            cvFree( &mat->hashtable );
            mat->hashtable = newtable;
            mat->hashsize = newsize;
            tabidx = hashval & (newsize - 1);
        }

        // insert a fresh node at the head of its bucket
        node = (CvSparseNode*)cvSetNew( mat->heap );
        node->hashval = hashval;
        node->next = (CvSparseNode*)mat->hashtable[tabidx];
        mat->hashtable[tabidx] = node;
        memcpy(CV_NODE_IDX(mat,node), idx, mat->dims*sizeof(idx[0]));
        ptr = (uchar*)CV_NODE_VAL(mat,node);
        if( create_node > 0 )
            memset( ptr, 0, CV_ELEM_SIZE(mat->type));
    }

    if( _type )
        *_type = CV_MAT_TYPE(mat->type);

    return ptr;
}
// Removes the node with the given index tuple from the sparse matrix,
// unlinking it from its hash bucket and returning it to the node set.
// A missing node is silently ignored.
static void
icvDeleteNode( CvSparseMat* mat, const int* idx, unsigned* precalc_hashval )
{
    int i, tabidx;
    unsigned hashval = 0;
    CvSparseNode *node, *prev = 0;
    assert( CV_IS_SPARSE_MAT( mat ));

    if( !precalc_hashval )
    {
        // same multiplicative hash as in icvGetNodePtr
        for( i = 0; i < mat->dims; i++ )
        {
            int t = idx[i];
            if( (unsigned)t >= (unsigned)mat->size[i] )
                CV_Error( CV_StsOutOfRange, "One of indices is out of range" );
            hashval = hashval*ICV_SPARSE_MAT_HASH_MULTIPLIER + t;
        }
    }
    else
    {
        hashval = *precalc_hashval;
    }

    tabidx = hashval & (mat->hashsize - 1);
    hashval &= INT_MAX;

    for( node = (CvSparseNode*)mat->hashtable[tabidx];
         node != 0; prev = node, node = node->next )
    {
        if( node->hashval == hashval )
        {
            // equal hashes still require an exact index comparison
            int* nodeidx = CV_NODE_IDX(mat,node);
            for( i = 0; i < mat->dims; i++ )
                if( idx[i] != nodeidx[i] )
                    break;
            if( i == mat->dims )
                break;
        }
    }

    if( node )
    {
        if( prev )
            prev->next = node->next;
        else
            mat->hashtable[tabidx] = node->next;
        cvSetRemoveByPtr( mat->heap, node );
    }
}


/****************************************************************************************\
*                      Common for multiple array types operations                        *
\****************************************************************************************/

// Allocates underlying array data
CV_IMPL void
cvCreateData( CvArr* arr )
{
    if( CV_IS_MAT_HDR_Z( arr ))
    {
        size_t step, total_size;
        CvMat* mat = (CvMat*)arr;
        step = mat->step;

        if( mat->rows == 0 || mat->cols == 0 )
            return;

        if( mat->data.ptr != 0 )
            CV_Error( CV_StsError, "Data is already allocated" );

        if( step == 0 )
            step = CV_ELEM_SIZE(mat->type)*mat->cols;

        // compute the size in 64 bits first so size_t overflow is detected
        int64 _total_size = (int64)step*mat->rows + sizeof(int) + CV_MALLOC_ALIGN;
        total_size = (size_t)_total_size;
        if(_total_size != (int64)total_size)
            CV_Error(CV_StsNoMem, "Too big buffer is allocated" );
        // the reference counter is stored right before the aligned data area
        mat->refcount = (int*)cvAlloc( (size_t)total_size );
        mat->data.ptr = (uchar*)cvAlignPtr( mat->refcount + 1, CV_MALLOC_ALIGN );
        *mat->refcount = 1;
    }
    else if( CV_IS_IMAGE_HDR(arr))
    {
        IplImage* img = (IplImage*)arr;

        if( img->imageData != 0 )
            CV_Error( CV_StsError, "Data is already allocated" );

        if( !CvIPL.allocateData )
        {
            img->imageData = img->imageDataOrigin =
                        (char*)cvAlloc( (size_t)img->imageSize );
        }
        else
        {
            int depth = img->depth;
            int width = img->width;

            // the IPL allocator cannot handle float images directly:
            // temporarily present them as wider 8u images, then restore
            if( img->depth == IPL_DEPTH_32F || img->depth == IPL_DEPTH_64F )
            {
                img->width *= img->depth == IPL_DEPTH_32F ? sizeof(float) : sizeof(double);
                img->depth = IPL_DEPTH_8U;
            }

            CvIPL.allocateData( img, 0, 0 );

            img->width = width;
            img->depth = depth;
        }
    }
    else if( CV_IS_MATND_HDR( arr ))
    {
        CvMatND* mat = (CvMatND*)arr;
        int i;
        size_t total_size = CV_ELEM_SIZE(mat->type);

        if( mat->dim[0].size == 0 )
            return;

        if( mat->data.ptr != 0 )
            CV_Error( CV_StsError, "Data is already allocated" );

        if( CV_IS_MAT_CONT( mat->type ))
        {
            total_size = (size_t)mat->dim[0].size*(mat->dim[0].step != 0 ?
                         mat->dim[0].step : total_size);
        }
        else
        {
            // non-continuous layout: the largest extent bounds the buffer
            for( i = mat->dims - 1; i >= 0; i-- )
            {
                size_t size = (size_t)mat->dim[i].step*mat->dim[i].size;

                if( total_size < size )
                    total_size = size;
            }
        }

        // refcount stored before the aligned data, same as the CvMat case
        mat->refcount = (int*)cvAlloc( total_size +
                                       sizeof(int) + CV_MALLOC_ALIGN );
        mat->data.ptr = (uchar*)cvAlignPtr( mat->refcount + 1, CV_MALLOC_ALIGN );
        *mat->refcount = 1;
    }
    else
        CV_Error( CV_StsBadArg, "unrecognized or unsupported array type" );
}


// Assigns external data to array
CV_IMPL void
cvSetData( CvArr* arr, void* data, int step )
{
    int pix_size, min_step;

    // drop ownership of any previously attached buffer first
    if( CV_IS_MAT_HDR(arr) || CV_IS_MATND_HDR(arr) )
        cvReleaseData( arr );

    if( CV_IS_MAT_HDR( arr ))
    {
        CvMat* mat = (CvMat*)arr;

        int type = CV_MAT_TYPE(mat->type);
        pix_size = CV_ELEM_SIZE(type);
        min_step = mat->cols*pix_size;

        if( step != CV_AUTOSTEP && step != 0 )
        {
            if( step < min_step && data != 0 )
                CV_Error( CV_BadStep, "" );
            mat->step = step;
        }
        else
            mat->step = min_step;

        mat->data.ptr = (uchar*)data;
        // rows packed tightly (or a single row) => the data is continuous
        mat->type = CV_MAT_MAGIC_VAL | type |
                    (mat->rows == 1 || mat->step == min_step ? CV_MAT_CONT_FLAG : 0);
        icvCheckHuge( mat );
    }
    else if( CV_IS_IMAGE_HDR( arr ))
    {
        IplImage* img = (IplImage*)arr;

        pix_size = ((img->depth & 255) >> 3)*img->nChannels;
        min_step = img->width*pix_size;

        if( step != CV_AUTOSTEP && img->height > 1 )
        {
            if( step < min_step && data != 0 )
                CV_Error( CV_BadStep, "" );
            img->widthStep = step;
        }
        else
        {
            img->widthStep = min_step;
        }

        img->imageSize = img->widthStep * img->height;
        img->imageData = img->imageDataOrigin = (char*)data;

        // record 8-byte alignment when both the pointer and the step allow it
        if( (((int)(size_t)data | step) & 7) == 0 &&
            cvAlign(img->width * pix_size, 8) == step )
            img->align = 8;
        else
            img->align = 4;
    }
    else if( CV_IS_MATND_HDR( arr ))
    {
        CvMatND* mat = (CvMatND*)arr;
        int i;
        int64 cur_step;

        if( step != CV_AUTOSTEP )
            CV_Error( CV_BadStep,
            "For multidimensional array only CV_AUTOSTEP is allowed here" );

        mat->data.ptr = (uchar*)data;
        cur_step = CV_ELEM_SIZE(mat->type);

        // recompute dense steps from the innermost dimension outwards
        for( i = mat->dims - 1; i >= 0; i-- )
        {
            if( cur_step > INT_MAX )
                CV_Error( CV_StsOutOfRange, "The array is too big" );
            mat->dim[i].step = (int)cur_step;
            cur_step *= mat->dim[i].size;
        }
    }
    else
        CV_Error( CV_StsBadArg, "unrecognized or unsupported array type" );
}


// Deallocates array's data
CV_IMPL void
cvReleaseData( CvArr* arr )
{
    if( CV_IS_MAT_HDR( arr ) || CV_IS_MATND_HDR( arr ))
    {
        CvMat* mat = (CvMat*)arr;
        cvDecRefData( mat );
    }
    else if( CV_IS_IMAGE_HDR( arr ))
    {
        IplImage* img = (IplImage*)arr;

        if( !CvIPL.deallocate )
        {
            char* ptr = img->imageDataOrigin;
            img->imageData = img->imageDataOrigin = 0;
            cvFree( &ptr );
        }
        else
        {
            CvIPL.deallocate( img, IPL_IMAGE_DATA );
        }
    }
    else
        CV_Error( CV_StsBadArg, "unrecognized or unsupported array type" );
}


// Retrieves essential information about image ROI or CvMat data
CV_IMPL void
cvGetRawData( const CvArr* arr, uchar** data, int* step, CvSize* roi_size )
{
    if( CV_IS_MAT( arr ))
    {
        CvMat *mat = (CvMat*)arr;
        if( step )
            *step = mat->step;

        if( data )
            *data = mat->data.ptr;

        if( roi_size )
            *roi_size = cvGetMatSize( mat );
    }
    else if( CV_IS_IMAGE( arr ))
    {
        IplImage* img = (IplImage*)arr;

        if( step )
            *step = img->widthStep;

        if( data )
            *data = cvPtr2D( img, 0, 0 );

        if( roi_size )
        {
            // ROI, when present, overrides the full image extent
            if( img->roi )
            {
                *roi_size = cvSize( img->roi->width, img->roi->height );
            }
            else
            {
                *roi_size = cvSize( img->width, img->height );
            }
        }
    }
    else if( CV_IS_MATND( arr ))
    {
        CvMatND* mat = (CvMatND*)arr;

        if( !CV_IS_MAT_CONT( mat->type ))
            CV_Error( CV_StsBadArg, "Only continuous nD arrays are supported here" );

        if( data )
            *data = mat->data.ptr;

        if( roi_size || step )
        {
            // fold the nD shape into rows x cols: >2 dims collapse into height
            int i, size1 = mat->dim[0].size, size2 = 1;

            if( mat->dims > 2 )
                for( i = 1; i < mat->dims; i++ )
                    size1 *= mat->dim[i].size;
            else
                size2 = mat->dim[1].size;

            if( roi_size )
            {
                roi_size->width = size2;
                roi_size->height = size1;
            }

            if( step )
                *step = mat->dim[0].step;
        }
    }
    else
        CV_Error( CV_StsBadArg, "unrecognized or unsupported array type" );
}


// Returns the element type (CV_8UC1 ...) for any supported array kind
CV_IMPL int
cvGetElemType( const CvArr* arr )
{
    int type = -1;
    if( CV_IS_MAT_HDR(arr) || CV_IS_MATND_HDR(arr) || CV_IS_SPARSE_MAT_HDR(arr))
        type = CV_MAT_TYPE( ((CvMat*)arr)->type );
    else if( CV_IS_IMAGE(arr))
    {
        IplImage* img = (IplImage*)arr;
        type = CV_MAKETYPE( IPL2CV_DEPTH(img->depth), img->nChannels );
    }
    else
        CV_Error( CV_StsBadArg, "unrecognized or unsupported array type" );

    return type;
}


// Returns a number of array dimensions; optionally fills sizes[]
CV_IMPL int
cvGetDims( const CvArr* arr, int* sizes )
{
    int dims = -1;
    if( CV_IS_MAT_HDR( arr ))
    {
        CvMat* mat = (CvMat*)arr;

        dims = 2;
        if( sizes )
        {
            sizes[0] = mat->rows;
            sizes[1] = mat->cols;
        }
    }
    else if( CV_IS_IMAGE( arr ))
    {
        IplImage* img = (IplImage*)arr;
        dims = 2;

        if( sizes )
        {
            sizes[0] = img->height;
            sizes[1] = img->width;
        }
    }
    else if( CV_IS_MATND_HDR( arr ))
    {
        CvMatND* mat = (CvMatND*)arr;
        dims = mat->dims;

        if( sizes )
        {
            int i;
            for( i = 0; i < dims; i++ )
                sizes[i] = mat->dim[i].size;
        }
    }
    else if( CV_IS_SPARSE_MAT_HDR( arr ))
    {
        CvSparseMat* mat = (CvSparseMat*)arr;
        dims = mat->dims;

        if( sizes )
            memcpy( sizes, mat->size, dims*sizeof(sizes[0]));
    }
    else
        CV_Error( CV_StsBadArg, "unrecognized or unsupported array type" );

    return dims;
}


// Returns the size of particular array dimension
CV_IMPL int
cvGetDimSize( const CvArr* arr, int index )
{
    int size = -1;

    if( CV_IS_MAT( arr ))
    {
        CvMat *mat = (CvMat*)arr;

        switch( index )
        {
        case 0:
            size = mat->rows;
            break;
        case 1:
            size = mat->cols;
            break;
        default:
            CV_Error( CV_StsOutOfRange, "bad dimension index" );
        }
    }
    else if( CV_IS_IMAGE( arr ))
    {
        IplImage* img = (IplImage*)arr;

        // for images, the ROI (when set) defines the visible size
        switch( index )
        {
        case 0:
            size = !img->roi ? img->height : img->roi->height;
            break;
        case 1:
            size = !img->roi ? img->width : img->roi->width;
            break;
        default:
            CV_Error( CV_StsOutOfRange, "bad dimension index" );
        }
    }
    else if( CV_IS_MATND_HDR( arr ))
    {
        CvMatND* mat = (CvMatND*)arr;

        if( (unsigned)index >= (unsigned)mat->dims )
            CV_Error( CV_StsOutOfRange, "bad dimension index" );

        size = mat->dim[index].size;
    }
    else if( CV_IS_SPARSE_MAT_HDR( arr ))
    {
        CvSparseMat* mat = (CvSparseMat*)arr;

        if( (unsigned)index >= (unsigned)mat->dims )
            CV_Error( CV_StsOutOfRange, "bad dimension index" );

        size = mat->size[index];
    }
    else
        CV_Error( CV_StsBadArg, "unrecognized or unsupported array type" );

    return size;
}


// Returns the size of CvMat or IplImage
CV_IMPL CvSize
cvGetSize( const CvArr* arr )
{
    CvSize size = { 0, 0 };

    if( CV_IS_MAT_HDR_Z( arr ))
    {
        CvMat *mat = (CvMat*)arr;

        size.width = mat->cols;
        size.height = mat->rows;
    }
    else if( CV_IS_IMAGE_HDR( arr ))
    {
        IplImage* img = (IplImage*)arr;

        // ROI takes precedence over the full image size
        if( img->roi )
        {
            size.width = img->roi->width;
            size.height = img->roi->height;
        }
        else
        {
            size.width = img->width;
            size.height = img->height;
        }
    }
    else
        CV_Error( CV_StsBadArg, "Array should be CvMat or IplImage" );

    return size;
}


// Selects sub-array (no data is copied)
CV_IMPL CvMat*
cvGetSubRect( const CvArr* arr, CvMat* submat, CvRect rect )
{
    CvMat* res = 0;
    CvMat stub, *mat = (CvMat*)arr;

    if( !CV_IS_MAT( mat ))
        mat = cvGetMat( mat, &stub );

    if( !submat )
        CV_Error( CV_StsNullPtr, "" );

    // any negative field makes the bitwise OR negative
    if( (rect.x|rect.y|rect.width|rect.height) < 0 )
        CV_Error( CV_StsBadSize, "" );

    if( rect.x + rect.width > mat->cols ||
        rect.y + rect.height > mat->rows )
        CV_Error( CV_StsBadSize, "" );

    {
    /*
    int* refcount = mat->refcount;

    if( refcount )
        ++*refcount;

    cvDecRefData( submat );
    */
    submat->data.ptr = mat->data.ptr + (size_t)rect.y*mat->step +
                       rect.x*CV_ELEM_SIZE(mat->type);
    submat->step = mat->step;
    // a narrower rect breaks continuity unless the view is a single row
    submat->type = (mat->type & (rect.width < mat->cols ? ~CV_MAT_CONT_FLAG : -1)) |
                   (rect.height <= 1 ? CV_MAT_CONT_FLAG : 0);
    submat->rows = rect.height;
    submat->cols = rect.width;
    submat->refcount = 0;
    res = submat;
    }

    return res;
}


// Selects array's row span.
CV_IMPL CvMat*
cvGetRows( const CvArr* arr, CvMat* submat,
           int start_row, int end_row, int delta_row )
{
    CvMat* res = 0;
    CvMat stub, *mat = (CvMat*)arr;

    if( !CV_IS_MAT( mat ))
        mat = cvGetMat( mat, &stub );

    if( !submat )
        CV_Error( CV_StsNullPtr, "" );

    if( (unsigned)start_row >= (unsigned)mat->rows ||
        (unsigned)end_row > (unsigned)mat->rows || delta_row <= 0 )
        CV_Error( CV_StsOutOfRange, "" );

    {
    /*
    int* refcount = mat->refcount;

    if( refcount )
        ++*refcount;

    cvDecRefData( submat );
    */
    if( delta_row == 1 )
    {
        submat->rows = end_row - start_row;
        submat->step = mat->step;
    }
    else
    {
        // round the row count up when the stride does not divide the span
        submat->rows = (end_row - start_row + delta_row - 1)/delta_row;
        submat->step = mat->step * delta_row;
    }

    submat->cols = mat->cols;
    // a single-row header gets a zero step
    submat->step &= submat->rows > 1 ? -1 : 0;
    submat->data.ptr = mat->data.ptr + (size_t)start_row*mat->step;
    // strided multi-row views are not continuous; single rows always are
    submat->type = (mat->type | (submat->rows == 1 ? CV_MAT_CONT_FLAG : 0)) &
                   (delta_row != 1 && submat->rows > 1 ? ~CV_MAT_CONT_FLAG : -1);
    submat->refcount = 0;
    submat->hdr_refcount = 0;
    res = submat;
    }

    return res;
}


// Selects array's column span.
CV_IMPL CvMat*
cvGetCols( const CvArr* arr, CvMat* submat, int start_col, int end_col )
{
    CvMat* res = 0;
    CvMat stub, *mat = (CvMat*)arr;
    int cols;

    if( !CV_IS_MAT( mat ))
        mat = cvGetMat( mat, &stub );

    if( !submat )
        CV_Error( CV_StsNullPtr, "" );

    cols = mat->cols;
    if( (unsigned)start_col >= (unsigned)cols ||
        (unsigned)end_col > (unsigned)cols )
        CV_Error( CV_StsOutOfRange, "" );

    {
    /*
    int* refcount = mat->refcount;

    if( refcount )
        ++*refcount;

    cvDecRefData( submat );
    */
    submat->rows = mat->rows;
    submat->cols = end_col - start_col;
    submat->step = mat->step;
    submat->data.ptr = mat->data.ptr + (size_t)start_col*CV_ELEM_SIZE(mat->type);
    // a narrower multi-row view is no longer continuous
    submat->type = mat->type & (submat->rows > 1 && submat->cols < cols ?
                                ~CV_MAT_CONT_FLAG : -1);
    submat->refcount = 0;
    submat->hdr_refcount = 0;
    res = submat;
    }

    return res;
}


// Selects array diagonal
CV_IMPL CvMat*
cvGetDiag( const CvArr* arr, CvMat* submat, int diag )
{
    CvMat* res = 0;
    CvMat stub, *mat = (CvMat*)arr;
    int len, pix_size;

    if( !CV_IS_MAT( mat ))
        mat = cvGetMat( mat, &stub );

    if( !submat )
        CV_Error( CV_StsNullPtr, "" );

    pix_size = CV_ELEM_SIZE(mat->type);

    /*{
    int* refcount = mat->refcount;

    if( refcount )
        ++*refcount;

    cvDecRefData( submat );
    }*/

    if( diag >= 0 )
    {
        // diagonals above the main one start at column `diag`
        len = mat->cols - diag;

        if( len <= 0 )
            CV_Error( CV_StsOutOfRange, "" );

        len = CV_IMIN( len, mat->rows );
        submat->data.ptr = mat->data.ptr + diag*pix_size;
    }
    else
    {
        // diagonals below the main one start at row `-diag`
        len = mat->rows + diag;

        if( len <= 0 )
            CV_Error( CV_StsOutOfRange, "" );

        len = CV_IMIN( len, mat->cols );
        submat->data.ptr = mat->data.ptr - diag*mat->step;
    }

    // the diagonal is exposed as a single-column vector
    submat->rows = len;
    submat->cols = 1;
    submat->step = mat->step + (submat->rows > 1 ? pix_size : 0);
    submat->type = mat->type;
    if( submat->rows > 1 )
        submat->type &= ~CV_MAT_CONT_FLAG;
    else
        submat->type |= CV_MAT_CONT_FLAG;
    submat->refcount = 0;
    submat->hdr_refcount = 0;
    res = submat;

    return res;
}


/****************************************************************************************\
*                  Operations on CvScalar and accessing array elements                   *
\****************************************************************************************/

// Converts CvScalar to specified type
CV_IMPL void
cvScalarToRawData( const CvScalar* scalar, void* data, int type, int extend_to_12 )
{
    type = CV_MAT_TYPE(type);
    int cn = CV_MAT_CN( type );
    int depth = type & CV_MAT_DEPTH_MASK;

    assert( scalar && data );
    if( (unsigned)(cn - 1) >= 4 )
        CV_Error( CV_StsOutOfRange, "The number of channels must be 1, 2, 3 or 4" );

    switch( depth )
    {
    case CV_8UC1:
        while( cn-- )
        {
            int t = cvRound( scalar->val[cn] );
            ((uchar*)data)[cn] = CV_CAST_8U(t);
        }
        break;
    case CV_8SC1:
        while( cn-- )
        {
            int t = cvRound( scalar->val[cn] );
            ((char*)data)[cn] = CV_CAST_8S(t);
        }
        break;
    case CV_16UC1:
        while( cn-- )
        {
            int t = cvRound( scalar->val[cn] );
            ((ushort*)data)[cn] = CV_CAST_16U(t);
        }
        break;
    case CV_16SC1:
        while( cn-- )
        {
            int t = cvRound( scalar->val[cn] );
            ((short*)data)[cn] = CV_CAST_16S(t);
        }
        break;
    case CV_32SC1:
        while( cn-- )
            ((int*)data)[cn] = cvRound( scalar->val[cn] );
        break;
    case CV_32FC1:
        while( cn-- )
            ((float*)data)[cn] = (float)(scalar->val[cn]);
        break;
    case CV_64FC1:
        while( cn-- )
            ((double*)data)[cn] = (double)(scalar->val[cn]);
        break;
    default:
        assert(0);
        CV_Error( CV_BadDepth, "" );
    }

    if( extend_to_12 )
    {
        // replicate the first pixel until 12 single-channel slots are filled
        int pix_size = CV_ELEM_SIZE(type);
        int offset = CV_ELEM_SIZE1(depth)*12;

        do
        {
            offset -= pix_size;
            memcpy((char*)data + offset, data, pix_size);
        }
        while( offset > pix_size );
    }
}


// Converts data of specified type to
CvScalar
CV_IMPL void
cvRawDataToScalar( const void* data, int flags, CvScalar* scalar )
{
    int cn = CV_MAT_CN( flags );

    assert( scalar && data );

    if( (unsigned)(cn - 1) >= 4 )
        CV_Error( CV_StsOutOfRange, "The number of channels must be 1, 2, 3 or 4" );

    // unused channels are reported as zero
    memset( scalar->val, 0, sizeof(scalar->val));

    switch( CV_MAT_DEPTH( flags ))
    {
    case CV_8U:
        while( cn-- )
            scalar->val[cn] = CV_8TO32F(((uchar*)data)[cn]);
        break;
    case CV_8S:
        while( cn-- )
            scalar->val[cn] = CV_8TO32F(((char*)data)[cn]);
        break;
    case CV_16U:
        while( cn-- )
            scalar->val[cn] = ((ushort*)data)[cn];
        break;
    case CV_16S:
        while( cn-- )
            scalar->val[cn] = ((short*)data)[cn];
        break;
    case CV_32S:
        while( cn-- )
            scalar->val[cn] = ((int*)data)[cn];
        break;
    case CV_32F:
        while( cn-- )
            scalar->val[cn] = ((float*)data)[cn];
        break;
    case CV_64F:
        while( cn-- )
            scalar->val[cn] = ((double*)data)[cn];
        break;
    default:
        assert(0);
        CV_Error( CV_BadDepth, "" );
    }
}


// Reads a single element of the given depth as double
static double icvGetReal( const void* data, int type )
{
    switch( type )
    {
    case CV_8U:
        return *(uchar*)data;
    case CV_8S:
        return *(char*)data;
    case CV_16U:
        return *(ushort*)data;
    case CV_16S:
        return *(short*)data;
    case CV_32S:
        return *(int*)data;
    case CV_32F:
        return *(float*)data;
    case CV_64F:
        return *(double*)data;
    }

    return 0;
}


// Stores a double into a single element of the given depth,
// rounding and saturating for the integer types
static void icvSetReal( double value, const void* data, int type )
{
    if( type < CV_32F )
    {
        int ivalue = cvRound(value);
        switch( type )
        {
        case CV_8U:
            *(uchar*)data = CV_CAST_8U(ivalue);
            break;
        case CV_8S:
            *(char*)data = CV_CAST_8S(ivalue);
            break;
        case CV_16U:
            *(ushort*)data = CV_CAST_16U(ivalue);
            break;
        case CV_16S:
            *(short*)data = CV_CAST_16S(ivalue);
            break;
        case CV_32S:
            *(int*)data = CV_CAST_32S(ivalue);
            break;
        }
    }
    else
    {
        switch( type )
        {
        case CV_32F:
            *(float*)data = (float)value;
            break;
        case CV_64F:
            *(double*)data = value;
            break;
        }
    }
}


// Returns pointer to specified element of array (linear index is used)
CV_IMPL uchar*
cvPtr1D( const CvArr* arr, int idx, int* _type )
{
    uchar* ptr = 0;
    if( CV_IS_MAT( arr ))
    {
        CvMat* mat = (CvMat*)arr;

        int type = CV_MAT_TYPE(mat->type);
        int pix_size = CV_ELEM_SIZE(type);

        if( _type )
            *_type = type;

        // the first part is mul-free sufficient check
        // that the index is within the matrix
        if( (unsigned)idx >= (unsigned)(mat->rows + mat->cols - 1) &&
            (unsigned)idx >= (unsigned)(mat->rows*mat->cols))
            CV_Error( CV_StsOutOfRange, "index is out of range" );

        if( CV_IS_MAT_CONT(mat->type))
        {
            ptr = mat->data.ptr + (size_t)idx*pix_size;
        }
        else
        {
            // non-continuous: split the linear index into row and column
            int row, col;
            if( mat->cols == 1 )
                row = idx, col = 0;
            else
                row = idx/mat->cols, col = idx - row*mat->cols;
            ptr = mat->data.ptr + (size_t)row*mat->step + col*pix_size;
        }
    }
    else if( CV_IS_IMAGE_HDR( arr ))
    {
        IplImage* img = (IplImage*)arr;
        int width = !img->roi ? img->width : img->roi->width;
        int y = idx/width, x = idx - y*width;

        ptr = cvPtr2D( arr, y, x, _type );
    }
    else if( CV_IS_MATND( arr ))
    {
        CvMatND* mat = (CvMatND*)arr;
        int j, type = CV_MAT_TYPE(mat->type);
        size_t size = mat->dim[0].size;

        if( _type )
            *_type = type;

        for( j = 1; j < mat->dims; j++ )
            size *= mat->dim[j].size;

        if((unsigned)idx >= (unsigned)size )
            CV_Error( CV_StsOutOfRange, "index is out of range" );

        if( CV_IS_MAT_CONT(mat->type))
        {
            int pix_size = CV_ELEM_SIZE(type);
            ptr = mat->data.ptr + (size_t)idx*pix_size;
        }
        else
        {
            // decompose the linear index dimension by dimension
            ptr = mat->data.ptr;
            for( j = mat->dims - 1; j >= 0; j-- )
            {
                int sz = mat->dim[j].size;
                if( sz )
                {
                    int t = idx/sz;
                    ptr += (idx - t*sz)*mat->dim[j].step;
                    idx = t;
                }
            }
        }
    }
    else if( CV_IS_SPARSE_MAT( arr ))
    {
        CvSparseMat* m = (CvSparseMat*)arr;
        if( m->dims == 1 )
            ptr = icvGetNodePtr( (CvSparseMat*)arr, &idx, _type, 1, 0 );
        else
        {
            // split the linear index into per-dimension indices
            int i, n = m->dims;
            CV_DbgAssert( n <= CV_MAX_DIM_HEAP );
            int _idx[CV_MAX_DIM_HEAP];

            for( i = n - 1; i >= 0; i-- )
            {
                int t = idx / m->size[i];
                _idx[i] = idx - t*m->size[i];
                idx = t;
            }
            ptr = icvGetNodePtr( (CvSparseMat*)arr, _idx, _type, 1, 0 );
        }
    }
    else
    {
        CV_Error( CV_StsBadArg, "unrecognized or unsupported array type" );
    }

    return ptr;
}


// Returns pointer to specified element of 2d array
CV_IMPL uchar*
cvPtr2D( const CvArr* arr, int y, int x, int* _type )
{
    uchar* ptr = 0;
    if( CV_IS_MAT( arr ))
    {
        CvMat* mat = (CvMat*)arr;
        int type;

        if( (unsigned)y >= (unsigned)(mat->rows) ||
            (unsigned)x >= (unsigned)(mat->cols) )
            CV_Error( CV_StsOutOfRange, "index is out of range" );

        type = CV_MAT_TYPE(mat->type);
        if( _type )
            *_type = type;

        ptr = mat->data.ptr + (size_t)y*mat->step + x*CV_ELEM_SIZE(type);
    }
    else if( CV_IS_IMAGE( arr ))
    {
        IplImage* img = (IplImage*)arr;
        int pix_size = (img->depth & 255) >> 3;
        int width, height;
        ptr = (uchar*)img->imageData;

        // interleaved layout: a pixel spans all channels
        if( img->dataOrder == 0 )
            pix_size *= img->nChannels;

        if( img->roi )
        {
            width = img->roi->width;
            height = img->roi->height;

            ptr += img->roi->yOffset*img->widthStep +
                   img->roi->xOffset*pix_size;

            if( img->dataOrder )
            {
                // planar layout: step to the plane selected by COI
                int coi = img->roi->coi;
                if( !coi )
                    CV_Error( CV_BadCOI,
                    "COI must be non-null in case of planar images" );
                ptr += (coi - 1)*img->imageSize;
            }
        }
        else
        {
            width = img->width;
            height = img->height;
        }

        if( (unsigned)y >= (unsigned)height ||
            (unsigned)x >= (unsigned)width )
            CV_Error( CV_StsOutOfRange, "index is out of range" );

        ptr += y*img->widthStep + x*pix_size;

        if( _type )
        {
            int type = IPL2CV_DEPTH(img->depth);
            if( type < 0 || (unsigned)(img->nChannels - 1) > 3 )
                CV_Error( CV_StsUnsupportedFormat, "" );

            *_type = CV_MAKETYPE( type, img->nChannels );
        }
    }
    else if( CV_IS_MATND( arr ))
    {
        CvMatND* mat = (CvMatND*)arr;

        if( mat->dims != 2 ||
            (unsigned)y >= (unsigned)(mat->dim[0].size) ||
            (unsigned)x >= (unsigned)(mat->dim[1].size) )
            CV_Error( CV_StsOutOfRange, "index is out of range" );

        ptr = mat->data.ptr + (size_t)y*mat->dim[0].step + x*mat->dim[1].step;
        if( _type )
            *_type = CV_MAT_TYPE(mat->type);
    }
    else if( CV_IS_SPARSE_MAT( arr ))
    {
        int idx[] = { y, x };
        ptr = icvGetNodePtr( (CvSparseMat*)arr, idx, _type, 1, 0 );
    }
    else
    {
        CV_Error( CV_StsBadArg, "unrecognized or unsupported array type" );
    }

    return ptr;
}


// Returns pointer to specified element of 3d array
CV_IMPL uchar*
cvPtr3D( const CvArr* arr, int z, int y, int x, int* _type )
{
    uchar* ptr = 0;
    if( CV_IS_MATND( arr ))
    {
        CvMatND* mat = (CvMatND*)arr;

        if( mat->dims != 3 ||
            (unsigned)z >= (unsigned)(mat->dim[0].size) ||
            (unsigned)y >= (unsigned)(mat->dim[1].size) ||
            (unsigned)x >= (unsigned)(mat->dim[2].size) )
            CV_Error( CV_StsOutOfRange, "index is out of range" );

        ptr = mat->data.ptr + (size_t)z*mat->dim[0].step +
              (size_t)y*mat->dim[1].step + x*mat->dim[2].step;

        if( _type )
            *_type = CV_MAT_TYPE(mat->type);
    }
    else if( CV_IS_SPARSE_MAT( arr ))
    {
        int idx[] = { z, y, x };
        ptr = icvGetNodePtr( (CvSparseMat*)arr, idx, _type, 1, 0 );
    }
    else
    {
        CV_Error( CV_StsBadArg, "unrecognized or unsupported array type" );
    }

    return ptr;
}


// Returns pointer to specified element of n-d array
CV_IMPL uchar*
cvPtrND( const CvArr* arr, const int* idx, int* _type,
         int create_node, unsigned* precalc_hashval )
{
    uchar* ptr = 0;
    if( !idx )
        CV_Error( CV_StsNullPtr, "NULL pointer to indices" );

    if( CV_IS_SPARSE_MAT( arr ))
        ptr = icvGetNodePtr( (CvSparseMat*)arr, idx,
                             _type, create_node, precalc_hashval );
    else if( CV_IS_MATND( arr ))
    {
        CvMatND* mat = (CvMatND*)arr;
        int i;
        ptr = mat->data.ptr;

        for( i = 0; i < mat->dims; i++ )
        {
            if( (unsigned)idx[i] >= (unsigned)(mat->dim[i].size) )
                CV_Error( CV_StsOutOfRange, "index is out of range" );
            ptr += (size_t)idx[i]*mat->dim[i].step;
        }

        if( _type )
            *_type = CV_MAT_TYPE(mat->type);
    }
    else if( CV_IS_MAT_HDR(arr) || CV_IS_IMAGE_HDR(arr) )
        ptr = cvPtr2D( arr, idx[0], idx[1], _type );
    else
        CV_Error( CV_StsBadArg, "unrecognized or unsupported array type" );

    return ptr;
}


// Returns specified element of n-D array given linear index
CV_IMPL CvScalar
cvGet1D( const CvArr* arr, int idx )
{
    CvScalar scalar = {{0,0,0,0}};
    int type = 0;
    uchar* ptr;

    // fast path: a continuous CvMat is addressed directly
    if( CV_IS_MAT( arr ) && CV_IS_MAT_CONT( ((CvMat*)arr)->type ))
    {
        CvMat* mat = (CvMat*)arr;

        type = CV_MAT_TYPE(mat->type);
        int pix_size = CV_ELEM_SIZE(type);

        // the first part is mul-free sufficient check
        // that the index is within the matrix
        if( (unsigned)idx >= (unsigned)(mat->rows + mat->cols - 1) &&
            (unsigned)idx >= (unsigned)(mat->rows*mat->cols))
            CV_Error( CV_StsOutOfRange, "index is out of range" );

        ptr = mat->data.ptr + (size_t)idx*pix_size;
    }
    else if( !CV_IS_SPARSE_MAT( arr ) || ((CvSparseMat*)arr)->dims > 1 )
        ptr = cvPtr1D( arr, idx, &type );
    else
        ptr = icvGetNodePtr( (CvSparseMat*)arr, &idx, &type, 0, 0 );

    // for sparse matrices ptr may be 0 (absent node) => zero scalar
    if( ptr )
        cvRawDataToScalar( ptr, type, &scalar );

    return scalar;
}


// Returns specified element of 2D array
CV_IMPL CvScalar
cvGet2D( const CvArr* arr, int y, int x )
{
    CvScalar scalar = {{0,0,0,0}};
    int type = 0;
    uchar* ptr;

    if( CV_IS_MAT( arr ))
    {
        CvMat* mat = (CvMat*)arr;

        if( (unsigned)y >= (unsigned)(mat->rows) ||
            (unsigned)x >= (unsigned)(mat->cols) )
            CV_Error( CV_StsOutOfRange, "index is out of range" );

        type = CV_MAT_TYPE(mat->type);
        ptr = mat->data.ptr + (size_t)y*mat->step + x*CV_ELEM_SIZE(type);
    }
    else if( !CV_IS_SPARSE_MAT( arr ))
        ptr = cvPtr2D( arr, y, x, &type );
    else
    {
        int idx[] = { y, x };
        ptr = icvGetNodePtr( (CvSparseMat*)arr, idx, &type, 0, 0 );
    }

    if( ptr )
        cvRawDataToScalar( ptr, type, &scalar );

    return scalar;
}


// Returns specified element of 3D array
CV_IMPL CvScalar
cvGet3D( const CvArr* arr, int z, int y, int x )
{
    CvScalar scalar = {{0,0,0,0}};
    int type = 0;
    uchar* ptr;

    if( !CV_IS_SPARSE_MAT( arr ))
        ptr = cvPtr3D( arr, z, y, x, &type );
    else
    {
        int idx[] = { z, y, x };
        ptr = icvGetNodePtr( (CvSparseMat*)arr, idx, &type, 0, 0 );
    }

    if( ptr )
        cvRawDataToScalar( ptr, type, &scalar );
    return scalar;
}


// Returns specified element of nD array
CV_IMPL CvScalar
cvGetND( const CvArr* arr, const int* idx )
{
    CvScalar scalar = {{0,0,0,0}};
    int type = 0;
    uchar* ptr;

    if( !CV_IS_SPARSE_MAT( arr ))
        ptr = cvPtrND( arr, idx, &type );
    else
        ptr = icvGetNodePtr( (CvSparseMat*)arr, idx, &type, 0, 0 );

    if( ptr )
        cvRawDataToScalar( ptr, type, &scalar );

    return scalar;
}


// Returns specified element of n-D array given linear index
CV_IMPL double
cvGetReal1D( const CvArr* arr, int idx )
{
    double value = 0;
    int type = 0;
    uchar* ptr;

    // fast path: a continuous CvMat is addressed directly
    if( CV_IS_MAT( arr ) && CV_IS_MAT_CONT( ((CvMat*)arr)->type ))
    {
        CvMat* mat = (CvMat*)arr;

        type = CV_MAT_TYPE(mat->type);
        int pix_size = CV_ELEM_SIZE(type);

        // the first part is mul-free sufficient check
        // that the index is within the matrix
        if( (unsigned)idx >= (unsigned)(mat->rows + mat->cols - 1) &&
            (unsigned)idx >= (unsigned)(mat->rows*mat->cols))
            CV_Error( CV_StsOutOfRange, "index is out of range" );

        ptr = mat->data.ptr + (size_t)idx*pix_size;
    }
    else if( !CV_IS_SPARSE_MAT( arr ) || ((CvSparseMat*)arr)->dims > 1 )
        ptr = cvPtr1D( arr, idx, &type );
    else
        ptr = icvGetNodePtr( (CvSparseMat*)arr, &idx, &type, 0, 0 );

    if( ptr )
    {
        if( CV_MAT_CN( type ) > 1 )
            CV_Error( CV_BadNumChannels, "cvGetReal* support only single-channel arrays" );

        value = icvGetReal( ptr, type );
    }
    return value;
}


// Returns specified element of 2D array
CV_IMPL double
cvGetReal2D( const CvArr* arr, int y, int x )
{
    double value = 0;
    int type = 0;
    uchar* ptr;

    if( CV_IS_MAT( arr ))
    {
        CvMat* mat = (CvMat*)arr;

        if( (unsigned)y >= (unsigned)(mat->rows) ||
            (unsigned)x >= (unsigned)(mat->cols) )
            CV_Error( CV_StsOutOfRange, "index is out of range" );

        type = CV_MAT_TYPE(mat->type);
        ptr = mat->data.ptr + (size_t)y*mat->step + x*CV_ELEM_SIZE(type);
    }
    else if( !CV_IS_SPARSE_MAT( arr ))
        ptr = cvPtr2D( arr, y, x, &type );
    else
    {
        int idx[] = { y, x };
        ptr = icvGetNodePtr( (CvSparseMat*)arr, idx, &type, 0, 0 );
    }

    if( ptr )
    {
        if( CV_MAT_CN( type ) > 1 )
            CV_Error( CV_BadNumChannels, "cvGetReal* support only single-channel arrays" );

        value = icvGetReal( ptr, type );
    }

    return value;
}


// Returns specified element of 3D array
CV_IMPL double
cvGetReal3D( const CvArr* arr, int z, int y, int x )
{
    double value = 0;
    int type = 0;
    uchar* ptr;

    if( !CV_IS_SPARSE_MAT( arr ))
        ptr = cvPtr3D( arr, z, y, x, &type );
    else
    {
        int idx[] = { z, y, x };
        ptr = icvGetNodePtr( (CvSparseMat*)arr, idx, &type, 0, 0 );
    }

    if( ptr )
    {
        if( CV_MAT_CN( type ) > 1 )
            CV_Error( CV_BadNumChannels, "cvGetReal* support only single-channel arrays" );

        value = icvGetReal( ptr, type );
    }

    return value;
}


// Returns specified element of nD array
CV_IMPL double
cvGetRealND( const CvArr* arr, const int* idx )
{
    double value = 0;
    int type = 0;
    uchar* ptr;

    if( !CV_IS_SPARSE_MAT( arr ))
        ptr = cvPtrND( arr, idx, &type );
    else
        ptr = icvGetNodePtr( (CvSparseMat*)arr, idx, &type, 0, 0 );

    if( ptr )
    {
        if( CV_MAT_CN( type ) > 1 )
            CV_Error( CV_BadNumChannels, "cvGetReal* support only single-channel arrays" );

        value = icvGetReal( ptr, type );
    }

    return value;
}


// Assigns new value to specified element of nD array given linear index
CV_IMPL void
cvSet1D( CvArr* arr, int idx, CvScalar scalar )
{
    int type = 0;
    uchar* ptr;

    if( CV_IS_MAT( arr ) && CV_IS_MAT_CONT( ((CvMat*)arr)->type ))
    {
        CvMat* mat = (CvMat*)arr;

        type = CV_MAT_TYPE(mat->type);
        int pix_size = CV_ELEM_SIZE(type);

        // the
first part is mul-free sufficient check + // that the index is within the matrix + if( (unsigned)idx >= (unsigned)(mat->rows + mat->cols - 1) && + (unsigned)idx >= (unsigned)(mat->rows*mat->cols)) + CV_Error( CV_StsOutOfRange, "index is out of range" ); + + ptr = mat->data.ptr + (size_t)idx*pix_size; + } + else if( !CV_IS_SPARSE_MAT( arr ) || ((CvSparseMat*)arr)->dims > 1 ) + ptr = cvPtr1D( arr, idx, &type ); + else + ptr = icvGetNodePtr( (CvSparseMat*)arr, &idx, &type, -1, 0 ); + + cvScalarToRawData( &scalar, ptr, type ); +} + + +// Assigns new value to specifed element of 2D array +CV_IMPL void +cvSet2D( CvArr* arr, int y, int x, CvScalar scalar ) +{ + int type = 0; + uchar* ptr; + + if( CV_IS_MAT( arr )) + { + CvMat* mat = (CvMat*)arr; + + if( (unsigned)y >= (unsigned)(mat->rows) || + (unsigned)x >= (unsigned)(mat->cols) ) + CV_Error( CV_StsOutOfRange, "index is out of range" ); + + type = CV_MAT_TYPE(mat->type); + ptr = mat->data.ptr + (size_t)y*mat->step + x*CV_ELEM_SIZE(type); + } + else if( !CV_IS_SPARSE_MAT( arr )) + ptr = cvPtr2D( arr, y, x, &type ); + else + { + int idx[] = { y, x }; + ptr = icvGetNodePtr( (CvSparseMat*)arr, idx, &type, -1, 0 ); + } + cvScalarToRawData( &scalar, ptr, type ); +} + + +// Assigns new value to specifed element of 3D array +CV_IMPL void +cvSet3D( CvArr* arr, int z, int y, int x, CvScalar scalar ) +{ + int type = 0; + uchar* ptr; + + if( !CV_IS_SPARSE_MAT( arr )) + ptr = cvPtr3D( arr, z, y, x, &type ); + else + { + int idx[] = { z, y, x }; + ptr = icvGetNodePtr( (CvSparseMat*)arr, idx, &type, -1, 0 ); + } + cvScalarToRawData( &scalar, ptr, type ); +} + + +// Assigns new value to specifed element of nD array +CV_IMPL void +cvSetND( CvArr* arr, const int* idx, CvScalar scalar ) +{ + int type = 0; + uchar* ptr; + + if( !CV_IS_SPARSE_MAT( arr )) + ptr = cvPtrND( arr, idx, &type ); + else + ptr = icvGetNodePtr( (CvSparseMat*)arr, idx, &type, -1, 0 ); + cvScalarToRawData( &scalar, ptr, type ); +} + + +CV_IMPL void +cvSetReal1D( 
CvArr* arr, int idx, double value ) +{ + int type = 0; + uchar* ptr; + + if( CV_IS_MAT( arr ) && CV_IS_MAT_CONT( ((CvMat*)arr)->type )) + { + CvMat* mat = (CvMat*)arr; + + type = CV_MAT_TYPE(mat->type); + int pix_size = CV_ELEM_SIZE(type); + + // the first part is mul-free sufficient check + // that the index is within the matrix + if( (unsigned)idx >= (unsigned)(mat->rows + mat->cols - 1) && + (unsigned)idx >= (unsigned)(mat->rows*mat->cols)) + CV_Error( CV_StsOutOfRange, "index is out of range" ); + + ptr = mat->data.ptr + (size_t)idx*pix_size; + } + else if( !CV_IS_SPARSE_MAT( arr ) || ((CvSparseMat*)arr)->dims > 1 ) + ptr = cvPtr1D( arr, idx, &type ); + else + ptr = icvGetNodePtr( (CvSparseMat*)arr, &idx, &type, -1, 0 ); + + if( CV_MAT_CN( type ) > 1 ) + CV_Error( CV_BadNumChannels, "cvSetReal* support only single-channel arrays" ); + + if( ptr ) + icvSetReal( value, ptr, type ); +} + + +CV_IMPL void +cvSetReal2D( CvArr* arr, int y, int x, double value ) +{ + int type = 0; + uchar* ptr; + + if( CV_IS_MAT( arr )) + { + CvMat* mat = (CvMat*)arr; + + if( (unsigned)y >= (unsigned)(mat->rows) || + (unsigned)x >= (unsigned)(mat->cols) ) + CV_Error( CV_StsOutOfRange, "index is out of range" ); + + type = CV_MAT_TYPE(mat->type); + ptr = mat->data.ptr + (size_t)y*mat->step + x*CV_ELEM_SIZE(type); + } + else if( !CV_IS_SPARSE_MAT( arr )) + { + ptr = cvPtr2D( arr, y, x, &type ); + } + else + { + int idx[] = { y, x }; + ptr = icvGetNodePtr( (CvSparseMat*)arr, idx, &type, -1, 0 ); + } + if( CV_MAT_CN( type ) > 1 ) + CV_Error( CV_BadNumChannels, "cvSetReal* support only single-channel arrays" ); + + if( ptr ) + icvSetReal( value, ptr, type ); +} + + +CV_IMPL void +cvSetReal3D( CvArr* arr, int z, int y, int x, double value ) +{ + int type = 0; + uchar* ptr; + + if( !CV_IS_SPARSE_MAT( arr )) + ptr = cvPtr3D( arr, z, y, x, &type ); + else + { + int idx[] = { z, y, x }; + ptr = icvGetNodePtr( (CvSparseMat*)arr, idx, &type, -1, 0 ); + } + if( CV_MAT_CN( type ) > 1 ) + CV_Error( 
CV_BadNumChannels, "cvSetReal* support only single-channel arrays" ); + + if( ptr ) + icvSetReal( value, ptr, type ); +} + + +CV_IMPL void +cvSetRealND( CvArr* arr, const int* idx, double value ) +{ + int type = 0; + uchar* ptr; + + if( !CV_IS_SPARSE_MAT( arr )) + ptr = cvPtrND( arr, idx, &type ); + else + ptr = icvGetNodePtr( (CvSparseMat*)arr, idx, &type, -1, 0 ); + + if( CV_MAT_CN( type ) > 1 ) + CV_Error( CV_BadNumChannels, "cvSetReal* support only single-channel arrays" ); + + if( ptr ) + icvSetReal( value, ptr, type ); +} + + +CV_IMPL void +cvClearND( CvArr* arr, const int* idx ) +{ + if( !CV_IS_SPARSE_MAT( arr )) + { + int type; + uchar* ptr; + ptr = cvPtrND( arr, idx, &type ); + if( ptr ) + memset( ptr, 0, CV_ELEM_SIZE(type) ); + } + else + icvDeleteNode( (CvSparseMat*)arr, idx, 0 ); +} + + +/****************************************************************************************\ +* Conversion to CvMat or IplImage * +\****************************************************************************************/ + +// convert array (CvMat or IplImage) to CvMat +CV_IMPL CvMat* +cvGetMat( const CvArr* array, CvMat* mat, + int* pCOI, int allowND ) +{ + CvMat* result = 0; + CvMat* src = (CvMat*)array; + int coi = 0; + + if( !mat || !src ) + CV_Error( CV_StsNullPtr, "NULL array pointer is passed" ); + + if( CV_IS_MAT_HDR(src)) + { + if( !src->data.ptr ) + CV_Error( CV_StsNullPtr, "The matrix has NULL data pointer" ); + + result = (CvMat*)src; + } + else if( CV_IS_IMAGE_HDR(src) ) + { + const IplImage* img = (const IplImage*)src; + int depth, order; + + if( img->imageData == 0 ) + CV_Error( CV_StsNullPtr, "The image has NULL data pointer" ); + + depth = IPL2CV_DEPTH( img->depth ); + if( depth < 0 ) + CV_Error( CV_BadDepth, "" ); + + order = img->dataOrder & (img->nChannels > 1 ? 
-1 : 0); + + if( img->roi ) + { + if( order == IPL_DATA_ORDER_PLANE ) + { + int type = depth; + + if( img->roi->coi == 0 ) + CV_Error( CV_StsBadFlag, + "Images with planar data layout should be used with COI selected" ); + + cvInitMatHeader( mat, img->roi->height, + img->roi->width, type, + img->imageData + (img->roi->coi-1)*img->imageSize + + img->roi->yOffset*img->widthStep + + img->roi->xOffset*CV_ELEM_SIZE(type), + img->widthStep ); + } + else /* pixel order */ + { + int type = CV_MAKETYPE( depth, img->nChannels ); + coi = img->roi->coi; + + if( img->nChannels > CV_CN_MAX ) + CV_Error( CV_BadNumChannels, + "The image is interleaved and has over CV_CN_MAX channels" ); + + cvInitMatHeader( mat, img->roi->height, img->roi->width, + type, img->imageData + + img->roi->yOffset*img->widthStep + + img->roi->xOffset*CV_ELEM_SIZE(type), + img->widthStep ); + } + } + else + { + int type = CV_MAKETYPE( depth, img->nChannels ); + + if( order != IPL_DATA_ORDER_PIXEL ) + CV_Error( CV_StsBadFlag, "Pixel order should be used with coi == 0" ); + + cvInitMatHeader( mat, img->height, img->width, type, + img->imageData, img->widthStep ); + } + + result = mat; + } + else if( allowND && CV_IS_MATND_HDR(src) ) + { + CvMatND* matnd = (CvMatND*)src; + int i; + int size1 = matnd->dim[0].size, size2 = 1; + + if( !src->data.ptr ) + CV_Error( CV_StsNullPtr, "Input array has NULL data pointer" ); + + if( !CV_IS_MAT_CONT( matnd->type )) + CV_Error( CV_StsBadArg, "Only continuous nD arrays are supported here" ); + + if( matnd->dims > 2 ) + for( i = 1; i < matnd->dims; i++ ) + size2 *= matnd->dim[i].size; + else + size2 = matnd->dims == 1 ? 1 : matnd->dim[1].size; + + mat->refcount = 0; + mat->hdr_refcount = 0; + mat->data.ptr = matnd->data.ptr; + mat->rows = size1; + mat->cols = size2; + mat->type = CV_MAT_TYPE(matnd->type) | CV_MAT_MAGIC_VAL | CV_MAT_CONT_FLAG; + mat->step = size2*CV_ELEM_SIZE(matnd->type); + mat->step &= size1 > 1 ? 
-1 : 0; + + icvCheckHuge( mat ); + result = mat; + } + else + CV_Error( CV_StsBadFlag, "Unrecognized or unsupported array type" ); + + if( pCOI ) + *pCOI = coi; + + return result; +} + + +CV_IMPL CvArr* +cvReshapeMatND( const CvArr* arr, + int sizeof_header, CvArr* _header, + int new_cn, int new_dims, int* new_sizes ) +{ + CvArr* result = 0; + int dims, coi = 0; + + if( !arr || !_header ) + CV_Error( CV_StsNullPtr, "NULL pointer to array or destination header" ); + + if( new_cn == 0 && new_dims == 0 ) + CV_Error( CV_StsBadArg, "None of array parameters is changed: dummy call?" ); + + dims = cvGetDims( arr ); + + if( new_dims == 0 ) + { + new_sizes = 0; + new_dims = dims; + } + else if( new_dims == 1 ) + { + new_sizes = 0; + } + else + { + if( new_dims <= 0 || new_dims > CV_MAX_DIM ) + CV_Error( CV_StsOutOfRange, "Non-positive or too large number of dimensions" ); + if( !new_sizes ) + CV_Error( CV_StsNullPtr, "New dimension sizes are not specified" ); + } + + if( new_dims <= 2 ) + { + CvMat* mat = (CvMat*)arr; + CvMat header; + int* refcount = 0; + int hdr_refcount = 0; + int total_width, new_rows, cn; + + if( sizeof_header != sizeof(CvMat) && sizeof_header != sizeof(CvMatND) ) + CV_Error( CV_StsBadArg, "The output header should be CvMat or CvMatND" ); + + if( mat == (CvMat*)_header ) + { + refcount = mat->refcount; + hdr_refcount = mat->hdr_refcount; + } + + if( !CV_IS_MAT( mat )) + mat = cvGetMat( mat, &header, &coi, 1 ); + + cn = CV_MAT_CN( mat->type ); + total_width = mat->cols * cn; + + if( new_cn == 0 ) + new_cn = cn; + + if( new_sizes ) + new_rows = new_sizes[0]; + else if( new_dims == 1 ) + new_rows = total_width*mat->rows/new_cn; + else + { + new_rows = mat->rows; + if( new_cn > total_width ) + new_rows = mat->rows * total_width / new_cn; + } + + if( new_rows != mat->rows ) + { + int total_size = total_width * mat->rows; + + if( !CV_IS_MAT_CONT( mat->type )) + CV_Error( CV_BadStep, + "The matrix is not continuous so the number of rows can not be changed" ); 
+ + total_width = total_size / new_rows; + + if( total_width * new_rows != total_size ) + CV_Error( CV_StsBadArg, "The total number of matrix elements " + "is not divisible by the new number of rows" ); + } + + header.rows = new_rows; + header.cols = total_width / new_cn; + + if( header.cols * new_cn != total_width || + (new_sizes && header.cols != new_sizes[1]) ) + CV_Error( CV_StsBadArg, "The total matrix width is not " + "divisible by the new number of columns" ); + + header.type = (mat->type & ~CV_MAT_TYPE_MASK) | CV_MAKETYPE(mat->type, new_cn); + header.step = header.cols * CV_ELEM_SIZE(mat->type); + header.step &= new_rows > 1 ? -1 : 0; + header.refcount = refcount; + header.hdr_refcount = hdr_refcount; + + if( sizeof_header == sizeof(CvMat) ) + *(CvMat*)_header = header; + else + { + CvMatND* __header = (CvMatND*)_header; + cvGetMatND(&header, __header, 0); + if( new_dims > 0 ) + __header->dims = new_dims; + } + } + else + { + CvMatND* header = (CvMatND*)_header; + + if( sizeof_header != sizeof(CvMatND)) + CV_Error( CV_StsBadSize, "The output header should be CvMatND" ); + + if( !new_sizes ) + { + if( !CV_IS_MATND( arr )) + CV_Error( CV_StsBadArg, "The input array must be CvMatND" ); + + { + CvMatND* mat = (CvMatND*)arr; + assert( new_cn > 0 ); + int last_dim_size = mat->dim[mat->dims-1].size*CV_MAT_CN(mat->type); + int new_size = last_dim_size/new_cn; + + if( new_size*new_cn != last_dim_size ) + CV_Error( CV_StsBadArg, + "The last dimension full size is not divisible by new number of channels"); + + if( mat != header ) + { + memcpy( header, mat, sizeof(*header)); + header->refcount = 0; + header->hdr_refcount = 0; + } + + header->dim[header->dims-1].size = new_size; + header->type = (header->type & ~CV_MAT_TYPE_MASK) | CV_MAKETYPE(header->type, new_cn); + } + } + else + { + CvMatND stub; + CvMatND* mat = (CvMatND*)arr; + int i, size1, size2; + int step; + + if( new_cn != 0 ) + CV_Error( CV_StsBadArg, + "Simultaneous change of shape and number of channels is 
not supported. " + "Do it by 2 separate calls" ); + + if( !CV_IS_MATND( mat )) + { + cvGetMatND( mat, &stub, &coi ); + mat = &stub; + } + + if( CV_IS_MAT_CONT( mat->type )) + CV_Error( CV_StsBadArg, "Non-continuous nD arrays are not supported" ); + + size1 = mat->dim[0].size; + for( i = 1; i < dims; i++ ) + size1 *= mat->dim[i].size; + + size2 = 1; + for( i = 0; i < new_dims; i++ ) + { + if( new_sizes[i] <= 0 ) + CV_Error( CV_StsBadSize, + "One of new dimension sizes is non-positive" ); + size2 *= new_sizes[i]; + } + + if( size1 != size2 ) + CV_Error( CV_StsBadSize, + "Number of elements in the original and reshaped array is different" ); + + if( header != mat ) + { + header->refcount = 0; + header->hdr_refcount = 0; + } + + header->dims = new_dims; + header->type = mat->type; + header->data.ptr = mat->data.ptr; + step = CV_ELEM_SIZE(header->type); + + for( i = new_dims - 1; i >= 0; i-- ) + { + header->dim[i].size = new_sizes[i]; + header->dim[i].step = step; + step *= new_sizes[i]; + } + } + } + + if( coi ) + CV_Error( CV_BadCOI, "COI is not supported by this operation" ); + + result = _header; + return result; +} + + +CV_IMPL CvMat* +cvReshape( const CvArr* array, CvMat* header, + int new_cn, int new_rows ) +{ + CvMat* result = 0; + CvMat *mat = (CvMat*)array; + int total_width, new_width; + + if( !header ) + CV_Error( CV_StsNullPtr, "" ); + + if( !CV_IS_MAT( mat )) + { + int coi = 0; + mat = cvGetMat( mat, header, &coi, 1 ); + if( coi ) + CV_Error( CV_BadCOI, "COI is not supported" ); + } + + if( new_cn == 0 ) + new_cn = CV_MAT_CN(mat->type); + else if( (unsigned)(new_cn - 1) > 3 ) + CV_Error( CV_BadNumChannels, "" ); + + if( mat != header ) + { + int hdr_refcount = header->hdr_refcount; + *header = *mat; + header->refcount = 0; + header->hdr_refcount = hdr_refcount; + } + + total_width = mat->cols * CV_MAT_CN( mat->type ); + + if( (new_cn > total_width || total_width % new_cn != 0) && new_rows == 0 ) + new_rows = mat->rows * total_width / new_cn; + + if( 
new_rows == 0 || new_rows == mat->rows ) + { + header->rows = mat->rows; + header->step = mat->step; + } + else + { + int total_size = total_width * mat->rows; + if( !CV_IS_MAT_CONT( mat->type )) + CV_Error( CV_BadStep, + "The matrix is not continuous, thus its number of rows can not be changed" ); + + if( (unsigned)new_rows > (unsigned)total_size ) + CV_Error( CV_StsOutOfRange, "Bad new number of rows" ); + + total_width = total_size / new_rows; + + if( total_width * new_rows != total_size ) + CV_Error( CV_StsBadArg, "The total number of matrix elements " + "is not divisible by the new number of rows" ); + + header->rows = new_rows; + header->step = total_width * CV_ELEM_SIZE1(mat->type); + } + + new_width = total_width / new_cn; + + if( new_width * new_cn != total_width ) + CV_Error( CV_BadNumChannels, + "The total width is not divisible by the new number of channels" ); + + header->cols = new_width; + header->type = (mat->type & ~CV_MAT_TYPE_MASK) | CV_MAKETYPE(mat->type, new_cn); + + result = header; + return result; +} + + +// convert array (CvMat or IplImage) to IplImage +CV_IMPL IplImage* +cvGetImage( const CvArr* array, IplImage* img ) +{ + IplImage* result = 0; + const IplImage* src = (const IplImage*)array; + int depth; + + if( !img ) + CV_Error( CV_StsNullPtr, "" ); + + if( !CV_IS_IMAGE_HDR(src) ) + { + const CvMat* mat = (const CvMat*)src; + + if( !CV_IS_MAT_HDR(mat)) + CV_Error( CV_StsBadFlag, "" ); + + if( mat->data.ptr == 0 ) + CV_Error( CV_StsNullPtr, "" ); + + depth = cvIplDepth(mat->type); + + cvInitImageHeader( img, cvSize(mat->cols, mat->rows), + depth, CV_MAT_CN(mat->type) ); + cvSetData( img, mat->data.ptr, mat->step ); + + result = img; + } + else + { + result = (IplImage*)src; + } + + return result; +} + + +/****************************************************************************************\ +* IplImage-specific functions * +\****************************************************************************************/ + +static IplROI* 
icvCreateROI( int coi, int xOffset, int yOffset, int width, int height ) +{ + IplROI *roi = 0; + if( !CvIPL.createROI ) + { + roi = (IplROI*)cvAlloc( sizeof(*roi)); + + roi->coi = coi; + roi->xOffset = xOffset; + roi->yOffset = yOffset; + roi->width = width; + roi->height = height; + } + else + { + roi = CvIPL.createROI( coi, xOffset, yOffset, width, height ); + } + + return roi; +} + +static void +icvGetColorModel( int nchannels, const char** colorModel, const char** channelSeq ) +{ + static const char* tab[][2] = + { + {"GRAY", "GRAY"}, + {"",""}, + {"RGB","BGR"}, + {"RGB","BGRA"} + }; + + nchannels--; + *colorModel = *channelSeq = ""; + + if( (unsigned)nchannels <= 3 ) + { + *colorModel = tab[nchannels][0]; + *channelSeq = tab[nchannels][1]; + } +} + + +// create IplImage header +CV_IMPL IplImage * +cvCreateImageHeader( CvSize size, int depth, int channels ) +{ + IplImage *img = 0; + + if( !CvIPL.createHeader ) + { + img = (IplImage *)cvAlloc( sizeof( *img )); + cvInitImageHeader( img, size, depth, channels, IPL_ORIGIN_TL, + CV_DEFAULT_IMAGE_ROW_ALIGN ); + } + else + { + const char *colorModel, *channelSeq; + + icvGetColorModel( channels, &colorModel, &channelSeq ); + + img = CvIPL.createHeader( channels, 0, depth, (char*)colorModel, (char*)channelSeq, + IPL_DATA_ORDER_PIXEL, IPL_ORIGIN_TL, + CV_DEFAULT_IMAGE_ROW_ALIGN, + size.width, size.height, 0, 0, 0, 0 ); + } + + return img; +} + + +// create IplImage header and allocate underlying data +CV_IMPL IplImage * +cvCreateImage( CvSize size, int depth, int channels ) +{ + IplImage *img = cvCreateImageHeader( size, depth, channels ); + assert( img ); + cvCreateData( img ); + + return img; +} + + +// initalize IplImage header, allocated by the user +CV_IMPL IplImage* +cvInitImageHeader( IplImage * image, CvSize size, int depth, + int channels, int origin, int align ) +{ + const char *colorModel, *channelSeq; + + if( !image ) + CV_Error( CV_HeaderIsNull, "null pointer to header" ); + + memset( image, 0, sizeof( 
*image )); + image->nSize = sizeof( *image ); + + icvGetColorModel( channels, &colorModel, &channelSeq ); + strncpy( image->colorModel, colorModel, 4 ); + strncpy( image->channelSeq, channelSeq, 4 ); + + if( size.width < 0 || size.height < 0 ) + CV_Error( CV_BadROISize, "Bad input roi" ); + + if( (depth != (int)IPL_DEPTH_1U && depth != (int)IPL_DEPTH_8U && + depth != (int)IPL_DEPTH_8S && depth != (int)IPL_DEPTH_16U && + depth != (int)IPL_DEPTH_16S && depth != (int)IPL_DEPTH_32S && + depth != (int)IPL_DEPTH_32F && depth != (int)IPL_DEPTH_64F) || + channels < 0 ) + CV_Error( CV_BadDepth, "Unsupported format" ); + if( origin != CV_ORIGIN_BL && origin != CV_ORIGIN_TL ) + CV_Error( CV_BadOrigin, "Bad input origin" ); + + if( align != 4 && align != 8 ) + CV_Error( CV_BadAlign, "Bad input align" ); + + image->width = size.width; + image->height = size.height; + + if( image->roi ) + { + image->roi->coi = 0; + image->roi->xOffset = image->roi->yOffset = 0; + image->roi->width = size.width; + image->roi->height = size.height; + } + + image->nChannels = MAX( channels, 1 ); + image->depth = depth; + image->align = align; + image->widthStep = (((image->width * image->nChannels * + (image->depth & ~IPL_DEPTH_SIGN) + 7)/8)+ align - 1) & (~(align - 1)); + image->origin = origin; + image->imageSize = image->widthStep * image->height; + + return image; +} + + +CV_IMPL void +cvReleaseImageHeader( IplImage** image ) +{ + if( !image ) + CV_Error( CV_StsNullPtr, "" ); + + if( *image ) + { + IplImage* img = *image; + *image = 0; + + if( !CvIPL.deallocate ) + { + cvFree( &img->roi ); + cvFree( &img ); + } + else + { + CvIPL.deallocate( img, IPL_IMAGE_HEADER | IPL_IMAGE_ROI ); + } + } +} + + +CV_IMPL void +cvReleaseImage( IplImage ** image ) +{ + if( !image ) + CV_Error( CV_StsNullPtr, "" ); + + if( *image ) + { + IplImage* img = *image; + *image = 0; + + cvReleaseData( img ); + cvReleaseImageHeader( &img ); + } +} + + +CV_IMPL void +cvSetImageROI( IplImage* image, CvRect rect ) +{ + if( 
!image ) + CV_Error( CV_HeaderIsNull, "" ); + + // allow zero ROI width or height + CV_Assert( rect.width >= 0 && rect.height >= 0 && + rect.x < image->width && rect.y < image->height && + rect.x + rect.width >= (int)(rect.width > 0) && + rect.y + rect.height >= (int)(rect.height > 0) ); + + rect.width += rect.x; + rect.height += rect.y; + + rect.x = std::max(rect.x, 0); + rect.y = std::max(rect.y, 0); + rect.width = std::min(rect.width, image->width); + rect.height = std::min(rect.height, image->height); + + rect.width -= rect.x; + rect.height -= rect.y; + + if( image->roi ) + { + image->roi->xOffset = rect.x; + image->roi->yOffset = rect.y; + image->roi->width = rect.width; + image->roi->height = rect.height; + } + else + image->roi = icvCreateROI( 0, rect.x, rect.y, rect.width, rect.height ); +} + + +CV_IMPL void +cvResetImageROI( IplImage* image ) +{ + if( !image ) + CV_Error( CV_HeaderIsNull, "" ); + + if( image->roi ) + { + if( !CvIPL.deallocate ) + { + cvFree( &image->roi ); + } + else + { + CvIPL.deallocate( image, IPL_IMAGE_ROI ); + image->roi = 0; + } + } +} + + +CV_IMPL CvRect +cvGetImageROI( const IplImage* img ) +{ + CvRect rect = { 0, 0, 0, 0 }; + if( !img ) + CV_Error( CV_StsNullPtr, "Null pointer to image" ); + + if( img->roi ) + rect = cvRect( img->roi->xOffset, img->roi->yOffset, + img->roi->width, img->roi->height ); + else + rect = cvRect( 0, 0, img->width, img->height ); + + return rect; +} + + +CV_IMPL void +cvSetImageCOI( IplImage* image, int coi ) +{ + if( !image ) + CV_Error( CV_HeaderIsNull, "" ); + + if( (unsigned)coi > (unsigned)(image->nChannels) ) + CV_Error( CV_BadCOI, "" ); + + if( image->roi || coi != 0 ) + { + if( image->roi ) + { + image->roi->coi = coi; + } + else + { + image->roi = icvCreateROI( coi, 0, 0, image->width, image->height ); + } + } +} + + +CV_IMPL int +cvGetImageCOI( const IplImage* image ) +{ + if( !image ) + CV_Error( CV_HeaderIsNull, "" ); + + return image->roi ? 
image->roi->coi : 0; +} + + +CV_IMPL IplImage* +cvCloneImage( const IplImage* src ) +{ + IplImage* dst = 0; + + if( !CV_IS_IMAGE_HDR( src )) + CV_Error( CV_StsBadArg, "Bad image header" ); + + if( !CvIPL.cloneImage ) + { + dst = (IplImage*)cvAlloc( sizeof(*dst)); + + memcpy( dst, src, sizeof(*src)); + dst->imageData = dst->imageDataOrigin = 0; + dst->roi = 0; + + if( src->roi ) + { + dst->roi = icvCreateROI( src->roi->coi, src->roi->xOffset, + src->roi->yOffset, src->roi->width, src->roi->height ); + } + + if( src->imageData ) + { + int size = src->imageSize; + cvCreateData( dst ); + memcpy( dst->imageData, src->imageData, size ); + } + } + else + dst = CvIPL.cloneImage( src ); + + return dst; +} + + +/****************************************************************************************\ +* Additional operations on CvTermCriteria * +\****************************************************************************************/ + +CV_IMPL CvTermCriteria +cvCheckTermCriteria( CvTermCriteria criteria, double default_eps, + int default_max_iters ) +{ + CvTermCriteria crit; + + crit.type = CV_TERMCRIT_ITER|CV_TERMCRIT_EPS; + crit.max_iter = default_max_iters; + crit.epsilon = (float)default_eps; + + if( (criteria.type & ~(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER)) != 0 ) + CV_Error( CV_StsBadArg, + "Unknown type of term criteria" ); + + if( (criteria.type & CV_TERMCRIT_ITER) != 0 ) + { + if( criteria.max_iter <= 0 ) + CV_Error( CV_StsBadArg, + "Iterations flag is set and maximum number of iterations is <= 0" ); + crit.max_iter = criteria.max_iter; + } + + if( (criteria.type & CV_TERMCRIT_EPS) != 0 ) + { + if( criteria.epsilon < 0 ) + CV_Error( CV_StsBadArg, "Accuracy flag is set and epsilon is < 0" ); + + crit.epsilon = criteria.epsilon; + } + + if( (criteria.type & (CV_TERMCRIT_EPS | CV_TERMCRIT_ITER)) == 0 ) + CV_Error( CV_StsBadArg, + "Neither accuracy nor maximum iterations " + "number flags are set in criteria type" ); + + crit.epsilon = (float)MAX( 0, crit.epsilon ); + 
crit.max_iter = MAX( 1, crit.max_iter ); + + return crit; +} + +namespace cv +{ + +template<> void Ptr::delete_obj() +{ cvReleaseMat(&obj); } + +template<> void Ptr::delete_obj() +{ cvReleaseImage(&obj); } + +template<> void Ptr::delete_obj() +{ cvReleaseMatND(&obj); } + +template<> void Ptr::delete_obj() +{ cvReleaseSparseMat(&obj); } + +template<> void Ptr::delete_obj() +{ cvReleaseMemStorage(&obj); } + +template<> void Ptr::delete_obj() +{ cvReleaseFileStorage(&obj); } + +} + +/* End of file. */ diff --git a/opencv/core/cmdparser.cpp b/opencv/core/cmdparser.cpp new file mode 100644 index 0000000..f67f624 --- /dev/null +++ b/opencv/core/cmdparser.cpp @@ -0,0 +1,311 @@ +#include "precomp.hpp" +#include "iostream" + +using namespace std; +using namespace cv; + +void helpParser() +{ + printf("\nThe CommandLineParser class is designed for command line arguments parsing\n" + "Keys map: \n" + "Before you start to work with CommandLineParser you have to create a map for keys.\n" + " It will look like this\n" + " const char* keys =\n" + " {\n" + " { s| string| 123asd |string parameter}\n" + " { d| digit | 100 |digit parameter }\n" + " { c|noCamera|false |without camera }\n" + " { 1| |some text|help }\n" + " { 2| |333 |another help }\n" + " };\n" + "Usage syntax: \n" + " \"{\" - start of parameter string.\n" + " \"}\" - end of parameter string\n" + " \"|\" - separator between short name, full name, default value and help\n" + "Supported syntax: \n" + " --key1=arg1 \n" + " -key2=arg2 \n" + "Usage: \n" + " Imagine that the input parameters are next:\n" + " -s=string_value --digit=250 --noCamera lena.jpg 10000\n" + " CommandLineParser parser(argc, argv, keys) - create a parser object\n" + " parser.get(\"s\" or \"string\") will return you first parameter value\n" + " parser.get(\"s\", false or \"string\", false) will return you first parameter value\n" + " without spaces in end and begin\n" + " parser.get(\"d\" or \"digit\") will return you second parameter value.\n" + " It 
also works with 'unsigned int', 'double', and 'float' types>\n" + " parser.get(\"c\" or \"noCamera\") will return you true .\n" + " If you enter this key in commandline>\n" + " It return you false otherwise.\n" + " parser.get(\"1\") will return you the first argument without parameter (lena.jpg) \n" + " parser.get(\"2\") will return you the second argument without parameter (10000)\n" + " It also works with 'unsigned int', 'double', and 'float' types \n" + ); +} + +vector split_string(const string& str, const string& delimiters) +{ + vector res; + string::size_type lastPos = str.find_first_not_of(delimiters, 0); + string::size_type pos = str.find_first_of(delimiters, lastPos); + while (string::npos != pos || string::npos != lastPos) + { + + res.push_back(str.substr(lastPos, pos - lastPos)); + lastPos = str.find_first_not_of(delimiters, pos); + if (str[pos + 1] == '|' && str[pos] == '|') + { + res.push_back(""); + if(str[pos + 2] == '|') + res.push_back(""); + } + if (str[pos + 1] == '\0') + res.push_back(""); + pos = str.find_first_of(delimiters, lastPos); + } + + return res; +} + +CommandLineParser::CommandLineParser(int argc, const char* argv[], const char* keys) +{ + + std::string keys_buffer; + std::string values_buffer; + std::string buffer; + std::string curName; + std::vector keysVector; + std::vector paramVector; + std::map >::iterator it; + size_t flagPosition; + int currentIndex = 1; + bool isFound = false; + bool withNoKey = false; + bool hasValueThroughEq = false; + + keys_buffer = keys; + while (!keys_buffer.empty()) + { + + flagPosition = keys_buffer.find_first_of('}'); + flagPosition++; + buffer = keys_buffer.substr(0, flagPosition); + keys_buffer.erase(0, flagPosition); + + flagPosition = buffer.find('{'); + if (flagPosition != buffer.npos) + buffer.erase(flagPosition, (flagPosition + 1)); + + flagPosition = buffer.find('}'); + if (flagPosition != buffer.npos) + buffer.erase(flagPosition); + + paramVector = split_string(buffer, "|"); + buffer = 
paramVector[0]; + if (atoi(buffer.c_str()) == 0) + buffer = buffer + '|' + paramVector[1]; + + paramVector.erase(paramVector.begin(), paramVector.begin() + 2); + data[buffer] = paramVector; + } + + buffer.clear(); + keys_buffer.clear(); + paramVector.clear(); + for (int i = 1; i < argc; i++) + { + if (!argv[i]) + break; + curName = argv[i]; + if (curName.find('-') == 0 && ((curName[1] < '0') || (curName[1] > '9'))) + { + while (curName.find('-') == 0) + curName.erase(curName.begin(), (curName.begin() + 1)); + } + else + withNoKey = true; + if (curName.find('=') != curName.npos) + { + hasValueThroughEq = true; + buffer = curName; + curName.erase(curName.find('=')); + buffer.erase(0, (buffer.find('=') + 1)); + } + + for(it = data.begin(); it != data.end(); it++) + { + keys_buffer = it->first; + keysVector = split_string(keys_buffer, "| "); + if (keysVector.size() == 1) + keysVector.push_back(""); + values_buffer = it->second[0]; + if (((curName == keysVector[0]) || (curName == keysVector[1])) && hasValueThroughEq) + { + it->second[0] = buffer; + isFound = true; + break; + } + + if (!hasValueThroughEq && (values_buffer.find("false") == values_buffer.npos) && + ((curName == keysVector[0]) || (curName == keysVector[1]))) + + { + it->second[0] = argv[++i]; + isFound = true; + break; + } + + if (!hasValueThroughEq && (values_buffer.find("false") != values_buffer.npos) + && ((curName == keysVector[0]) || (curName == keysVector[1]))) + + { + it->second[0] = "true"; + isFound = true; + break; + } + + if (withNoKey) + { + std::string noKeyStr = it->first; + if(atoi(noKeyStr.c_str()) == currentIndex) + { + it->second[0] = curName; + currentIndex++; + isFound = true; + break; + } + } + } + + withNoKey = false; + hasValueThroughEq = false; + if(!isFound) + printf("The current parameter is not defined: %s\n", curName.c_str()); + isFound = false; + } + + +} + +bool CommandLineParser::has(const std::string& keys) +{ + std::map >::iterator it; + std::vector keysVector; + for(it = 
data.begin(); it != data.end(); it++) + { + keysVector = split_string(it->first, "| "); + if (keysVector.size() == 1) + keysVector.push_back(""); + if ((keys == keysVector[0]) || (keys == keysVector[1])) + return true; + } + return false; +} + +std::string CommandLineParser::getString(const std::string& keys) +{ + std::map >::iterator it; + std::vector valueVector; + + for(it = data.begin(); it != data.end(); it++) + { + valueVector = split_string(it->first, "| "); + if (valueVector.size() == 1) + valueVector.push_back(""); + if ((keys == valueVector[0]) || (keys == valueVector[1])) + return it->second[0]; + } + return string(); +} + +template + _Tp CommandLineParser::fromStringNumber(const std::string& str)//the default conversion function for numbers +{ + const char* c_str=str.c_str(); + if ((!isdigit(c_str[0])) + && + ( + (c_str[0]!='-') || (strlen(c_str) <= 1) || ( !isdigit(c_str[1]) ) + ) + ) + + { + printf("This string cannot be converted to a number. Zero will be returned %s\n ", str.c_str()); + return _Tp(); + } + + return getData<_Tp>(str); +} + + void CommandLineParser::printParams() + { + std::map >::iterator it; + std::vector keysVector; + for(it = data.begin(); it != data.end(); it++) + { + keysVector = split_string(it->first, "| "); + if (keysVector.size() == 1) + keysVector.push_back(""); + printf("\t%s [%8s] (%12s - by default) - %s\n", keysVector[0].c_str(), + keysVector[1].c_str(), it->second[0].c_str(), it->second[1].c_str()); + } + } + +template<> +bool CommandLineParser::get(const std::string& name, bool space_delete) +{ + std::string str_buf = getString(name); + if (space_delete) + { + while (str_buf.find_first_of(' ') == 0) + str_buf.erase(0, 1); + while (str_buf.find_last_of(' ') == (str_buf.length() - 1)) + str_buf.erase(str_buf.end() - 1, str_buf.end()); + } + if (str_buf == "false") + return false; + return true; +} +template<> +std::string CommandLineParser::analizeValue(const std::string& str, bool space_delete) +{ + if (space_delete) + 
{ + std::string str_buf = str; + while (str_buf.find_first_of(' ') == 0) + str_buf.erase(0, 1); + while (str_buf.find_last_of('-') == (str.length() - 1)) + str_buf.erase(str_buf.end() - 1, str_buf.end()); + return str_buf; + } + return str; +} + +template<> +int CommandLineParser::analizeValue(const std::string& str, bool space_delete) +{ + return fromStringNumber(str); +} + +template<> +unsigned int CommandLineParser::analizeValue(const std::string& str, bool space_delete) +{ + return fromStringNumber(str); +} + +template<> +float CommandLineParser::analizeValue(const std::string& str, bool space_delete) +{ + return fromStringNumber(str); +} + +template<> +double CommandLineParser::analizeValue(const std::string& str, bool space_delete) +{ + return fromStringNumber(str); +} diff --git a/opencv/core/convert.cpp b/opencv/core/convert.cpp new file mode 100644 index 0000000..c0a7c49 --- /dev/null +++ b/opencv/core/convert.cpp @@ -0,0 +1,1215 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"

namespace cv
{

/****************************************************************************************\
*                                    split & merge                                       *
\****************************************************************************************/

// De-interleave `len` pixels of a packed cn-channel row `src` into the
// per-channel planes dst[0..cn-1].  The first pass handles cn%4 (or 4)
// channels; the trailing loop then processes the remaining channels four at
// a time, so any channel count is covered without a per-pixel inner loop.
template<typename T> static void
split_( const T* src, T** dst, int len, int cn )
{
    int k = cn % 4 ? cn % 4 : 4;
    int i, j;
    if( k == 1 )
    {
        T* dst0 = dst[0];
        for( i = j = 0; i < len; i++, j += cn )
            dst0[i] = src[j];
    }
    else if( k == 2 )
    {
        T *dst0 = dst[0], *dst1 = dst[1];
        for( i = j = 0; i < len; i++, j += cn )
        {
            dst0[i] = src[j];
            dst1[i] = src[j+1];
        }
    }
    else if( k == 3 )
    {
        T *dst0 = dst[0], *dst1 = dst[1], *dst2 = dst[2];
        for( i = j = 0; i < len; i++, j += cn )
        {
            dst0[i] = src[j];
            dst1[i] = src[j+1];
            dst2[i] = src[j+2];
        }
    }
    else
    {
        T *dst0 = dst[0], *dst1 = dst[1], *dst2 = dst[2], *dst3 = dst[3];
        for( i = j = 0; i < len; i++, j += cn )
        {
            dst0[i] = src[j]; dst1[i] = src[j+1];
            dst2[i] = src[j+2]; dst3[i] = src[j+3];
        }
    }

    // remaining channels, four planes per pass
    for( ; k < cn; k += 4 )
    {
        T *dst0 = dst[k], *dst1 = dst[k+1], *dst2 = dst[k+2], *dst3 = dst[k+3];
        for( i = 0, j = k; i < len; i++, j += cn )
        {
            dst0[i] = src[j]; dst1[i] = src[j+1];
            dst2[i] = src[j+2]; dst3[i] = src[j+3];
        }
    }
}

// Inverse of split_: interleave the planes src[0..cn-1] into the packed
// cn-channel row `dst`.  Same unrolling scheme as split_ above.
template<typename T> static void
merge_( const T** src, T* dst, int len, int cn )
{
    int k = cn % 4 ? cn % 4 : 4;
    int i, j;
    if( k == 1 )
    {
        const T* src0 = src[0];
        for( i = j = 0; i < len; i++, j += cn )
            dst[j] = src0[i];
    }
    else if( k == 2 )
    {
        const T *src0 = src[0], *src1 = src[1];
        for( i = j = 0; i < len; i++, j += cn )
        {
            dst[j] = src0[i];
            dst[j+1] = src1[i];
        }
    }
    else if( k == 3 )
    {
        const T *src0 = src[0], *src1 = src[1], *src2 = src[2];
        for( i = j = 0; i < len; i++, j += cn )
        {
            dst[j] = src0[i];
            dst[j+1] = src1[i];
            dst[j+2] = src2[i];
        }
    }
    else
    {
        const T *src0 = src[0], *src1 = src[1], *src2 = src[2], *src3 = src[3];
        for( i = j = 0; i < len; i++, j += cn )
        {
            dst[j] = src0[i]; dst[j+1] = src1[i];
            dst[j+2] = src2[i]; dst[j+3] = src3[i];
        }
    }

    for( ; k < cn; k += 4 )
    {
        const T *src0 = src[k], *src1 = src[k+1], *src2 = src[k+2], *src3 = src[k+3];
        for( i = 0, j = k; i < len; i++, j += cn )
        {
            dst[j] = src0[i]; dst[j+1] = src1[i];
            dst[j+2] = src2[i]; dst[j+3] = src3[i];
        }
    }
}

// Non-template entry points per element size; the dispatch tables below only
// distinguish element *size*, so e.g. 8s reuses split8u.
static void split8u(const uchar* src, uchar** dst, int len, int cn )
{
    split_(src, dst, len, cn);
}

static void split16u(const ushort* src, ushort** dst, int len, int cn )
{
    split_(src, dst, len, cn);
}

static void split32s(const int* src, int** dst, int len, int cn )
{
    split_(src, dst, len, cn);
}

static void split64s(const int64* src, int64** dst, int len, int cn )
{
    split_(src, dst, len, cn);
}

static void merge8u(const uchar** src, uchar* dst, int len, int cn )
{
    merge_(src, dst, len, cn);
}

static void merge16u(const ushort** src, ushort* dst, int len, int cn )
{
    merge_(src, dst, len, cn);
}

static void merge32s(const int** src, int* dst, int len, int cn )
{
    merge_(src, dst, len, cn);
}

static void merge64s(const int64** src, int64* dst, int len, int cn )
{
    merge_(src, dst, len, cn);
}

typedef void (*SplitFunc)(const uchar* src, uchar** dst, int len, int cn);
typedef void (*MergeFunc)(const uchar** src, uchar* dst, int len, int cn);

// depth-indexed dispatch tables follow
static
SplitFunc splitTab[] =
{
    // indexed by CV_8U..CV_64F; same-size depths share one kernel
    (SplitFunc)split8u, (SplitFunc)split8u, (SplitFunc)split16u, (SplitFunc)split16u,
    (SplitFunc)split32s, (SplitFunc)split32s, (SplitFunc)split64s, 0
};

static MergeFunc mergeTab[] =
{
    (MergeFunc)merge8u, (MergeFunc)merge8u, (MergeFunc)merge16u, (MergeFunc)merge16u,
    (MergeFunc)merge32s, (MergeFunc)merge32s, (MergeFunc)merge64s, 0
};

}

// Split a multi-channel matrix into `cn` single-channel matrices mv[0..cn-1].
// Works on matrices of arbitrary dimensionality via NAryMatIterator and
// processes data in cache-friendly blocks of roughly BLOCK_SIZE bytes.
void cv::split(const Mat& src, Mat* mv)
{
    int k, depth = src.depth(), cn = src.channels();
    if( cn == 1 )
    {
        src.copyTo(mv[0]);
        return;
    }

    SplitFunc func = splitTab[depth];
    CV_Assert( func != 0 );

    int esz = (int)src.elemSize(), esz1 = (int)src.elemSize1();
    int blocksize0 = (BLOCK_SIZE + esz-1)/esz;
    // one buffer holds both the Mat* array and the (16-byte aligned) data
    // pointer array consumed by NAryMatIterator
    AutoBuffer<uchar> _buf((cn+1)*(sizeof(Mat*) + sizeof(uchar*)) + 16);
    const Mat** arrays = (const Mat**)(uchar*)_buf;
    uchar** ptrs = (uchar**)alignPtr(arrays + cn + 1, 16);

    arrays[0] = &src;
    for( k = 0; k < cn; k++ )
    {
        mv[k].create(src.dims, src.size, depth);
        arrays[k+1] = &mv[k];
    }

    NAryMatIterator it(arrays, ptrs, cn+1);
    int total = (int)it.size, blocksize = cn <= 4 ? total : std::min(total, blocksize0);

    for( size_t i = 0; i < it.nplanes; i++, ++it )
    {
        for( int j = 0; j < total; j += blocksize )
        {
            int bsz = std::min(total - j, blocksize);
            func( ptrs[0], &ptrs[1], bsz, cn );

            // advance pointers only when the plane is not yet exhausted;
            // ++it resets them for the next plane
            if( j + blocksize < total )
            {
                ptrs[0] += bsz*esz;
                for( k = 0; k < cn; k++ )
                    ptrs[k+1] += bsz*esz1;
            }
        }
    }
}

// vector<Mat> convenience overload of split.
void cv::split(const Mat& m, vector<Mat>& mv)
{
    mv.resize(!m.empty() ? m.channels() : 0);
    if(!m.empty())
        split(m, &mv[0]);
}

// Merge `n` matrices (possibly multi-channel themselves) into one matrix
// whose channel count is the sum of the inputs' channel counts.  All inputs
// must share size and depth.  The all-single-channel fast path mirrors
// cv::split; the general case is delegated to mixChannels.
void cv::merge(const Mat* mv, size_t n, OutputArray _dst)
{
    CV_Assert( mv && n > 0 );

    int depth = mv[0].depth();
    bool allch1 = true;
    int k, cn = 0;
    size_t i;

    for( i = 0; i < n; i++ )
    {
        CV_Assert(mv[i].size == mv[0].size && mv[i].depth() == depth);
        allch1 = allch1 && mv[i].channels() == 1;
        cn += mv[i].channels();
    }

    CV_Assert( 0 < cn && cn <= CV_CN_MAX );
    _dst.create(mv[0].dims, mv[0].size, CV_MAKETYPE(depth, cn));
    Mat dst = _dst.getMat();

    if( n == 1 )
    {
        mv[0].copyTo(dst);
        return;
    }

    if( !allch1 )
    {
        // build identity channel pairs and let mixChannels do the work
        AutoBuffer<int> pairs(cn*2);
        int j, ni=0;

        for( i = 0, j = 0; i < n; i++, j += ni )
        {
            ni = mv[i].channels();
            for( k = 0; k < ni; k++ )
            {
                pairs[(j+k)*2] = j + k;
                pairs[(j+k)*2+1] = j + k;
            }
        }
        mixChannels( mv, n, &dst, 1, &pairs[0], cn );
        return;
    }

    size_t esz = dst.elemSize(), esz1 = dst.elemSize1();
    int blocksize0 = (int)((BLOCK_SIZE + esz-1)/esz);
    AutoBuffer<uchar> _buf((cn+1)*(sizeof(Mat*) + sizeof(uchar*)) + 16);
    const Mat** arrays = (const Mat**)(uchar*)_buf;
    uchar** ptrs = (uchar**)alignPtr(arrays + cn + 1, 16);

    arrays[0] = &dst;
    for( k = 0; k < cn; k++ )
        arrays[k+1] = &mv[k];

    NAryMatIterator it(arrays, ptrs, cn+1);
    int total = (int)it.size, blocksize = cn <= 4 ? total : std::min(total, blocksize0);
    MergeFunc func = mergeTab[depth];

    for( i = 0; i < it.nplanes; i++, ++it )
    {
        for( int j = 0; j < total; j += blocksize )
        {
            int bsz = std::min(total - j, blocksize);
            func( (const uchar**)&ptrs[1], ptrs[0], bsz, cn );

            if( j + blocksize < total )
            {
                ptrs[0] += bsz*esz;
                for( int k = 0; k < cn; k++ )
                    ptrs[k+1] += bsz*esz1;
            }
        }
    }
}

// vector<Mat> convenience overload of merge.
void cv::merge(const vector<Mat>& mv, OutputArray _dst)
{
    merge(!mv.empty() ? &mv[0] : 0, mv.size(), _dst);
}

/****************************************************************************************\
*                       Generalized split/merge: mixing channels                         *
\****************************************************************************************/

namespace cv
{

// Copy `npairs` strided channel streams: for each pair k, every sdelta[k]-th
// element of src[k] goes to every ddelta[k]-th slot of dst[k].  A null
// src[k] means "fill the destination channel with zeros".  The loop is
// unrolled by two.
template<typename T> static void
mixChannels_( const T** src, const int* sdelta,
              T** dst, const int* ddelta,
              int len, int npairs )
{
    int i, k;
    for( k = 0; k < npairs; k++ )
    {
        const T* s = src[k];
        T* d = dst[k];
        int ds = sdelta[k], dd = ddelta[k];
        if( s )
        {
            for( i = 0; i <= len - 2; i += 2, s += ds*2, d += dd*2 )
            {
                T t0 = s[0], t1 = s[ds];
                d[0] = t0; d[dd] = t1;
            }
            if( i < len )
                d[0] = s[0];
        }
        else
        {
            for( i = 0; i <= len - 2; i += 2, d += dd*2 )
                d[0] = d[dd] = 0;
            if( i < len )
                d[0] = 0;
        }
    }
}


// Per-element-size entry points, dispatched by depth below.
static void mixChannels8u( const uchar** src, const int* sdelta,
                           uchar** dst, const int* ddelta,
                           int len, int npairs )
{
    mixChannels_(src, sdelta, dst, ddelta, len, npairs);
}

static void mixChannels16u( const ushort** src, const int* sdelta,
                            ushort** dst, const int* ddelta,
                            int len, int npairs )
{
    mixChannels_(src, sdelta, dst, ddelta, len, npairs);
}

static void mixChannels32s( const int** src, const int* sdelta,
                            int** dst, const int* ddelta,
                            int len, int npairs )
{
    mixChannels_(src, sdelta, dst, ddelta, len, npairs);
}

static void mixChannels64s( const int64** src, const int* sdelta,
                            int64** dst, const int* ddelta,
                            int len, int npairs )
{
    mixChannels_(src, sdelta, dst, ddelta, len, npairs);
}

typedef void (*MixChannelsFunc)( const uchar** src, const int* sdelta,
                                 uchar** dst, const int* ddelta, int len, int npairs );

static MixChannelsFunc mixchTab[] =
{
    (MixChannelsFunc)mixChannels8u, (MixChannelsFunc)mixChannels8u, (MixChannelsFunc)mixChannels16u,
    (MixChannelsFunc)mixChannels16u, (MixChannelsFunc)mixChannels32s, (MixChannelsFunc)mixChannels32s,
    (MixChannelsFunc)mixChannels64s, 0
};

}

// Copy arbitrary channel k of any source matrix into arbitrary channel of
// any destination matrix.  fromTo holds npairs (srcChannel, dstChannel)
// global channel indices; a negative source index zero-fills the target
// channel.  All matrices involved must share depth and size.
void cv::mixChannels( const Mat* src, size_t nsrcs, Mat* dst, size_t ndsts, const int* fromTo, size_t npairs )
{
    if( npairs == 0 )
        return;
    CV_Assert( src && nsrcs > 0 && dst && ndsts > 0 && fromTo && npairs > 0 );

    size_t i, j, k, esz1 = dst[0].elemSize1();
    int depth = dst[0].depth();

    // single allocation carved into: Mat* array, plane pointers, per-pair
    // src/dst data pointers, a 4-int lookup table per pair, and the strides
    AutoBuffer<uchar> buf((nsrcs + ndsts + 1)*(sizeof(Mat*) + sizeof(uchar*)) + npairs*(sizeof(uchar*)*2 + sizeof(int)*6));
    const Mat** arrays = (const Mat**)(uchar*)buf;
    uchar** ptrs = (uchar**)(arrays + nsrcs + ndsts);
    const uchar** srcs = (const uchar**)(ptrs + nsrcs + ndsts + 1);
    uchar** dsts = (uchar**)(srcs + npairs);
    int* tab = (int*)(dsts + npairs);
    int *sdelta = (int*)(tab + npairs*4), *ddelta = sdelta + npairs;

    for( i = 0; i < nsrcs; i++ )
        arrays[i] = &src[i];
    for( i = 0; i < ndsts; i++ )
        arrays[i + nsrcs] = &dst[i];
    // sentinel null plane pointer used by zero-fill pairs (i0 < 0)
    ptrs[nsrcs + ndsts] = 0;

    // resolve each global channel index to (matrix index, byte offset, stride)
    for( i = 0; i < npairs; i++ )
    {
        int i0 = fromTo[i*2], i1 = fromTo[i*2+1];
        if( i0 >= 0 )
        {
            for( j = 0; j < nsrcs; i0 -= src[j].channels(), j++ )
                if( i0 < src[j].channels() )
                    break;
            CV_Assert(j < nsrcs && src[j].depth() == depth);
            tab[i*4] = (int)j; tab[i*4+1] = (int)(i0*esz1);
            sdelta[i] = src[j].channels();
        }
        else
        {
            // negative source index -> read from the null sentinel (zero fill)
            tab[i*4] = (int)(nsrcs + ndsts); tab[i*4+1] = 0;
            sdelta[i] = 0;
        }

        for( j = 0; j < ndsts; i1 -= dst[j].channels(), j++ )
            if( i1 < dst[j].channels() )
                break;
        CV_Assert(i1 >= 0 && j < ndsts && dst[j].depth() == depth);
        tab[i*4+2] = (int)(j + nsrcs); tab[i*4+3] = (int)(i1*esz1);
        ddelta[i] = dst[j].channels();
    }

    NAryMatIterator it(arrays, ptrs, (int)(nsrcs + ndsts));
    int total = (int)it.size, blocksize = std::min(total, (int)((BLOCK_SIZE + esz1-1)/esz1));
    MixChannelsFunc func = mixchTab[depth];

    for( i = 0; i < it.nplanes; i++, ++it )
    {
        for( k = 0; k < npairs; k++ )
        {
            srcs[k] = ptrs[tab[k*4]] + tab[k*4+1];
            dsts[k] = ptrs[tab[k*4+2]] + tab[k*4+3];
        }

        for( int j = 0; j < total; j += blocksize )
        {
            int bsz = std::min(total - j, blocksize);
            func( srcs, sdelta, dsts, ddelta, bsz, (int)npairs );

            if( j + blocksize < total )
                for( k = 0; k < npairs; k++ )
                {
                    srcs[k] += blocksize*sdelta[k]*esz1;
                    dsts[k] += blocksize*ddelta[k]*esz1;
                }
        }
    }
}


// vector<Mat> convenience overload.
void cv::mixChannels(const vector<Mat>& src, vector<Mat>& dst,
                     const int* fromTo, size_t npairs)
{
    mixChannels(!src.empty() ? &src[0] : 0, src.size(),
                !dst.empty() ? &dst[0] : 0, dst.size(), fromTo, npairs);
}

// InputArrayOfArrays overload: materializes the Mat headers into a single
// AutoBuffer and forwards to the pointer-based implementation.
void cv::mixChannels(InputArrayOfArrays src, InputArrayOfArrays dst,
                     const vector<int>& fromTo)
{
    if(fromTo.empty())
        return;
    int i, nsrc = (int)src.total(), ndst = (int)dst.total();
    CV_Assert(fromTo.size()%2 == 0 && nsrc > 0 && ndst > 0);
    cv::AutoBuffer<Mat> _buf(nsrc + ndst);
    Mat* buf = _buf;
    for( i = 0; i < nsrc; i++ )
        buf[i] = src.getMat(i);
    for( i = 0; i < ndst; i++ )
        buf[nsrc + i] = dst.getMat(i);
    mixChannels(&buf[0], nsrc, &buf[nsrc], ndst, &fromTo[0], fromTo.size()/2);
}

// Copy a single channel `coi` of src into a freshly created single-channel dst.
void cv::extractChannel(InputArray _src, OutputArray _dst, int coi)
{
    Mat src = _src.getMat();
    CV_Assert( 0 <= coi && coi < src.channels() );
    _dst.create(src.dims, &src.size[0], src.depth());
    Mat dst = _dst.getMat();
    int ch[] = { coi, 0 };
    mixChannels(&src, 1, &dst, 1, ch, 1);
}

// Overwrite channel `coi` of dst with the single-channel matrix src.
void cv::insertChannel(InputArray _src, InputOutputArray _dst, int coi)
{
    Mat src = _src.getMat(), dst = _dst.getMat();
    CV_Assert( src.size == dst.size && src.depth() == dst.depth() );
    CV_Assert( 0 <= coi && coi < dst.channels() && src.channels() == 1 );
    int ch[] = { 0, coi };
    mixChannels(&src, 1, &dst, 1, ch, 1);
}

/****************************************************************************************\
*                                convertScale[Abs]                                       *
\****************************************************************************************/

namespace cv
{

// dst = saturate_cast<DT>(|src*scale + shift|), row by row; the inner loop
// is unrolled by four.  sstep/dstep arrive in bytes and are converted to
// element strides here.
template<typename T, typename DT, typename WT> static void
cvtScaleAbs_( const T* src, size_t sstep,
              DT* dst, size_t dstep, Size size,
              WT scale, WT shift )
{
    sstep /= sizeof(src[0]);
    dstep /= sizeof(dst[0]);

    for( ;
         size.height--; src += sstep, dst += dstep )
    {
        int x = 0;
        for( ; x <= size.width - 4; x += 4 )
        {
            DT t0, t1;
            t0 = saturate_cast<DT>(std::abs(src[x]*scale + shift));
            t1 = saturate_cast<DT>(std::abs(src[x+1]*scale + shift));
            dst[x] = t0; dst[x+1] = t1;
            t0 = saturate_cast<DT>(std::abs(src[x+2]*scale + shift));
            t1 = saturate_cast<DT>(std::abs(src[x+3]*scale + shift));
            dst[x+2] = t0; dst[x+3] = t1;
        }

        for( ; x < size.width; x++ )
            dst[x] = saturate_cast<DT>(std::abs(src[x]*scale + shift));
    }
}

// dst = saturate_cast<DT>(src*scale + shift); same layout and unrolling as
// cvtScaleAbs_ but without the absolute value.
template<typename T, typename DT, typename WT> static void
cvtScale_( const T* src, size_t sstep,
           DT* dst, size_t dstep, Size size,
           WT scale, WT shift )
{
    sstep /= sizeof(src[0]);
    dstep /= sizeof(dst[0]);

    for( ; size.height--; src += sstep, dst += dstep )
    {
        int x = 0;
        for( ; x <= size.width - 4; x += 4 )
        {
            DT t0, t1;
            t0 = saturate_cast<DT>(src[x]*scale + shift);
            t1 = saturate_cast<DT>(src[x+1]*scale + shift);
            dst[x] = t0; dst[x+1] = t1;
            t0 = saturate_cast<DT>(src[x+2]*scale + shift);
            t1 = saturate_cast<DT>(src[x+3]*scale + shift);
            dst[x+2] = t0; dst[x+3] = t1;
        }

        for( ; x < size.width; x++ )
            dst[x] = saturate_cast<DT>(src[x]*scale + shift);
    }
}


// Plain saturating type conversion, dst = saturate_cast<DT>(src).
template<typename T, typename DT> static void
cvt_( const T* src, size_t sstep,
      DT* dst, size_t dstep, Size size )
{
    sstep /= sizeof(src[0]);
    dstep /= sizeof(dst[0]);

    for( ; size.height--; src += sstep, dst += dstep )
    {
        int x = 0;
        for( ; x <= size.width - 4; x += 4 )
        {
            DT t0, t1;
            t0 = saturate_cast<DT>(src[x]);
            t1 = saturate_cast<DT>(src[x+1]);
            dst[x] = t0; dst[x+1] = t1;
            t0 = saturate_cast<DT>(src[x+2]);
            t1 = saturate_cast<DT>(src[x+3]);
            dst[x+2] = t0; dst[x+3] = t1;
        }

        for( ; x < size.width; x++ )
            dst[x] = saturate_cast<DT>(src[x]);
    }
}

// Same-type "conversion": a rowwise memcpy.
template<typename T> static void
cpy_( const T* src, size_t sstep, T* dst, size_t dstep, Size size )
{
    sstep /= sizeof(src[0]);
    dstep /= sizeof(dst[0]);

    for( ; size.height--; src += sstep, dst += dstep )
        memcpy(dst, src, size.width*sizeof(src[0]));
}

// The macros below stamp out one BinaryFunc-compatible wrapper per
// (source depth, destination depth) combination; scale[0]/scale[1] carry
// alpha/beta for the scaling variants.
#define DEF_CVT_SCALE_ABS_FUNC(suffix, tfunc, stype, dtype, wtype) \
static void cvtScaleAbs##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
                                 dtype* dst, size_t dstep, Size size, double* scale) \
{ \
    tfunc(src, sstep, dst, dstep, size, (wtype)scale[0], (wtype)scale[1]); \
}

#define DEF_CVT_SCALE_FUNC(suffix, stype, dtype, wtype) \
static void cvtScale##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
dtype* dst, size_t dstep, Size size, double* scale) \
{ \
    cvtScale_(src, sstep, dst, dstep, size, (wtype)scale[0], (wtype)scale[1]); \
}


#define DEF_CVT_FUNC(suffix, stype, dtype) \
static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
                         dtype* dst, size_t dstep, Size size, double*) \
{ \
    cvt_(src, sstep, dst, dstep, size); \
}

#define DEF_CPY_FUNC(suffix, stype) \
static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
stype* dst, size_t dstep, Size size, double*) \
{ \
    cpy_(src, sstep, dst, dstep, size); \
}


DEF_CVT_SCALE_ABS_FUNC(8u, cvtScaleAbs_, uchar, uchar, float);
DEF_CVT_SCALE_ABS_FUNC(8s8u, cvtScaleAbs_, schar, uchar, float);
DEF_CVT_SCALE_ABS_FUNC(16u8u, cvtScaleAbs_, ushort, uchar, float);
DEF_CVT_SCALE_ABS_FUNC(16s8u, cvtScaleAbs_, short, uchar, float);
DEF_CVT_SCALE_ABS_FUNC(32s8u, cvtScaleAbs_, int, uchar, float);
DEF_CVT_SCALE_ABS_FUNC(32f8u, cvtScaleAbs_, float, uchar, float);
DEF_CVT_SCALE_ABS_FUNC(64f8u, cvtScaleAbs_, double, uchar, float);

DEF_CVT_SCALE_FUNC(8u, uchar, uchar, float);
DEF_CVT_SCALE_FUNC(8s8u, schar, uchar, float);
DEF_CVT_SCALE_FUNC(16u8u, ushort, uchar, float);
DEF_CVT_SCALE_FUNC(16s8u, short, uchar, float);
DEF_CVT_SCALE_FUNC(32s8u, int, uchar, float);
DEF_CVT_SCALE_FUNC(32f8u, float, uchar, float);
DEF_CVT_SCALE_FUNC(64f8u, double, uchar, float);

DEF_CVT_SCALE_FUNC(8u8s, uchar, schar, float);
DEF_CVT_SCALE_FUNC(8s, schar, schar, float);
DEF_CVT_SCALE_FUNC(16u8s, ushort, schar, float);
DEF_CVT_SCALE_FUNC(16s8s, short, schar, float);
DEF_CVT_SCALE_FUNC(32s8s, int, schar, float);
DEF_CVT_SCALE_FUNC(32f8s, float, schar, float);
DEF_CVT_SCALE_FUNC(64f8s, double, schar, float);

DEF_CVT_SCALE_FUNC(8u16u, uchar, ushort, float);
DEF_CVT_SCALE_FUNC(8s16u, schar, ushort, float);
DEF_CVT_SCALE_FUNC(16u, ushort, ushort, float);
DEF_CVT_SCALE_FUNC(16s16u, short, ushort, float);
DEF_CVT_SCALE_FUNC(32s16u, int, ushort, float);
DEF_CVT_SCALE_FUNC(32f16u, float, ushort, float);
DEF_CVT_SCALE_FUNC(64f16u, double, ushort, float);

DEF_CVT_SCALE_FUNC(8u16s, uchar, short, float);
DEF_CVT_SCALE_FUNC(8s16s, schar, short, float);
DEF_CVT_SCALE_FUNC(16u16s, ushort, short, float);
DEF_CVT_SCALE_FUNC(16s, short, short, float);
DEF_CVT_SCALE_FUNC(32s16s, int, short, float);
DEF_CVT_SCALE_FUNC(32f16s, float, short, float);
DEF_CVT_SCALE_FUNC(64f16s, double, short, float);

DEF_CVT_SCALE_FUNC(8u32s, uchar, int, float);
DEF_CVT_SCALE_FUNC(8s32s, schar, int, float);
DEF_CVT_SCALE_FUNC(16u32s, ushort, int, float);
DEF_CVT_SCALE_FUNC(16s32s, short, int, float);
DEF_CVT_SCALE_FUNC(32s, int, int, double);
DEF_CVT_SCALE_FUNC(32f32s, float, int, float);
DEF_CVT_SCALE_FUNC(64f32s, double, int, double);

DEF_CVT_SCALE_FUNC(8u32f, uchar, float, float);
DEF_CVT_SCALE_FUNC(8s32f, schar, float, float);
DEF_CVT_SCALE_FUNC(16u32f, ushort, float, float);
DEF_CVT_SCALE_FUNC(16s32f, short, float, float);
DEF_CVT_SCALE_FUNC(32s32f, int, float, double);
DEF_CVT_SCALE_FUNC(32f, float, float, float);
DEF_CVT_SCALE_FUNC(64f32f, double, float, double);

DEF_CVT_SCALE_FUNC(8u64f, uchar, double, double);
DEF_CVT_SCALE_FUNC(8s64f, schar, double, double);
DEF_CVT_SCALE_FUNC(16u64f, ushort, double, double);
DEF_CVT_SCALE_FUNC(16s64f, short, double, double);
DEF_CVT_SCALE_FUNC(32s64f, int, double, double);
DEF_CVT_SCALE_FUNC(32f64f, float, double, double);
DEF_CVT_SCALE_FUNC(64f, double, double, double);

// Identity depths are handled by memcpy-based wrappers (DEF_CPY_FUNC);
// same-size depth pairs share a copy kernel via the tables below.
DEF_CPY_FUNC(8u, uchar);
DEF_CVT_FUNC(8s8u, schar, uchar);
DEF_CVT_FUNC(16u8u, ushort, uchar);
DEF_CVT_FUNC(16s8u, short, uchar);
DEF_CVT_FUNC(32s8u, int, uchar);
DEF_CVT_FUNC(32f8u, float, uchar);
DEF_CVT_FUNC(64f8u, double, uchar);

DEF_CVT_FUNC(8u8s, uchar, schar);
DEF_CVT_FUNC(16u8s, ushort, schar);
DEF_CVT_FUNC(16s8s, short, schar);
DEF_CVT_FUNC(32s8s, int, schar);
DEF_CVT_FUNC(32f8s, float, schar);
DEF_CVT_FUNC(64f8s, double, schar);

DEF_CVT_FUNC(8u16u, uchar, ushort);
DEF_CVT_FUNC(8s16u, schar, ushort);
DEF_CPY_FUNC(16u, ushort);
DEF_CVT_FUNC(16s16u, short, ushort);
DEF_CVT_FUNC(32s16u, int, ushort);
DEF_CVT_FUNC(32f16u, float, ushort);
DEF_CVT_FUNC(64f16u, double, ushort);

DEF_CVT_FUNC(8u16s, uchar, short);
DEF_CVT_FUNC(8s16s, schar, short);
DEF_CVT_FUNC(16u16s, ushort, short);
DEF_CVT_FUNC(32s16s, int, short);
DEF_CVT_FUNC(32f16s, float, short);
DEF_CVT_FUNC(64f16s, double, short);

DEF_CVT_FUNC(8u32s, uchar, int);
DEF_CVT_FUNC(8s32s, schar, int);
DEF_CVT_FUNC(16u32s, ushort, int);
DEF_CVT_FUNC(16s32s, short, int);
DEF_CPY_FUNC(32s, int);
DEF_CVT_FUNC(32f32s, float, int);
DEF_CVT_FUNC(64f32s, double, int);

DEF_CVT_FUNC(8u32f, uchar, float);
DEF_CVT_FUNC(8s32f, schar, float);
DEF_CVT_FUNC(16u32f, ushort, float);
DEF_CVT_FUNC(16s32f, short, float);
DEF_CVT_FUNC(32s32f, int, float);
DEF_CVT_FUNC(64f32f, double, float);

DEF_CVT_FUNC(8u64f, uchar, double);
DEF_CVT_FUNC(8s64f, schar, double);
DEF_CVT_FUNC(16u64f, ushort, double);
DEF_CVT_FUNC(16s64f, short, double);
DEF_CVT_FUNC(32s64f, int, double);
DEF_CVT_FUNC(32f64f, float, double);
DEF_CPY_FUNC(64s, int64);

static BinaryFunc cvtScaleAbsTab[] =
{
    (BinaryFunc)cvtScaleAbs8u,
    (BinaryFunc)cvtScaleAbs8s8u, (BinaryFunc)cvtScaleAbs16u8u,
    (BinaryFunc)cvtScaleAbs16s8u, (BinaryFunc)cvtScaleAbs32s8u, (BinaryFunc)cvtScaleAbs32f8u,
    (BinaryFunc)cvtScaleAbs64f8u, 0
};

// [destination depth][source depth] -> scaling conversion kernel
static BinaryFunc cvtScaleTab[][8] =
{
    {
        (BinaryFunc)cvtScale8u, (BinaryFunc)cvtScale8s8u, (BinaryFunc)cvtScale16u8u,
        (BinaryFunc)cvtScale16s8u, (BinaryFunc)cvtScale32s8u, (BinaryFunc)cvtScale32f8u,
        (BinaryFunc)cvtScale64f8u, 0
    },
    {
        (BinaryFunc)cvtScale8u8s, (BinaryFunc)cvtScale8s, (BinaryFunc)cvtScale16u8s,
        (BinaryFunc)cvtScale16s8s, (BinaryFunc)cvtScale32s8s, (BinaryFunc)cvtScale32f8s,
        (BinaryFunc)cvtScale64f8s, 0
    },
    {
        (BinaryFunc)cvtScale8u16u, (BinaryFunc)cvtScale8s16u, (BinaryFunc)cvtScale16u,
        (BinaryFunc)cvtScale16s16u, (BinaryFunc)cvtScale32s16u, (BinaryFunc)cvtScale32f16u,
        (BinaryFunc)cvtScale64f16u, 0
    },
    {
        (BinaryFunc)cvtScale8u16s, (BinaryFunc)cvtScale8s16s, (BinaryFunc)cvtScale16u16s,
        (BinaryFunc)cvtScale16s, (BinaryFunc)cvtScale32s16s, (BinaryFunc)cvtScale32f16s,
        (BinaryFunc)cvtScale64f16s, 0
    },
    {
        (BinaryFunc)cvtScale8u32s, (BinaryFunc)cvtScale8s32s, (BinaryFunc)cvtScale16u32s,
        (BinaryFunc)cvtScale16s32s, (BinaryFunc)cvtScale32s, (BinaryFunc)cvtScale32f32s,
        (BinaryFunc)cvtScale64f32s, 0
    },
    {
        (BinaryFunc)cvtScale8u32f, (BinaryFunc)cvtScale8s32f, (BinaryFunc)cvtScale16u32f,
        (BinaryFunc)cvtScale16s32f, (BinaryFunc)cvtScale32s32f, (BinaryFunc)cvtScale32f,
        (BinaryFunc)cvtScale64f32f, 0
    },
    {
        (BinaryFunc)cvtScale8u64f, (BinaryFunc)cvtScale8s64f, (BinaryFunc)cvtScale16u64f,
        (BinaryFunc)cvtScale16s64f, (BinaryFunc)cvtScale32s64f, (BinaryFunc)cvtScale32f64f,
        (BinaryFunc)cvtScale64f, 0
    },
    {
        0, 0, 0, 0, 0, 0, 0, 0
    }
};

// [destination depth][source depth] -> plain conversion kernel.  Same-size
// identity pairs reuse a copy kernel of equal element size (e.g. the 8s->8s
// slot holds cvt8u, 16s->16s holds cvt16u, 32f->32f holds cvt32s).
static BinaryFunc cvtTab[][8] =
{
    {
        (BinaryFunc)cvt8u, (BinaryFunc)cvt8s8u, (BinaryFunc)cvt16u8u,
        (BinaryFunc)cvt16s8u, (BinaryFunc)cvt32s8u, (BinaryFunc)cvt32f8u,
        (BinaryFunc)cvt64f8u, 0
    },
    {
        (BinaryFunc)cvt8u8s, (BinaryFunc)cvt8u, (BinaryFunc)cvt16u8s,
        (BinaryFunc)cvt16s8s, (BinaryFunc)cvt32s8s, (BinaryFunc)cvt32f8s,
        (BinaryFunc)cvt64f8s, 0
    },
    {
        (BinaryFunc)cvt8u16u, (BinaryFunc)cvt8s16u, (BinaryFunc)cvt16u,
        (BinaryFunc)cvt16s16u, (BinaryFunc)cvt32s16u, (BinaryFunc)cvt32f16u,
        (BinaryFunc)cvt64f16u, 0
    },
    {
        (BinaryFunc)cvt8u16s, (BinaryFunc)cvt8s16s, (BinaryFunc)cvt16u16s,
        (BinaryFunc)cvt16u, (BinaryFunc)cvt32s16s, (BinaryFunc)cvt32f16s,
        (BinaryFunc)cvt64f16s, 0
    },
    {
        (BinaryFunc)cvt8u32s, (BinaryFunc)cvt8s32s, (BinaryFunc)cvt16u32s,
        (BinaryFunc)cvt16s32s, (BinaryFunc)cvt32s, (BinaryFunc)cvt32f32s,
        (BinaryFunc)cvt64f32s, 0
    },
    {
        (BinaryFunc)cvt8u32f, (BinaryFunc)cvt8s32f, (BinaryFunc)cvt16u32f,
        (BinaryFunc)cvt16s32f, (BinaryFunc)cvt32s32f, (BinaryFunc)cvt32s,
        (BinaryFunc)cvt64f32f, 0
    },
    {
        (BinaryFunc)cvt8u64f, (BinaryFunc)cvt8s64f, (BinaryFunc)cvt16u64f,
        (BinaryFunc)cvt16s64f, (BinaryFunc)cvt32s64f, (BinaryFunc)cvt32f64f,
        (BinaryFunc)cvt64s, 0
    },
    {
        0, 0, 0, 0, 0, 0, 0, 0
    }
};

// Lookup helpers used by Mat::convertTo and friends.
BinaryFunc getConvertFunc(int sdepth, int ddepth)
{
    return cvtTab[CV_MAT_DEPTH(ddepth)][CV_MAT_DEPTH(sdepth)];
}

BinaryFunc getConvertScaleFunc(int sdepth, int ddepth)
{
    return cvtScaleTab[CV_MAT_DEPTH(ddepth)][CV_MAT_DEPTH(sdepth)];
}

}

// dst = saturate_cast<uchar>(|src*alpha + beta|); destination is always 8U
// with the source's channel count.
void cv::convertScaleAbs( InputArray _src, OutputArray _dst, double alpha, double beta )
{
    Mat src = _src.getMat();
    int cn = src.channels();
    double scale[] = {alpha, beta};
    _dst.create( src.dims, src.size, CV_8UC(cn) );
    Mat dst = _dst.getMat();
    BinaryFunc func = cvtScaleAbsTab[src.depth()];
    CV_Assert( func != 0 );

    if( src.dims <= 2 )
    {
        Size sz = getContinuousSize(src, dst, cn);
        func( src.data, src.step, 0, 0, dst.data, dst.step, sz, scale );
    }
    else
    {
        // arbitrary-dimensional case: iterate over 2D planes
        const Mat* arrays[] = {&src, &dst, 0};
        uchar* ptrs[2];
        NAryMatIterator it(arrays, ptrs);
        Size sz((int)it.size*cn, 1);

        for( size_t i = 0; i < it.nplanes; i++, ++it )
            func( ptrs[0], 0, 0, 0, ptrs[1], 0, sz, scale );
    }
}

void
     cv::Mat::convertTo(OutputArray _dst, int _type, double alpha, double beta) const
{
    // alpha==1, beta==0 degenerates to a plain depth conversion / copy
    bool noScale = fabs(alpha-1) < DBL_EPSILON && fabs(beta) < DBL_EPSILON;

    if( _type < 0 )
        _type = _dst.fixedType() ? _dst.type() : type();
    else
        _type = CV_MAKETYPE(CV_MAT_DEPTH(_type), channels());

    int sdepth = depth(), ddepth = CV_MAT_DEPTH(_type);
    if( sdepth == ddepth && noScale )
    {
        copyTo(_dst);
        return;
    }

    // keep a header alive in case _dst aliases *this
    Mat src = *this;

    BinaryFunc func = noScale ? getConvertFunc(sdepth, ddepth) : getConvertScaleFunc(sdepth, ddepth);
    double scale[] = {alpha, beta};
    int cn = channels();
    CV_Assert( func != 0 );

    if( dims <= 2 )
    {
        _dst.create( size(), _type );
        Mat dst = _dst.getMat();
        Size sz = getContinuousSize(src, dst, cn);
        func( src.data, src.step, 0, 0, dst.data, dst.step, sz, scale );
    }
    else
    {
        _dst.create( dims, size, _type );
        Mat dst = _dst.getMat();
        const Mat* arrays[] = {&src, &dst, 0};
        uchar* ptrs[2];
        NAryMatIterator it(arrays, ptrs);
        Size sz((int)(it.size*cn), 1);

        for( size_t i = 0; i < it.nplanes; i++, ++it )
            func(ptrs[0], 0, 0, 0, ptrs[1], 0, sz, scale);
    }
}

/****************************************************************************************\
*                                    LUT Transform                                       *
\****************************************************************************************/

namespace cv
{

// Table lookup over 8-bit indices: dst[i] = lut[src[i]] for a shared
// single-channel table, or per-channel tables when lutcn == cn.
template<typename T> static void
LUT8u_( const uchar* src, const T* lut, T* dst, int len, int cn, int lutcn )
{
    if( lutcn == 1 )
    {
        for( int i = 0; i < len*cn; i++ )
            dst[i] = lut[src[i]];
    }
    else
    {
        for( int i = 0; i < len*cn; i += cn )
            for( int k = 0; k < cn; k++ )
                dst[i+k] = lut[src[i+k]*cn+k];
    }
}

static void LUT8u_8u( const uchar* src, const uchar* lut, uchar* dst, int len, int cn, int lutcn )
{
    LUT8u_( src, lut, dst, len, cn, lutcn );
}

static void LUT8u_8s( const uchar* src, const schar* lut, schar* dst, int len, int cn, int lutcn )
{
    LUT8u_( src, lut, dst, len, cn, lutcn );
}

static void LUT8u_16u( const uchar* src, const ushort* lut, ushort* dst, int len, int cn, int lutcn )
{
    LUT8u_( src, lut, dst, len, cn, lutcn );
}

static void LUT8u_16s( const uchar* src, const short* lut, short* dst, int len, int cn, int lutcn )
{
    LUT8u_( src, lut, dst, len, cn, lutcn );
}

static void LUT8u_32s( const uchar* src, const int* lut, int* dst, int len, int cn, int lutcn )
{
    LUT8u_( src, lut, dst, len, cn, lutcn );
}

static void LUT8u_32f( const uchar* src, const float* lut, float* dst, int len, int cn, int lutcn )
{
    LUT8u_( src, lut, dst, len, cn, lutcn );
}

static void LUT8u_64f( const uchar* src, const double* lut, double* dst, int len, int cn, int lutcn )
{
    LUT8u_( src, lut, dst, len, cn, lutcn );
}

typedef void (*LUTFunc)( const uchar* src, const uchar* lut, uchar* dst, int len, int cn, int lutcn );

// indexed by the LUT's depth
static LUTFunc lutTab[] =
{
    (LUTFunc)LUT8u_8u, (LUTFunc)LUT8u_8s, (LUTFunc)LUT8u_16u, (LUTFunc)LUT8u_16s,
    (LUTFunc)LUT8u_32s, (LUTFunc)LUT8u_32f, (LUTFunc)LUT8u_64f, 0
};

}

// Apply a 256-entry lookup table to every element of an 8-bit matrix.
// `interpolation` must be 0 (the parameter is kept for API compatibility).
void cv::LUT( InputArray _src, InputArray _lut, OutputArray _dst, int interpolation )
{
    Mat src = _src.getMat(), lut = _lut.getMat();
    CV_Assert( interpolation == 0 );
    int cn = src.channels();
    int lutcn = lut.channels();

    CV_Assert( (lutcn == cn || lutcn == 1) &&
               lut.total() == 256 && lut.isContinuous() &&
               (src.depth() == CV_8U || src.depth() == CV_8S) );
    _dst.create( src.dims, src.size, CV_MAKETYPE(lut.depth(), cn));
    Mat dst = _dst.getMat();

    LUTFunc func = lutTab[lut.depth()];
    CV_Assert( func != 0 );

    const Mat* arrays[] = {&src, &dst, 0};
    uchar* ptrs[2];
    NAryMatIterator it(arrays, ptrs);
    int len = (int)it.size;

    for( size_t i = 0; i < it.nplanes; i++, ++it )
        func(ptrs[0], lut.data, ptrs[1], len, cn, lutcn);
}


// Normalize the source array (continues past this view).
void cv::normalize( InputArray _src, OutputArray _dst, double a, double b,
                    int norm_type, int rtype, InputArray _mask )
{
    Mat src = _src.getMat(), mask = _mask.getMat();

    double scale = 1, shift =
0; + if( norm_type == CV_MINMAX ) + { + double smin = 0, smax = 0; + double dmin = MIN( a, b ), dmax = MAX( a, b ); + minMaxLoc( _src, &smin, &smax, 0, 0, mask ); + scale = (dmax - dmin)*(smax - smin > DBL_EPSILON ? 1./(smax - smin) : 0); + shift = dmin - smin*scale; + } + else if( norm_type == CV_L2 || norm_type == CV_L1 || norm_type == CV_C ) + { + scale = norm( src, norm_type, mask ); + scale = scale > DBL_EPSILON ? a/scale : 0.; + shift = 0; + } + else + CV_Error( CV_StsBadArg, "Unknown/unsupported norm type" ); + + if( rtype < 0 ) + rtype = _dst.fixedType() ? _dst.depth() : src.depth(); + + _dst.create(src.dims, src.size, CV_MAKETYPE(rtype, src.channels())); + Mat dst = _dst.getMat(); + + if( !mask.data ) + src.convertTo( dst, rtype, scale, shift ); + else + { + Mat temp; + src.convertTo( temp, rtype, scale, shift ); + temp.copyTo( dst, mask ); + } +} + +CV_IMPL void +cvSplit( const void* srcarr, void* dstarr0, void* dstarr1, void* dstarr2, void* dstarr3 ) +{ + void* dptrs[] = { dstarr0, dstarr1, dstarr2, dstarr3 }; + cv::Mat src = cv::cvarrToMat(srcarr); + int i, j, nz = 0; + for( i = 0; i < 4; i++ ) + nz += dptrs[i] != 0; + CV_Assert( nz > 0 ); + cv::vector dvec(nz); + cv::vector pairs(nz*2); + + for( i = j = 0; i < 4; i++ ) + { + if( dptrs[i] != 0 ) + { + dvec[j] = cv::cvarrToMat(dptrs[i]); + CV_Assert( dvec[j].size() == src.size() && + dvec[j].depth() == src.depth() && + dvec[j].channels() == 1 && i < src.channels() ); + pairs[j*2] = i; + pairs[j*2+1] = j; + j++; + } + } + if( nz == src.channels() ) + cv::split( src, dvec ); + else + { + cv::mixChannels( &src, 1, &dvec[0], nz, &pairs[0], nz ); + } +} + + +CV_IMPL void +cvMerge( const void* srcarr0, const void* srcarr1, const void* srcarr2, + const void* srcarr3, void* dstarr ) +{ + const void* sptrs[] = { srcarr0, srcarr1, srcarr2, srcarr3 }; + cv::Mat dst = cv::cvarrToMat(dstarr); + int i, j, nz = 0; + for( i = 0; i < 4; i++ ) + nz += sptrs[i] != 0; + CV_Assert( nz > 0 ); + cv::vector svec(nz); + 
cv::vector pairs(nz*2); + + for( i = j = 0; i < 4; i++ ) + { + if( sptrs[i] != 0 ) + { + svec[j] = cv::cvarrToMat(sptrs[i]); + CV_Assert( svec[j].size == dst.size && + svec[j].depth() == dst.depth() && + svec[j].channels() == 1 && i < dst.channels() ); + pairs[j*2] = j; + pairs[j*2+1] = i; + j++; + } + } + + if( nz == dst.channels() ) + cv::merge( svec, dst ); + else + { + cv::mixChannels( &svec[0], nz, &dst, 1, &pairs[0], nz ); + } +} + + +CV_IMPL void +cvMixChannels( const CvArr** src, int src_count, + CvArr** dst, int dst_count, + const int* from_to, int pair_count ) +{ + cv::AutoBuffer buf(src_count + dst_count); + + int i; + for( i = 0; i < src_count; i++ ) + buf[i] = cv::cvarrToMat(src[i]); + for( i = 0; i < dst_count; i++ ) + buf[i+src_count] = cv::cvarrToMat(dst[i]); + cv::mixChannels(&buf[0], src_count, &buf[src_count], dst_count, from_to, pair_count); +} + +CV_IMPL void +cvConvertScaleAbs( const void* srcarr, void* dstarr, + double scale, double shift ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + CV_Assert( src.size == dst.size && dst.type() == CV_8UC(src.channels())); + cv::convertScaleAbs( src, dst, scale, shift ); +} + +CV_IMPL void +cvConvertScale( const void* srcarr, void* dstarr, + double scale, double shift ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + + CV_Assert( src.size == dst.size && src.channels() == dst.channels() ); + src.convertTo(dst, dst.type(), scale, shift); +} + +CV_IMPL void cvLUT( const void* srcarr, void* dstarr, const void* lutarr ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), lut = cv::cvarrToMat(lutarr); + + CV_Assert( dst.size() == src.size() && dst.type() == CV_MAKETYPE(lut.depth(), src.channels()) ); + cv::LUT( src, lut, dst ); +} + +CV_IMPL void cvNormalize( const CvArr* srcarr, CvArr* dstarr, + double a, double b, int norm_type, const CvArr* maskarr ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), mask; + if( 
maskarr ) + mask = cv::cvarrToMat(maskarr); + CV_Assert( dst.size() == src.size() && src.channels() == dst.channels() ); + cv::normalize( src, dst, a, b, norm_type, dst.type(), mask ); +} + +/* End of file. */ diff --git a/opencv/core/copy.cpp b/opencv/core/copy.cpp new file mode 100644 index 0000000..43f601c --- /dev/null +++ b/opencv/core/copy.cpp @@ -0,0 +1,568 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* //////////////////////////////////////////////////////////////////// +// +// Mat basic operations: Copy, Set +// +// */ + +#include "precomp.hpp" + +namespace cv +{ + +template static void +copyMask_(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, uchar* _dst, size_t dstep, Size size) +{ + for( ; size.height--; mask += mstep, _src += sstep, _dst += dstep ) + { + const T* src = (const T*)_src; + T* dst = (T*)_dst; + int x = 0; + for( ; x <= size.width - 4; x += 4 ) + { + if( mask[x] ) + dst[x] = src[x]; + if( mask[x+1] ) + dst[x+1] = src[x+1]; + if( mask[x+2] ) + dst[x+2] = src[x+2]; + if( mask[x+3] ) + dst[x+3] = src[x+3]; + } + for( ; x < size.width; x++ ) + if( mask[x] ) + dst[x] = src[x]; + } +} + +static void +copyMaskGeneric(const uchar* _src, size_t sstep, const uchar* mask, size_t mstep, uchar* _dst, size_t dstep, Size size, void* _esz) +{ + size_t k, esz = *(size_t*)_esz; + for( ; size.height--; mask += mstep, _src += sstep, _dst += dstep ) + { + const uchar* src = _src; + uchar* dst = _dst; + int x = 0; + for( ; x < size.width; x++, src += esz, dst += esz ) + { + if( !mask[x] ) + continue; + for( k = 0; k < esz; k++ ) + dst[k] = src[k]; + } + } +} + + +#define DEF_COPY_MASK(suffix, type) \ +static void copyMask##suffix(const uchar* src, size_t sstep, const uchar* mask, size_t mstep, \ + uchar* dst, size_t dstep, Size size, void*) \ +{ \ + copyMask_(src, sstep, mask, mstep, dst, dstep, 
size); \ +} + + +DEF_COPY_MASK(8u, uchar); +DEF_COPY_MASK(16u, ushort); +DEF_COPY_MASK(8uC3, Vec3b); +DEF_COPY_MASK(32s, int); +DEF_COPY_MASK(16uC3, Vec3s); +DEF_COPY_MASK(32sC2, Vec2i); +DEF_COPY_MASK(32sC3, Vec3i); +DEF_COPY_MASK(32sC4, Vec4i); +DEF_COPY_MASK(32sC6, Vec6i); +DEF_COPY_MASK(32sC8, Vec8i); + +BinaryFunc copyMaskTab[] = +{ + 0, + copyMask8u, + copyMask16u, + copyMask8uC3, + copyMask32s, + 0, + copyMask16uC3, + 0, + copyMask32sC2, + 0, 0, 0, + copyMask32sC3, + 0, 0, 0, + copyMask32sC4, + 0, 0, 0, 0, 0, 0, 0, + copyMask32sC6, + 0, 0, 0, 0, 0, 0, 0, + copyMask32sC8 +}; + +BinaryFunc getCopyMaskFunc(size_t esz) +{ + return esz <= 32 && copyMaskTab[esz] ? copyMaskTab[esz] : copyMaskGeneric; +} + +/* dst = src */ +void Mat::copyTo( OutputArray _dst ) const +{ + int dtype = _dst.type(); + if( _dst.fixedType() && dtype != type() ) + { + convertTo( _dst, dtype ); + return; + } + + if( empty() ) + { + _dst.release(); + return; + } + + if( dims <= 2 ) + { + _dst.create( rows, cols, type() ); + Mat dst = _dst.getMat(); + if( data == dst.data ) + return; + + if( rows > 0 && cols > 0 ) + { + const uchar* sptr = data; + uchar* dptr = dst.data; + + // to handle the copying 1xn matrix => nx1 std vector. + Size sz = size() == dst.size() ? 
+ getContinuousSize(*this, dst) : + getContinuousSize(*this); + size_t len = sz.width*elemSize(); + + for( ; sz.height--; sptr += step, dptr += dst.step ) + memcpy( dptr, sptr, len ); + } + return; + } + + _dst.create( dims, size, type() ); + Mat dst = _dst.getMat(); + if( data == dst.data ) + return; + + if( total() != 0 ) + { + const Mat* arrays[] = { this, &dst }; + uchar* ptrs[2]; + NAryMatIterator it(arrays, ptrs, 2); + size_t size = it.size*elemSize(); + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + memcpy(ptrs[1], ptrs[0], size); + } +} + +void Mat::copyTo( OutputArray _dst, InputArray _mask ) const +{ + Mat mask = _mask.getMat(); + if( !mask.data ) + { + copyTo(_dst); + return; + } + + int cn = channels(), mcn = mask.channels(); + CV_Assert( mask.depth() == CV_8U && (mcn == 1 || mcn == cn) ); + bool colorMask = mcn > 1; + + size_t esz = colorMask ? elemSize1() : elemSize(); + BinaryFunc copymask = getCopyMaskFunc(esz); + + uchar* data0 = _dst.getMat().data; + _dst.create( dims, size, type() ); + Mat dst = _dst.getMat(); + + if( dst.data != data0 ) // do not leave dst uninitialized + dst = Scalar(0); + + if( dims <= 2 ) + { + Size sz = getContinuousSize(*this, dst, mask, mcn); + copymask(data, step, mask.data, mask.step, dst.data, dst.step, sz, &esz); + return; + } + + const Mat* arrays[] = { this, &dst, &mask, 0 }; + uchar* ptrs[3]; + NAryMatIterator it(arrays, ptrs); + Size sz((int)(it.size*mcn), 1); + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + copymask(ptrs[0], 0, ptrs[2], 0, ptrs[1], 0, sz, &esz); +} + +Mat& Mat::operator = (const Scalar& s) +{ + const Mat* arrays[] = { this }; + uchar* ptr; + NAryMatIterator it(arrays, &ptr, 1); + size_t size = it.size*elemSize(); + + if( s[0] == 0 && s[1] == 0 && s[2] == 0 && s[3] == 0 ) + { + for( size_t i = 0; i < it.nplanes; i++, ++it ) + memset( ptr, 0, size ); + } + else + { + if( it.nplanes > 0 ) + { + double scalar[12]; + scalarToRawData(s, scalar, type(), 12); + size_t blockSize = 12*elemSize1(); + 
+ for( size_t j = 0; j < size; j += blockSize ) + { + size_t sz = MIN(blockSize, size - j); + memcpy( ptr + j, scalar, sz ); + } + } + + for( size_t i = 1; i < it.nplanes; i++ ) + { + ++it; + memcpy( ptr, data, size ); + } + } + return *this; +} + + +Mat& Mat::setTo(InputArray _value, InputArray _mask) +{ + if( !data ) + return *this; + + Mat value = _value.getMat(), mask = _mask.getMat(); + + CV_Assert( checkScalar(value, type(), _value.kind(), _InputArray::MAT )); + CV_Assert( mask.empty() || mask.type() == CV_8U ); + + size_t esz = elemSize(); + BinaryFunc copymask = getCopyMaskFunc(esz); + + const Mat* arrays[] = { this, !mask.empty() ? &mask : 0, 0 }; + uchar* ptrs[2]={0,0}; + NAryMatIterator it(arrays, ptrs); + int total = (int)it.size, blockSize0 = std::min(total, (int)((BLOCK_SIZE + esz-1)/esz)); + AutoBuffer _scbuf(blockSize0*esz + 32); + uchar* scbuf = alignPtr((uchar*)_scbuf, (int)sizeof(double)); + convertAndUnrollScalar( value, type(), scbuf, blockSize0 ); + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + { + for( int j = 0; j < total; j += blockSize0 ) + { + Size sz(std::min(blockSize0, total - j), 1); + size_t blockSize = sz.width*esz; + if( ptrs[1] ) + { + copymask(scbuf, 0, ptrs[1], 0, ptrs[0], 0, sz, &esz); + ptrs[1] += sz.width; + } + else + memcpy(ptrs[0], scbuf, blockSize); + ptrs[0] += blockSize; + } + } + return *this; +} + + +static void +flipHoriz( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size size, size_t esz ) +{ + int i, j, limit = (int)(((size.width + 1)/2)*esz); + AutoBuffer _tab(size.width*esz); + int* tab = _tab; + + for( i = 0; i < size.width; i++ ) + for( size_t k = 0; k < esz; k++ ) + tab[i*esz + k] = (int)((size.width - i - 1)*esz + k); + + for( ; size.height--; src += sstep, dst += dstep ) + { + for( i = 0; i < limit; i++ ) + { + j = tab[i]; + uchar t0 = src[i], t1 = src[j]; + dst[i] = t1; dst[j] = t0; + } + } +} + +static void +flipVert( const uchar* src0, size_t sstep, uchar* dst0, size_t dstep, Size size, 
size_t esz ) +{ + const uchar* src1 = src0 + (size.height - 1)*sstep; + uchar* dst1 = dst0 + (size.height - 1)*dstep; + size.width *= (int)esz; + + for( int y = 0; y < (size.height + 1)/2; y++, src0 += sstep, src1 -= sstep, + dst0 += dstep, dst1 -= dstep ) + { + int i = 0; + if( ((size_t)src0|(size_t)dst0|(size_t)src1|(size_t)dst1) % sizeof(int) == 0 ) + { + for( ; i <= size.width - 16; i += 16 ) + { + int t0 = ((int*)(src0 + i))[0]; + int t1 = ((int*)(src1 + i))[0]; + + ((int*)(dst0 + i))[0] = t1; + ((int*)(dst1 + i))[0] = t0; + + t0 = ((int*)(src0 + i))[1]; + t1 = ((int*)(src1 + i))[1]; + + ((int*)(dst0 + i))[1] = t1; + ((int*)(dst1 + i))[1] = t0; + + t0 = ((int*)(src0 + i))[2]; + t1 = ((int*)(src1 + i))[2]; + + ((int*)(dst0 + i))[2] = t1; + ((int*)(dst1 + i))[2] = t0; + + t0 = ((int*)(src0 + i))[3]; + t1 = ((int*)(src1 + i))[3]; + + ((int*)(dst0 + i))[3] = t1; + ((int*)(dst1 + i))[3] = t0; + } + + for( ; i <= size.width - 4; i += 4 ) + { + int t0 = ((int*)(src0 + i))[0]; + int t1 = ((int*)(src1 + i))[0]; + + ((int*)(dst0 + i))[0] = t1; + ((int*)(dst1 + i))[0] = t0; + } + } + + for( ; i < size.width; i++ ) + { + uchar t0 = src0[i]; + uchar t1 = src1[i]; + + dst0[i] = t1; + dst1[i] = t0; + } + } +} + +void flip( InputArray _src, OutputArray _dst, int flip_mode ) +{ + Mat src = _src.getMat(); + + CV_Assert( src.dims <= 2 ); + _dst.create( src.size(), src.type() ); + Mat dst = _dst.getMat(); + size_t esz = src.elemSize(); + + if( flip_mode <= 0 ) + flipVert( src.data, src.step, dst.data, dst.step, src.size(), esz ); + else + flipHoriz( src.data, src.step, dst.data, dst.step, src.size(), esz ); + + if( flip_mode < 0 ) + flipHoriz( dst.data, dst.step, dst.data, dst.step, dst.size(), esz ); +} + + +void repeat(InputArray _src, int ny, int nx, OutputArray _dst) +{ + Mat src = _src.getMat(); + CV_Assert( src.dims <= 2 ); + + _dst.create(src.rows*ny, src.cols*nx, src.type()); + Mat dst = _dst.getMat(); + Size ssize = src.size(), dsize = dst.size(); + int esz = 
(int)src.elemSize(); + int x, y; + ssize.width *= esz; dsize.width *= esz; + + for( y = 0; y < ssize.height; y++ ) + { + for( x = 0; x < dsize.width; x += ssize.width ) + memcpy( dst.data + y*dst.step + x, src.data + y*src.step, ssize.width ); + } + + for( ; y < dsize.height; y++ ) + memcpy( dst.data + y*dst.step, dst.data + (y - ssize.height)*dst.step, dsize.width ); +} + +Mat repeat(const Mat& src, int ny, int nx) +{ + if( nx == 1 && ny == 1 ) + return src; + Mat dst; + repeat(src, ny, nx, dst); + return dst; +} + +} + +/* dst = src */ +CV_IMPL void +cvCopy( const void* srcarr, void* dstarr, const void* maskarr ) +{ + if( CV_IS_SPARSE_MAT(srcarr) && CV_IS_SPARSE_MAT(dstarr)) + { + CV_Assert( maskarr == 0 ); + CvSparseMat* src1 = (CvSparseMat*)srcarr; + CvSparseMat* dst1 = (CvSparseMat*)dstarr; + CvSparseMatIterator iterator; + CvSparseNode* node; + + dst1->dims = src1->dims; + memcpy( dst1->size, src1->size, src1->dims*sizeof(src1->size[0])); + dst1->valoffset = src1->valoffset; + dst1->idxoffset = src1->idxoffset; + cvClearSet( dst1->heap ); + + if( src1->heap->active_count >= dst1->hashsize*CV_SPARSE_HASH_RATIO ) + { + cvFree( &dst1->hashtable ); + dst1->hashsize = src1->hashsize; + dst1->hashtable = + (void**)cvAlloc( dst1->hashsize*sizeof(dst1->hashtable[0])); + } + + memset( dst1->hashtable, 0, dst1->hashsize*sizeof(dst1->hashtable[0])); + + for( node = cvInitSparseMatIterator( src1, &iterator ); + node != 0; node = cvGetNextSparseNode( &iterator )) + { + CvSparseNode* node_copy = (CvSparseNode*)cvSetNew( dst1->heap ); + int tabidx = node->hashval & (dst1->hashsize - 1); + memcpy( node_copy, node, dst1->heap->elem_size ); + node_copy->next = (CvSparseNode*)dst1->hashtable[tabidx]; + dst1->hashtable[tabidx] = node_copy; + } + return; + } + cv::Mat src = cv::cvarrToMat(srcarr, false, true, 1), dst = cv::cvarrToMat(dstarr, false, true, 1); + CV_Assert( src.depth() == dst.depth() && src.size == dst.size ); + + int coi1 = 0, coi2 = 0; + if( CV_IS_IMAGE(srcarr) ) 
+ coi1 = cvGetImageCOI((const IplImage*)srcarr); + if( CV_IS_IMAGE(dstarr) ) + coi2 = cvGetImageCOI((const IplImage*)dstarr); + + if( coi1 || coi2 ) + { + CV_Assert( (coi1 != 0 || src.channels() == 1) && + (coi2 != 0 || dst.channels() == 1) ); + + int pair[] = { std::max(coi1-1, 0), std::max(coi2-1, 0) }; + cv::mixChannels( &src, 1, &dst, 1, pair, 1 ); + return; + } + else + CV_Assert( src.channels() == dst.channels() ); + + if( !maskarr ) + src.copyTo(dst); + else + src.copyTo(dst, cv::cvarrToMat(maskarr)); +} + +CV_IMPL void +cvSet( void* arr, CvScalar value, const void* maskarr ) +{ + cv::Mat m = cv::cvarrToMat(arr); + if( !maskarr ) + m = value; + else + m.setTo(cv::Scalar(value), cv::cvarrToMat(maskarr)); +} + +CV_IMPL void +cvSetZero( CvArr* arr ) +{ + if( CV_IS_SPARSE_MAT(arr) ) + { + CvSparseMat* mat1 = (CvSparseMat*)arr; + cvClearSet( mat1->heap ); + if( mat1->hashtable ) + memset( mat1->hashtable, 0, mat1->hashsize*sizeof(mat1->hashtable[0])); + return; + } + cv::Mat m = cv::cvarrToMat(arr); + m = cv::Scalar(0); +} + +CV_IMPL void +cvFlip( const CvArr* srcarr, CvArr* dstarr, int flip_mode ) +{ + cv::Mat src = cv::cvarrToMat(srcarr); + cv::Mat dst; + + if (!dstarr) + dst = src; + else + dst = cv::cvarrToMat(dstarr); + + CV_Assert( src.type() == dst.type() && src.size() == dst.size() ); + cv::flip( src, dst, flip_mode ); +} + +CV_IMPL void +cvRepeat( const CvArr* srcarr, CvArr* dstarr ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + CV_Assert( src.type() == dst.type() && + dst.rows % src.rows == 0 && dst.cols % src.cols == 0 ); + cv::repeat(src, dst.rows/src.rows, dst.cols/src.cols, dst); +} + +/* End of file. 
*/ diff --git a/opencv/core/datastructs.cpp b/opencv/core/datastructs.cpp new file mode 100644 index 0000000..59a4829 --- /dev/null +++ b/opencv/core/datastructs.cpp @@ -0,0 +1,4064 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ +#include "precomp.hpp" + +#define ICV_FREE_PTR(storage) \ + ((schar*)(storage)->top + (storage)->block_size - (storage)->free_space) + +#define ICV_ALIGNED_SEQ_BLOCK_SIZE \ + (int)cvAlign(sizeof(CvSeqBlock), CV_STRUCT_ALIGN) + +CV_INLINE int +cvAlignLeft( int size, int align ) +{ + return size & -align; +} + +#define CV_GET_LAST_ELEM( seq, block ) \ + ((block)->data + ((block)->count - 1)*((seq)->elem_size)) + +#define CV_SWAP_ELEMS(a,b,elem_size) \ +{ \ + int k; \ + for( k = 0; k < elem_size; k++ ) \ + { \ + char t0 = (a)[k]; \ + char t1 = (b)[k]; \ + (a)[k] = t1; \ + (b)[k] = t0; \ + } \ +} + +#define ICV_SHIFT_TAB_MAX 32 +static const schar icvPower2ShiftTab[] = +{ + 0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5 +}; + +/****************************************************************************************\ +* Functions for manipulating memory storage - list of memory blocks * +\****************************************************************************************/ + +/* Initialize allocated storage: */ +static void +icvInitMemStorage( CvMemStorage* storage, int block_size ) +{ + if( !storage ) + CV_Error( CV_StsNullPtr, "" ); + + if( block_size <= 0 ) + block_size = CV_STORAGE_BLOCK_SIZE; + + block_size = cvAlign( block_size, CV_STRUCT_ALIGN ); + assert( sizeof(CvMemBlock) % CV_STRUCT_ALIGN == 0 ); + + memset( storage, 0, sizeof( 
*storage )); + storage->signature = CV_STORAGE_MAGIC_VAL; + storage->block_size = block_size; +} + + +/* Create root memory storage: */ +CV_IMPL CvMemStorage* +cvCreateMemStorage( int block_size ) +{ + CvMemStorage* storage = (CvMemStorage *)cvAlloc( sizeof( CvMemStorage )); + icvInitMemStorage( storage, block_size ); + return storage; +} + + +/* Create child memory storage: */ +CV_IMPL CvMemStorage * +cvCreateChildMemStorage( CvMemStorage * parent ) +{ + if( !parent ) + CV_Error( CV_StsNullPtr, "" ); + + CvMemStorage* storage = cvCreateMemStorage(parent->block_size); + storage->parent = parent; + + return storage; +} + + +/* Release all blocks of the storage (or return them to parent, if any): */ +static void +icvDestroyMemStorage( CvMemStorage* storage ) +{ + int k = 0; + + CvMemBlock *block; + CvMemBlock *dst_top = 0; + + if( !storage ) + CV_Error( CV_StsNullPtr, "" ); + + if( storage->parent ) + dst_top = storage->parent->top; + + for( block = storage->bottom; block != 0; k++ ) + { + CvMemBlock *temp = block; + + block = block->next; + if( storage->parent ) + { + if( dst_top ) + { + temp->prev = dst_top; + temp->next = dst_top->next; + if( temp->next ) + temp->next->prev = temp; + dst_top = dst_top->next = temp; + } + else + { + dst_top = storage->parent->bottom = storage->parent->top = temp; + temp->prev = temp->next = 0; + storage->free_space = storage->block_size - sizeof( *temp ); + } + } + else + { + cvFree( &temp ); + } + } + + storage->top = storage->bottom = 0; + storage->free_space = 0; +} + + +/* Release memory storage: */ +CV_IMPL void +cvReleaseMemStorage( CvMemStorage** storage ) +{ + if( !storage ) + CV_Error( CV_StsNullPtr, "" ); + + CvMemStorage* st = *storage; + *storage = 0; + if( st ) + { + icvDestroyMemStorage( st ); + cvFree( &st ); + } +} + + +/* Clears memory storage (return blocks to the parent, if any): */ +CV_IMPL void +cvClearMemStorage( CvMemStorage * storage ) +{ + if( !storage ) + CV_Error( CV_StsNullPtr, "" ); + + if( 
storage->parent ) + icvDestroyMemStorage( storage ); + else + { + storage->top = storage->bottom; + storage->free_space = storage->bottom ? storage->block_size - sizeof(CvMemBlock) : 0; + } +} + + +/* Moves stack pointer to next block. + If no blocks, allocate new one and link it to the storage: */ +static void +icvGoNextMemBlock( CvMemStorage * storage ) +{ + if( !storage ) + CV_Error( CV_StsNullPtr, "" ); + + if( !storage->top || !storage->top->next ) + { + CvMemBlock *block; + + if( !(storage->parent) ) + { + block = (CvMemBlock *)cvAlloc( storage->block_size ); + } + else + { + CvMemStorage *parent = storage->parent; + CvMemStoragePos parent_pos; + + cvSaveMemStoragePos( parent, &parent_pos ); + icvGoNextMemBlock( parent ); + + block = parent->top; + cvRestoreMemStoragePos( parent, &parent_pos ); + + if( block == parent->top ) /* the single allocated block */ + { + assert( parent->bottom == block ); + parent->top = parent->bottom = 0; + parent->free_space = 0; + } + else + { + /* cut the block from the parent's list of blocks */ + parent->top->next = block->next; + if( block->next ) + block->next->prev = parent->top; + } + } + + /* link block */ + block->next = 0; + block->prev = storage->top; + + if( storage->top ) + storage->top->next = block; + else + storage->top = storage->bottom = block; + } + + if( storage->top->next ) + storage->top = storage->top->next; + storage->free_space = storage->block_size - sizeof(CvMemBlock); + assert( storage->free_space % CV_STRUCT_ALIGN == 0 ); +} + + +/* Remember memory storage position: */ +CV_IMPL void +cvSaveMemStoragePos( const CvMemStorage * storage, CvMemStoragePos * pos ) +{ + if( !storage || !pos ) + CV_Error( CV_StsNullPtr, "" ); + + pos->top = storage->top; + pos->free_space = storage->free_space; +} + + +/* Restore memory storage position: */ +CV_IMPL void +cvRestoreMemStoragePos( CvMemStorage * storage, CvMemStoragePos * pos ) +{ + if( !storage || !pos ) + CV_Error( CV_StsNullPtr, "" ); + if( pos->free_space > 
storage->block_size ) + CV_Error( CV_StsBadSize, "" ); + + /* + // this breaks icvGoNextMemBlock, so comment it off for now + if( storage->parent && (!pos->top || pos->top->next) ) + { + CvMemBlock* save_bottom; + if( !pos->top ) + save_bottom = 0; + else + { + save_bottom = storage->bottom; + storage->bottom = pos->top->next; + pos->top->next = 0; + storage->bottom->prev = 0; + } + icvDestroyMemStorage( storage ); + storage->bottom = save_bottom; + }*/ + + storage->top = pos->top; + storage->free_space = pos->free_space; + + if( !storage->top ) + { + storage->top = storage->bottom; + storage->free_space = storage->top ? storage->block_size - sizeof(CvMemBlock) : 0; + } +} + + +/* Allocate continuous buffer of the specified size in the storage: */ +CV_IMPL void* +cvMemStorageAlloc( CvMemStorage* storage, size_t size ) +{ + schar *ptr = 0; + if( !storage ) + CV_Error( CV_StsNullPtr, "NULL storage pointer" ); + + if( size > INT_MAX ) + CV_Error( CV_StsOutOfRange, "Too large memory block is requested" ); + + assert( storage->free_space % CV_STRUCT_ALIGN == 0 ); + + if( (size_t)storage->free_space < size ) + { + size_t max_free_space = cvAlignLeft(storage->block_size - sizeof(CvMemBlock), CV_STRUCT_ALIGN); + if( max_free_space < size ) + CV_Error( CV_StsOutOfRange, "requested size is negative or too big" ); + + icvGoNextMemBlock( storage ); + } + + ptr = ICV_FREE_PTR(storage); + assert( (size_t)ptr % CV_STRUCT_ALIGN == 0 ); + storage->free_space = cvAlignLeft(storage->free_space - (int)size, CV_STRUCT_ALIGN ); + + return ptr; +} + + +CV_IMPL CvString +cvMemStorageAllocString( CvMemStorage* storage, const char* ptr, int len ) +{ + CvString str; + + str.len = len >= 0 ? 
len : (int)strlen(ptr); + str.ptr = (char*)cvMemStorageAlloc( storage, str.len + 1 ); + memcpy( str.ptr, ptr, str.len ); + str.ptr[str.len] = '\0'; + + return str; +} + + +/****************************************************************************************\ +* Sequence implementation * +\****************************************************************************************/ + +/* Create empty sequence: */ +CV_IMPL CvSeq * +cvCreateSeq( int seq_flags, int header_size, int elem_size, CvMemStorage * storage ) +{ + CvSeq *seq = 0; + + if( !storage ) + CV_Error( CV_StsNullPtr, "" ); + if( header_size < (int)sizeof( CvSeq ) || elem_size <= 0 ) + CV_Error( CV_StsBadSize, "" ); + + /* allocate sequence header */ + seq = (CvSeq*)cvMemStorageAlloc( storage, header_size ); + memset( seq, 0, header_size ); + + seq->header_size = header_size; + seq->flags = (seq_flags & ~CV_MAGIC_MASK) | CV_SEQ_MAGIC_VAL; + { + int elemtype = CV_MAT_TYPE(seq_flags); + int typesize = CV_ELEM_SIZE(elemtype); + + if( elemtype != CV_SEQ_ELTYPE_GENERIC && elemtype != CV_USRTYPE1 && + typesize != 0 && typesize != elem_size ) + CV_Error( CV_StsBadSize, + "Specified element size doesn't match to the size of the specified element type " + "(try to use 0 for element type)" ); + } + seq->elem_size = elem_size; + seq->storage = storage; + + cvSetSeqBlockSize( seq, (1 << 10)/elem_size ); + + return seq; +} + + +/* adjusts field of sequence. 
It determines how much the sequence + grows if there are no free space inside the sequence buffers */ +CV_IMPL void +cvSetSeqBlockSize( CvSeq *seq, int delta_elements ) +{ + int elem_size; + int useful_block_size; + + if( !seq || !seq->storage ) + CV_Error( CV_StsNullPtr, "" ); + if( delta_elements < 0 ) + CV_Error( CV_StsOutOfRange, "" ); + + useful_block_size = cvAlignLeft(seq->storage->block_size - sizeof(CvMemBlock) - + sizeof(CvSeqBlock), CV_STRUCT_ALIGN); + elem_size = seq->elem_size; + + if( delta_elements == 0 ) + { + delta_elements = (1 << 10) / elem_size; + delta_elements = MAX( delta_elements, 1 ); + } + if( delta_elements * elem_size > useful_block_size ) + { + delta_elements = useful_block_size / elem_size; + if( delta_elements == 0 ) + CV_Error( CV_StsOutOfRange, "Storage block size is too small " + "to fit the sequence elements" ); + } + + seq->delta_elems = delta_elements; +} + + +/* Find a sequence element by its index: */ +CV_IMPL schar* +cvGetSeqElem( const CvSeq *seq, int index ) +{ + CvSeqBlock *block; + int count, total = seq->total; + + if( (unsigned)index >= (unsigned)total ) + { + index += index < 0 ? total : 0; + index -= index >= total ? 
total : 0; + if( (unsigned)index >= (unsigned)total ) + return 0; + } + + block = seq->first; + if( index + index <= total ) + { + while( index >= (count = block->count) ) + { + block = block->next; + index -= count; + } + } + else + { + do + { + block = block->prev; + total -= block->count; + } + while( index < total ); + index -= total; + } + + return block->data + index * seq->elem_size; +} + + +/* Calculate index of a sequence element: */ +CV_IMPL int +cvSeqElemIdx( const CvSeq* seq, const void* _element, CvSeqBlock** _block ) +{ + const schar *element = (const schar *)_element; + int elem_size; + int id = -1; + CvSeqBlock *first_block; + CvSeqBlock *block; + + if( !seq || !element ) + CV_Error( CV_StsNullPtr, "" ); + + block = first_block = seq->first; + elem_size = seq->elem_size; + + for( ;; ) + { + if( (unsigned)(element - block->data) < (unsigned) (block->count * elem_size) ) + { + if( _block ) + *_block = block; + if( elem_size <= ICV_SHIFT_TAB_MAX && (id = icvPower2ShiftTab[elem_size - 1]) >= 0 ) + id = (int)((size_t)(element - block->data) >> id); + else + id = (int)((size_t)(element - block->data) / elem_size); + id += block->start_index - seq->first->start_index; + break; + } + block = block->next; + if( block == first_block ) + break; + } + + return id; +} + + +CV_IMPL int +cvSliceLength( CvSlice slice, const CvSeq* seq ) +{ + int total = seq->total; + int length = slice.end_index - slice.start_index; + + if( length != 0 ) + { + if( slice.start_index < 0 ) + slice.start_index += total; + if( slice.end_index <= 0 ) + slice.end_index += total; + + length = slice.end_index - slice.start_index; + } + + if( length < 0 ) + { + length += total; + /*if( length < 0 ) + length += total;*/ + } + else if( length > total ) + length = total; + + return length; +} + + +/* Copy all sequence elements into single continuous array: */ +CV_IMPL void* +cvCvtSeqToArray( const CvSeq *seq, void *array, CvSlice slice ) +{ + int elem_size, total; + CvSeqReader reader; + char 
*dst = (char*)array; + + if( !seq || !array ) + CV_Error( CV_StsNullPtr, "" ); + + elem_size = seq->elem_size; + total = cvSliceLength( slice, seq )*elem_size; + + if( total == 0 ) + return 0; + + cvStartReadSeq( seq, &reader, 0 ); + cvSetSeqReaderPos( &reader, slice.start_index, 0 ); + + do + { + int count = (int)(reader.block_max - reader.ptr); + if( count > total ) + count = total; + + memcpy( dst, reader.ptr, count ); + dst += count; + reader.block = reader.block->next; + reader.ptr = reader.block->data; + reader.block_max = reader.ptr + reader.block->count*elem_size; + total -= count; + } + while( total > 0 ); + + return array; +} + + +/* Construct a sequence from an array without copying any data. + NB: The resultant sequence cannot grow beyond its initial size: */ +CV_IMPL CvSeq* +cvMakeSeqHeaderForArray( int seq_flags, int header_size, int elem_size, + void *array, int total, CvSeq *seq, CvSeqBlock * block ) +{ + CvSeq* result = 0; + + if( elem_size <= 0 || header_size < (int)sizeof( CvSeq ) || total < 0 ) + CV_Error( CV_StsBadSize, "" ); + + if( !seq || ((!array || !block) && total > 0) ) + CV_Error( CV_StsNullPtr, "" ); + + memset( seq, 0, header_size ); + + seq->header_size = header_size; + seq->flags = (seq_flags & ~CV_MAGIC_MASK) | CV_SEQ_MAGIC_VAL; + { + int elemtype = CV_MAT_TYPE(seq_flags); + int typesize = CV_ELEM_SIZE(elemtype); + + if( elemtype != CV_SEQ_ELTYPE_GENERIC && + typesize != 0 && typesize != elem_size ) + CV_Error( CV_StsBadSize, + "Element size doesn't match to the size of predefined element type " + "(try to use 0 for sequence element type)" ); + } + seq->elem_size = elem_size; + seq->total = total; + seq->block_max = seq->ptr = (schar *) array + total * elem_size; + + if( total > 0 ) + { + seq->first = block; + block->prev = block->next = block; + block->start_index = 0; + block->count = total; + block->data = (schar *) array; + } + + result = seq; + + return result; +} + + +/* The function allocates space for at least one more 
sequence element. + If there are free sequence blocks (seq->free_blocks != 0) + they are reused, otherwise the space is allocated in the storage: */ +static void +icvGrowSeq( CvSeq *seq, int in_front_of ) +{ + CvSeqBlock *block; + + if( !seq ) + CV_Error( CV_StsNullPtr, "" ); + block = seq->free_blocks; + + if( !block ) + { + int elem_size = seq->elem_size; + int delta_elems = seq->delta_elems; + CvMemStorage *storage = seq->storage; + + if( seq->total >= delta_elems*4 ) + cvSetSeqBlockSize( seq, delta_elems*2 ); + + if( !storage ) + CV_Error( CV_StsNullPtr, "The sequence has NULL storage pointer" ); + + /* If there is a free space just after last allocated block + and it is big enough then enlarge the last block. + This can happen only if the new block is added to the end of sequence: */ + if( (unsigned)(ICV_FREE_PTR(storage) - seq->block_max) < CV_STRUCT_ALIGN && + storage->free_space >= seq->elem_size && !in_front_of ) + { + int delta = storage->free_space / elem_size; + + delta = MIN( delta, delta_elems ) * elem_size; + seq->block_max += delta; + storage->free_space = cvAlignLeft((int)(((schar*)storage->top + storage->block_size) - + seq->block_max), CV_STRUCT_ALIGN ); + return; + } + else + { + int delta = elem_size * delta_elems + ICV_ALIGNED_SEQ_BLOCK_SIZE; + + /* Try to allocate elements: */ + if( storage->free_space < delta ) + { + int small_block_size = MAX(1, delta_elems/3)*elem_size + + ICV_ALIGNED_SEQ_BLOCK_SIZE; + /* try to allocate smaller part */ + if( storage->free_space >= small_block_size + CV_STRUCT_ALIGN ) + { + delta = (storage->free_space - ICV_ALIGNED_SEQ_BLOCK_SIZE)/seq->elem_size; + delta = delta*seq->elem_size + ICV_ALIGNED_SEQ_BLOCK_SIZE; + } + else + { + icvGoNextMemBlock( storage ); + assert( storage->free_space >= delta ); + } + } + + block = (CvSeqBlock*)cvMemStorageAlloc( storage, delta ); + block->data = (schar*)cvAlignPtr( block + 1, CV_STRUCT_ALIGN ); + block->count = delta - ICV_ALIGNED_SEQ_BLOCK_SIZE; + block->prev = 
block->next = 0; + } + } + else + { + seq->free_blocks = block->next; + } + + if( !(seq->first) ) + { + seq->first = block; + block->prev = block->next = block; + } + else + { + block->prev = seq->first->prev; + block->next = seq->first; + block->prev->next = block->next->prev = block; + } + + /* For free blocks the field means + * total number of bytes in the block. + * + * For used blocks it means current number + * of sequence elements in the block: + */ + assert( block->count % seq->elem_size == 0 && block->count > 0 ); + + if( !in_front_of ) + { + seq->ptr = block->data; + seq->block_max = block->data + block->count; + block->start_index = block == block->prev ? 0 : + block->prev->start_index + block->prev->count; + } + else + { + int delta = block->count / seq->elem_size; + block->data += block->count; + + if( block != block->prev ) + { + assert( seq->first->start_index == 0 ); + seq->first = block; + } + else + { + seq->block_max = seq->ptr = block->data; + } + + block->start_index = 0; + + for( ;; ) + { + block->start_index += delta; + block = block->next; + if( block == seq->first ) + break; + } + } + + block->count = 0; +} + +/* Recycle a sequence block: */ +static void +icvFreeSeqBlock( CvSeq *seq, int in_front_of ) +{ + CvSeqBlock *block = seq->first; + + assert( (in_front_of ? 
block : block->prev)->count == 0 ); + + if( block == block->prev ) /* single block case */ + { + block->count = (int)(seq->block_max - block->data) + block->start_index * seq->elem_size; + block->data = seq->block_max - block->count; + seq->first = 0; + seq->ptr = seq->block_max = 0; + seq->total = 0; + } + else + { + if( !in_front_of ) + { + block = block->prev; + assert( seq->ptr == block->data ); + + block->count = (int)(seq->block_max - seq->ptr); + seq->block_max = seq->ptr = block->prev->data + + block->prev->count * seq->elem_size; + } + else + { + int delta = block->start_index; + + block->count = delta * seq->elem_size; + block->data -= block->count; + + /* Update start indices of sequence blocks: */ + for( ;; ) + { + block->start_index -= delta; + block = block->next; + if( block == seq->first ) + break; + } + + seq->first = block->next; + } + + block->prev->next = block->next; + block->next->prev = block->prev; + } + + assert( block->count > 0 && block->count % seq->elem_size == 0 ); + block->next = seq->free_blocks; + seq->free_blocks = block; +} + + +/****************************************************************************************\ +* Sequence Writer implementation * +\****************************************************************************************/ + +/* Initialize sequence writer: */ +CV_IMPL void +cvStartAppendToSeq( CvSeq *seq, CvSeqWriter * writer ) +{ + if( !seq || !writer ) + CV_Error( CV_StsNullPtr, "" ); + + memset( writer, 0, sizeof( *writer )); + writer->header_size = sizeof( CvSeqWriter ); + + writer->seq = seq; + writer->block = seq->first ? 
seq->first->prev : 0; + writer->ptr = seq->ptr; + writer->block_max = seq->block_max; +} + + +/* Initialize sequence writer: */ +CV_IMPL void +cvStartWriteSeq( int seq_flags, int header_size, + int elem_size, CvMemStorage * storage, CvSeqWriter * writer ) +{ + if( !storage || !writer ) + CV_Error( CV_StsNullPtr, "" ); + + CvSeq* seq = cvCreateSeq( seq_flags, header_size, elem_size, storage ); + cvStartAppendToSeq( seq, writer ); +} + + +/* Update sequence header: */ +CV_IMPL void +cvFlushSeqWriter( CvSeqWriter * writer ) +{ + if( !writer ) + CV_Error( CV_StsNullPtr, "" ); + + CvSeq* seq = writer->seq; + seq->ptr = writer->ptr; + + if( writer->block ) + { + int total = 0; + CvSeqBlock *first_block = writer->seq->first; + CvSeqBlock *block = first_block; + + writer->block->count = (int)((writer->ptr - writer->block->data) / seq->elem_size); + assert( writer->block->count > 0 ); + + do + { + total += block->count; + block = block->next; + } + while( block != first_block ); + + writer->seq->total = total; + } +} + + +/* Calls icvFlushSeqWriter and finishes writing process: */ +CV_IMPL CvSeq * +cvEndWriteSeq( CvSeqWriter * writer ) +{ + if( !writer ) + CV_Error( CV_StsNullPtr, "" ); + + cvFlushSeqWriter( writer ); + CvSeq* seq = writer->seq; + + /* Truncate the last block: */ + if( writer->block && writer->seq->storage ) + { + CvMemStorage *storage = seq->storage; + schar *storage_block_max = (schar *) storage->top + storage->block_size; + + assert( writer->block->count > 0 ); + + if( (unsigned)((storage_block_max - storage->free_space) + - seq->block_max) < CV_STRUCT_ALIGN ) + { + storage->free_space = cvAlignLeft((int)(storage_block_max - seq->ptr), CV_STRUCT_ALIGN); + seq->block_max = seq->ptr; + } + } + + writer->ptr = 0; + return seq; +} + + +/* Create new sequence block: */ +CV_IMPL void +cvCreateSeqBlock( CvSeqWriter * writer ) +{ + if( !writer || !writer->seq ) + CV_Error( CV_StsNullPtr, "" ); + + CvSeq* seq = writer->seq; + + cvFlushSeqWriter( writer ); + + 
icvGrowSeq( seq, 0 ); + + writer->block = seq->first->prev; + writer->ptr = seq->ptr; + writer->block_max = seq->block_max; +} + + +/****************************************************************************************\ +* Sequence Reader implementation * +\****************************************************************************************/ + +/* Initialize sequence reader: */ +CV_IMPL void +cvStartReadSeq( const CvSeq *seq, CvSeqReader * reader, int reverse ) +{ + CvSeqBlock *first_block; + CvSeqBlock *last_block; + + if( reader ) + { + reader->seq = 0; + reader->block = 0; + reader->ptr = reader->block_max = reader->block_min = 0; + } + + if( !seq || !reader ) + CV_Error( CV_StsNullPtr, "" ); + + reader->header_size = sizeof( CvSeqReader ); + reader->seq = (CvSeq*)seq; + + first_block = seq->first; + + if( first_block ) + { + last_block = first_block->prev; + reader->ptr = first_block->data; + reader->prev_elem = CV_GET_LAST_ELEM( seq, last_block ); + reader->delta_index = seq->first->start_index; + + if( reverse ) + { + schar *temp = reader->ptr; + + reader->ptr = reader->prev_elem; + reader->prev_elem = temp; + + reader->block = last_block; + } + else + { + reader->block = first_block; + } + + reader->block_min = reader->block->data; + reader->block_max = reader->block_min + reader->block->count * seq->elem_size; + } + else + { + reader->delta_index = 0; + reader->block = 0; + + reader->ptr = reader->prev_elem = reader->block_min = reader->block_max = 0; + } +} + + +/* Change the current reading block + * to the previous or to the next: + */ +CV_IMPL void +cvChangeSeqBlock( void* _reader, int direction ) +{ + CvSeqReader* reader = (CvSeqReader*)_reader; + + if( !reader ) + CV_Error( CV_StsNullPtr, "" ); + + if( direction > 0 ) + { + reader->block = reader->block->next; + reader->ptr = reader->block->data; + } + else + { + reader->block = reader->block->prev; + reader->ptr = CV_GET_LAST_ELEM( reader->seq, reader->block ); + } + reader->block_min = 
reader->block->data; + reader->block_max = reader->block_min + reader->block->count * reader->seq->elem_size; +} + + +/* Return the current reader position: */ +CV_IMPL int +cvGetSeqReaderPos( CvSeqReader* reader ) +{ + int elem_size; + int index = -1; + + if( !reader || !reader->ptr ) + CV_Error( CV_StsNullPtr, "" ); + + elem_size = reader->seq->elem_size; + if( elem_size <= ICV_SHIFT_TAB_MAX && (index = icvPower2ShiftTab[elem_size - 1]) >= 0 ) + index = (int)((reader->ptr - reader->block_min) >> index); + else + index = (int)((reader->ptr - reader->block_min) / elem_size); + + index += reader->block->start_index - reader->delta_index; + + return index; +} + + +/* Set reader position to given position, + * either absolute or relative to the + * current one: + */ +CV_IMPL void +cvSetSeqReaderPos( CvSeqReader* reader, int index, int is_relative ) +{ + CvSeqBlock *block; + int elem_size, count, total; + + if( !reader || !reader->seq ) + CV_Error( CV_StsNullPtr, "" ); + + total = reader->seq->total; + elem_size = reader->seq->elem_size; + + if( !is_relative ) + { + if( index < 0 ) + { + if( index < -total ) + CV_Error( CV_StsOutOfRange, "" ); + index += total; + } + else if( index >= total ) + { + index -= total; + if( index >= total ) + CV_Error( CV_StsOutOfRange, "" ); + } + + block = reader->seq->first; + if( index >= (count = block->count) ) + { + if( index + index <= total ) + { + do + { + block = block->next; + index -= count; + } + while( index >= (count = block->count) ); + } + else + { + do + { + block = block->prev; + total -= block->count; + } + while( index < total ); + index -= total; + } + } + reader->ptr = block->data + index * elem_size; + if( reader->block != block ) + { + reader->block = block; + reader->block_min = block->data; + reader->block_max = block->data + block->count * elem_size; + } + } + else + { + schar* ptr = reader->ptr; + index *= elem_size; + block = reader->block; + + if( index > 0 ) + { + while( ptr + index >= reader->block_max ) + 
{ + int delta = (int)(reader->block_max - ptr); + index -= delta; + reader->block = block = block->next; + reader->block_min = ptr = block->data; + reader->block_max = block->data + block->count*elem_size; + } + reader->ptr = ptr + index; + } + else + { + while( ptr + index < reader->block_min ) + { + int delta = (int)(ptr - reader->block_min); + index += delta; + reader->block = block = block->prev; + reader->block_min = block->data; + reader->block_max = ptr = block->data + block->count*elem_size; + } + reader->ptr = ptr + index; + } + } +} + + +/* Push element onto the sequence: */ +CV_IMPL schar* +cvSeqPush( CvSeq *seq, const void *element ) +{ + schar *ptr = 0; + size_t elem_size; + + if( !seq ) + CV_Error( CV_StsNullPtr, "" ); + + elem_size = seq->elem_size; + ptr = seq->ptr; + + if( ptr >= seq->block_max ) + { + icvGrowSeq( seq, 0 ); + + ptr = seq->ptr; + assert( ptr + elem_size <= seq->block_max /*&& ptr == seq->block_min */ ); + } + + if( element ) + memcpy( ptr, element, elem_size ); + seq->first->prev->count++; + seq->total++; + seq->ptr = ptr + elem_size; + + return ptr; +} + + +/* Pop last element off of the sequence: */ +CV_IMPL void +cvSeqPop( CvSeq *seq, void *element ) +{ + schar *ptr; + int elem_size; + + if( !seq ) + CV_Error( CV_StsNullPtr, "" ); + if( seq->total <= 0 ) + CV_Error( CV_StsBadSize, "" ); + + elem_size = seq->elem_size; + seq->ptr = ptr = seq->ptr - elem_size; + + if( element ) + memcpy( element, ptr, elem_size ); + seq->ptr = ptr; + seq->total--; + + if( --(seq->first->prev->count) == 0 ) + { + icvFreeSeqBlock( seq, 0 ); + assert( seq->ptr == seq->block_max ); + } +} + + +/* Push element onto the front of the sequence: */ +CV_IMPL schar* +cvSeqPushFront( CvSeq *seq, const void *element ) +{ + schar* ptr = 0; + int elem_size; + CvSeqBlock *block; + + if( !seq ) + CV_Error( CV_StsNullPtr, "" ); + + elem_size = seq->elem_size; + block = seq->first; + + if( !block || block->start_index == 0 ) + { + icvGrowSeq( seq, 1 ); + + block = 
seq->first; + assert( block->start_index > 0 ); + } + + ptr = block->data -= elem_size; + + if( element ) + memcpy( ptr, element, elem_size ); + block->count++; + block->start_index--; + seq->total++; + + return ptr; +} + + +/* Shift out first element of the sequence: */ +CV_IMPL void +cvSeqPopFront( CvSeq *seq, void *element ) +{ + int elem_size; + CvSeqBlock *block; + + if( !seq ) + CV_Error( CV_StsNullPtr, "" ); + if( seq->total <= 0 ) + CV_Error( CV_StsBadSize, "" ); + + elem_size = seq->elem_size; + block = seq->first; + + if( element ) + memcpy( element, block->data, elem_size ); + block->data += elem_size; + block->start_index++; + seq->total--; + + if( --(block->count) == 0 ) + icvFreeSeqBlock( seq, 1 ); +} + +/* Insert new element in middle of sequence: */ +CV_IMPL schar* +cvSeqInsert( CvSeq *seq, int before_index, const void *element ) +{ + int elem_size; + int block_size; + CvSeqBlock *block; + int delta_index; + int total; + schar* ret_ptr = 0; + + if( !seq ) + CV_Error( CV_StsNullPtr, "" ); + + total = seq->total; + before_index += before_index < 0 ? total : 0; + before_index -= before_index > total ? 
total : 0; + + if( (unsigned)before_index > (unsigned)total ) + CV_Error( CV_StsOutOfRange, "" ); + + if( before_index == total ) + { + ret_ptr = cvSeqPush( seq, element ); + } + else if( before_index == 0 ) + { + ret_ptr = cvSeqPushFront( seq, element ); + } + else + { + elem_size = seq->elem_size; + + if( before_index >= total >> 1 ) + { + schar *ptr = seq->ptr + elem_size; + + if( ptr > seq->block_max ) + { + icvGrowSeq( seq, 0 ); + + ptr = seq->ptr + elem_size; + assert( ptr <= seq->block_max ); + } + + delta_index = seq->first->start_index; + block = seq->first->prev; + block->count++; + block_size = (int)(ptr - block->data); + + while( before_index < block->start_index - delta_index ) + { + CvSeqBlock *prev_block = block->prev; + + memmove( block->data + elem_size, block->data, block_size - elem_size ); + block_size = prev_block->count * elem_size; + memcpy( block->data, prev_block->data + block_size - elem_size, elem_size ); + block = prev_block; + + /* Check that we don't fall into an infinite loop: */ + assert( block != seq->first->prev ); + } + + before_index = (before_index - block->start_index + delta_index) * elem_size; + memmove( block->data + before_index + elem_size, block->data + before_index, + block_size - before_index - elem_size ); + + ret_ptr = block->data + before_index; + + if( element ) + memcpy( ret_ptr, element, elem_size ); + seq->ptr = ptr; + } + else + { + block = seq->first; + + if( block->start_index == 0 ) + { + icvGrowSeq( seq, 1 ); + + block = seq->first; + } + + delta_index = block->start_index; + block->count++; + block->start_index--; + block->data -= elem_size; + + while( before_index > block->start_index - delta_index + block->count ) + { + CvSeqBlock *next_block = block->next; + + block_size = block->count * elem_size; + memmove( block->data, block->data + elem_size, block_size - elem_size ); + memcpy( block->data + block_size - elem_size, next_block->data, elem_size ); + block = next_block; + + /* Check that we don't fall 
into an infinite loop: */ + assert( block != seq->first ); + } + + before_index = (before_index - block->start_index + delta_index) * elem_size; + memmove( block->data, block->data + elem_size, before_index - elem_size ); + + ret_ptr = block->data + before_index - elem_size; + + if( element ) + memcpy( ret_ptr, element, elem_size ); + } + + seq->total = total + 1; + } + + return ret_ptr; +} + + +/* Removes element from sequence: */ +CV_IMPL void +cvSeqRemove( CvSeq *seq, int index ) +{ + schar *ptr; + int elem_size; + int block_size; + CvSeqBlock *block; + int delta_index; + int total, front = 0; + + if( !seq ) + CV_Error( CV_StsNullPtr, "" ); + + total = seq->total; + + index += index < 0 ? total : 0; + index -= index >= total ? total : 0; + + if( (unsigned) index >= (unsigned) total ) + CV_Error( CV_StsOutOfRange, "Invalid index" ); + + if( index == total - 1 ) + { + cvSeqPop( seq, 0 ); + } + else if( index == 0 ) + { + cvSeqPopFront( seq, 0 ); + } + else + { + block = seq->first; + elem_size = seq->elem_size; + delta_index = block->start_index; + while( block->start_index - delta_index + block->count <= index ) + block = block->next; + + ptr = block->data + (index - block->start_index + delta_index) * elem_size; + + front = index < total >> 1; + if( !front ) + { + block_size = block->count * elem_size - (int)(ptr - block->data); + + while( block != seq->first->prev ) /* while not the last block */ + { + CvSeqBlock *next_block = block->next; + + memmove( ptr, ptr + elem_size, block_size - elem_size ); + memcpy( ptr + block_size - elem_size, next_block->data, elem_size ); + block = next_block; + ptr = block->data; + block_size = block->count * elem_size; + } + + memmove( ptr, ptr + elem_size, block_size - elem_size ); + seq->ptr -= elem_size; + } + else + { + ptr += elem_size; + block_size = (int)(ptr - block->data); + + while( block != seq->first ) + { + CvSeqBlock *prev_block = block->prev; + + memmove( block->data + elem_size, block->data, block_size - 
elem_size ); + block_size = prev_block->count * elem_size; + memcpy( block->data, prev_block->data + block_size - elem_size, elem_size ); + block = prev_block; + } + + memmove( block->data + elem_size, block->data, block_size - elem_size ); + block->data += elem_size; + block->start_index++; + } + + seq->total = total - 1; + if( --block->count == 0 ) + icvFreeSeqBlock( seq, front ); + } +} + + +/* Add several elements to the beginning or end of a sequence: */ +CV_IMPL void +cvSeqPushMulti( CvSeq *seq, const void *_elements, int count, int front ) +{ + char *elements = (char *) _elements; + + if( !seq ) + CV_Error( CV_StsNullPtr, "NULL sequence pointer" ); + if( count < 0 ) + CV_Error( CV_StsBadSize, "number of removed elements is negative" ); + + int elem_size = seq->elem_size; + + if( !front ) + { + while( count > 0 ) + { + int delta = (int)((seq->block_max - seq->ptr) / elem_size); + + delta = MIN( delta, count ); + if( delta > 0 ) + { + seq->first->prev->count += delta; + seq->total += delta; + count -= delta; + delta *= elem_size; + if( elements ) + { + memcpy( seq->ptr, elements, delta ); + elements += delta; + } + seq->ptr += delta; + } + + if( count > 0 ) + icvGrowSeq( seq, 0 ); + } + } + else + { + CvSeqBlock* block = seq->first; + + while( count > 0 ) + { + int delta; + + if( !block || block->start_index == 0 ) + { + icvGrowSeq( seq, 1 ); + + block = seq->first; + assert( block->start_index > 0 ); + } + + delta = MIN( block->start_index, count ); + count -= delta; + block->start_index -= delta; + block->count += delta; + seq->total += delta; + delta *= elem_size; + block->data -= delta; + + if( elements ) + memcpy( block->data, elements + count*elem_size, delta ); + } + } +} + + +/* Remove several elements from the end of sequence: */ +CV_IMPL void +cvSeqPopMulti( CvSeq *seq, void *_elements, int count, int front ) +{ + char *elements = (char *) _elements; + + if( !seq ) + CV_Error( CV_StsNullPtr, "NULL sequence pointer" ); + if( count < 0 ) + CV_Error( 
CV_StsBadSize, "number of removed elements is negative" ); + + count = MIN( count, seq->total ); + + if( !front ) + { + if( elements ) + elements += count * seq->elem_size; + + while( count > 0 ) + { + int delta = seq->first->prev->count; + + delta = MIN( delta, count ); + assert( delta > 0 ); + + seq->first->prev->count -= delta; + seq->total -= delta; + count -= delta; + delta *= seq->elem_size; + seq->ptr -= delta; + + if( elements ) + { + elements -= delta; + memcpy( elements, seq->ptr, delta ); + } + + if( seq->first->prev->count == 0 ) + icvFreeSeqBlock( seq, 0 ); + } + } + else + { + while( count > 0 ) + { + int delta = seq->first->count; + + delta = MIN( delta, count ); + assert( delta > 0 ); + + seq->first->count -= delta; + seq->total -= delta; + count -= delta; + seq->first->start_index += delta; + delta *= seq->elem_size; + + if( elements ) + { + memcpy( elements, seq->first->data, delta ); + elements += delta; + } + + seq->first->data += delta; + if( seq->first->count == 0 ) + icvFreeSeqBlock( seq, 1 ); + } + } +} + + +/* Remove all elements from a sequence: */ +CV_IMPL void +cvClearSeq( CvSeq *seq ) +{ + if( !seq ) + CV_Error( CV_StsNullPtr, "" ); + cvSeqPopMulti( seq, 0, seq->total ); +} + + +CV_IMPL CvSeq* +cvSeqSlice( const CvSeq* seq, CvSlice slice, CvMemStorage* storage, int copy_data ) +{ + CvSeq* subseq = 0; + int elem_size, count, length; + CvSeqReader reader; + CvSeqBlock *block, *first_block = 0, *last_block = 0; + + if( !CV_IS_SEQ(seq) ) + CV_Error( CV_StsBadArg, "Invalid sequence header" ); + + if( !storage ) + { + storage = seq->storage; + if( !storage ) + CV_Error( CV_StsNullPtr, "NULL storage pointer" ); + } + + elem_size = seq->elem_size; + length = cvSliceLength( slice, seq ); + if( slice.start_index < 0 ) + slice.start_index += seq->total; + else if( slice.start_index >= seq->total ) + slice.start_index -= seq->total; + if( (unsigned)length > (unsigned)seq->total || + ((unsigned)slice.start_index >= (unsigned)seq->total && length != 
0) ) + CV_Error( CV_StsOutOfRange, "Bad sequence slice" ); + + subseq = cvCreateSeq( seq->flags, seq->header_size, elem_size, storage ); + + if( length > 0 ) + { + cvStartReadSeq( seq, &reader, 0 ); + cvSetSeqReaderPos( &reader, slice.start_index, 0 ); + count = (int)((reader.block_max - reader.ptr)/elem_size); + + do + { + int bl = MIN( count, length ); + + if( !copy_data ) + { + block = (CvSeqBlock*)cvMemStorageAlloc( storage, sizeof(*block) ); + if( !first_block ) + { + first_block = subseq->first = block->prev = block->next = block; + block->start_index = 0; + } + else + { + block->prev = last_block; + block->next = first_block; + last_block->next = first_block->prev = block; + block->start_index = last_block->start_index + last_block->count; + } + last_block = block; + block->data = reader.ptr; + block->count = bl; + subseq->total += bl; + } + else + cvSeqPushMulti( subseq, reader.ptr, bl, 0 ); + length -= bl; + reader.block = reader.block->next; + reader.ptr = reader.block->data; + count = reader.block->count; + } + while( length > 0 ); + } + + return subseq; +} + + +// Remove slice from the middle of the sequence. +// !!! TODO !!! 
Implement more efficient algorithm +CV_IMPL void +cvSeqRemoveSlice( CvSeq* seq, CvSlice slice ) +{ + int total, length; + + if( !CV_IS_SEQ(seq) ) + CV_Error( CV_StsBadArg, "Invalid sequence header" ); + + length = cvSliceLength( slice, seq ); + total = seq->total; + + if( slice.start_index < 0 ) + slice.start_index += total; + else if( slice.start_index >= total ) + slice.start_index -= total; + + if( (unsigned)slice.start_index >= (unsigned)total ) + CV_Error( CV_StsOutOfRange, "start slice index is out of range" ); + + slice.end_index = slice.start_index + length; + + if( slice.end_index < total ) + { + CvSeqReader reader_to, reader_from; + int elem_size = seq->elem_size; + + cvStartReadSeq( seq, &reader_to ); + cvStartReadSeq( seq, &reader_from ); + + if( slice.start_index > total - slice.end_index ) + { + int i, count = seq->total - slice.end_index; + cvSetSeqReaderPos( &reader_to, slice.start_index ); + cvSetSeqReaderPos( &reader_from, slice.end_index ); + + for( i = 0; i < count; i++ ) + { + memcpy( reader_to.ptr, reader_from.ptr, elem_size ); + CV_NEXT_SEQ_ELEM( elem_size, reader_to ); + CV_NEXT_SEQ_ELEM( elem_size, reader_from ); + } + + cvSeqPopMulti( seq, 0, slice.end_index - slice.start_index ); + } + else + { + int i, count = slice.start_index; + cvSetSeqReaderPos( &reader_to, slice.end_index ); + cvSetSeqReaderPos( &reader_from, slice.start_index ); + + for( i = 0; i < count; i++ ) + { + CV_PREV_SEQ_ELEM( elem_size, reader_to ); + CV_PREV_SEQ_ELEM( elem_size, reader_from ); + + memcpy( reader_to.ptr, reader_from.ptr, elem_size ); + } + + cvSeqPopMulti( seq, 0, slice.end_index - slice.start_index, 1 ); + } + } + else + { + cvSeqPopMulti( seq, 0, total - slice.start_index ); + cvSeqPopMulti( seq, 0, slice.end_index - total, 1 ); + } +} + + +// Insert a sequence into the middle of another sequence: +// !!! TODO !!! 
Implement more efficient algorithm +CV_IMPL void +cvSeqInsertSlice( CvSeq* seq, int index, const CvArr* from_arr ) +{ + CvSeqReader reader_to, reader_from; + int i, elem_size, total, from_total; + CvSeq from_header, *from = (CvSeq*)from_arr; + CvSeqBlock block; + + if( !CV_IS_SEQ(seq) ) + CV_Error( CV_StsBadArg, "Invalid destination sequence header" ); + + if( !CV_IS_SEQ(from)) + { + CvMat* mat = (CvMat*)from; + if( !CV_IS_MAT(mat)) + CV_Error( CV_StsBadArg, "Source is not a sequence nor matrix" ); + + if( !CV_IS_MAT_CONT(mat->type) || (mat->rows != 1 && mat->cols != 1) ) + CV_Error( CV_StsBadArg, "The source array must be 1d coninuous vector" ); + + from = cvMakeSeqHeaderForArray( CV_SEQ_KIND_GENERIC, sizeof(from_header), + CV_ELEM_SIZE(mat->type), + mat->data.ptr, mat->cols + mat->rows - 1, + &from_header, &block ); + } + + if( seq->elem_size != from->elem_size ) + CV_Error( CV_StsUnmatchedSizes, + "Source and destination sequence element sizes are different." ); + + from_total = from->total; + + if( from_total == 0 ) + return; + + total = seq->total; + index += index < 0 ? total : 0; + index -= index > total ? 
total : 0; + + if( (unsigned)index > (unsigned)total ) + CV_Error( CV_StsOutOfRange, "" ); + + elem_size = seq->elem_size; + + if( index < (total >> 1) ) + { + cvSeqPushMulti( seq, 0, from_total, 1 ); + + cvStartReadSeq( seq, &reader_to ); + cvStartReadSeq( seq, &reader_from ); + cvSetSeqReaderPos( &reader_from, from_total ); + + for( i = 0; i < index; i++ ) + { + memcpy( reader_to.ptr, reader_from.ptr, elem_size ); + CV_NEXT_SEQ_ELEM( elem_size, reader_to ); + CV_NEXT_SEQ_ELEM( elem_size, reader_from ); + } + } + else + { + cvSeqPushMulti( seq, 0, from_total ); + + cvStartReadSeq( seq, &reader_to ); + cvStartReadSeq( seq, &reader_from ); + cvSetSeqReaderPos( &reader_from, total ); + cvSetSeqReaderPos( &reader_to, seq->total ); + + for( i = 0; i < total - index; i++ ) + { + CV_PREV_SEQ_ELEM( elem_size, reader_to ); + CV_PREV_SEQ_ELEM( elem_size, reader_from ); + memcpy( reader_to.ptr, reader_from.ptr, elem_size ); + } + } + + cvStartReadSeq( from, &reader_from ); + cvSetSeqReaderPos( &reader_to, index ); + + for( i = 0; i < from_total; i++ ) + { + memcpy( reader_to.ptr, reader_from.ptr, elem_size ); + CV_NEXT_SEQ_ELEM( elem_size, reader_to ); + CV_NEXT_SEQ_ELEM( elem_size, reader_from ); + } +} + +// Sort the sequence using user-specified comparison function. +// The semantics is similar to qsort() function. +// The code is based on BSD system qsort(): +// * Copyright (c) 1992, 1993 +// * The Regents of the University of California. All rights reserved. +// * +// * Redistribution and use in source and binary forms, with or without +// * modification, are permitted provided that the following conditions +// * are met: +// * 1. Redistributions of source code must retain the above copyright +// * notice, this list of conditions and the following disclaimer. +// * 2. 
Redistributions in binary form must reproduce the above copyright +// * notice, this list of conditions and the following disclaimer in the +// * documentation and/or other materials provided with the distribution. +// * 3. All advertising materials mentioning features or use of this software +// * must display the following acknowledgement: +// * This product includes software developed by the University of +// * California, Berkeley and its contributors. +// * 4. Neither the name of the University nor the names of its contributors +// * may be used to endorse or promote products derived from this software +// * without specific prior written permission. +// * +// * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND +// * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE +// * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +// * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +// * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +// * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +// * SUCH DAMAGE. 
+ +typedef struct CvSeqReaderPos +{ + CvSeqBlock* block; + schar* ptr; + schar* block_min; + schar* block_max; +} +CvSeqReaderPos; + +#define CV_SAVE_READER_POS( reader, pos ) \ +{ \ + (pos).block = (reader).block; \ + (pos).ptr = (reader).ptr; \ + (pos).block_min = (reader).block_min; \ + (pos).block_max = (reader).block_max; \ +} + +#define CV_RESTORE_READER_POS( reader, pos )\ +{ \ + (reader).block = (pos).block; \ + (reader).ptr = (pos).ptr; \ + (reader).block_min = (pos).block_min; \ + (reader).block_max = (pos).block_max; \ +} + +inline schar* +icvMed3( schar* a, schar* b, schar* c, CvCmpFunc cmp_func, void* aux ) +{ + return cmp_func(a, b, aux) < 0 ? + (cmp_func(b, c, aux) < 0 ? b : cmp_func(a, c, aux) < 0 ? c : a) + :(cmp_func(b, c, aux) > 0 ? b : cmp_func(a, c, aux) < 0 ? a : c); +} + +CV_IMPL void +cvSeqSort( CvSeq* seq, CvCmpFunc cmp_func, void* aux ) +{ + int elem_size; + int isort_thresh = 7; + CvSeqReader left, right; + int sp = 0; + + struct + { + CvSeqReaderPos lb; + CvSeqReaderPos ub; + } + stack[48]; + + if( !CV_IS_SEQ(seq) ) + CV_Error( !seq ? 
CV_StsNullPtr : CV_StsBadArg, "Bad input sequence" ); + + if( !cmp_func ) + CV_Error( CV_StsNullPtr, "Null compare function" ); + + if( seq->total <= 1 ) + return; + + elem_size = seq->elem_size; + isort_thresh *= elem_size; + + cvStartReadSeq( seq, &left, 0 ); + right = left; + CV_SAVE_READER_POS( left, stack[0].lb ); + CV_PREV_SEQ_ELEM( elem_size, right ); + CV_SAVE_READER_POS( right, stack[0].ub ); + + while( sp >= 0 ) + { + CV_RESTORE_READER_POS( left, stack[sp].lb ); + CV_RESTORE_READER_POS( right, stack[sp].ub ); + sp--; + + for(;;) + { + int i, n, m; + CvSeqReader ptr, ptr2; + + if( left.block == right.block ) + n = (int)(right.ptr - left.ptr) + elem_size; + else + { + n = cvGetSeqReaderPos( &right ); + n = (n - cvGetSeqReaderPos( &left ) + 1)*elem_size; + } + + if( n <= isort_thresh ) + { + insert_sort: + ptr = ptr2 = left; + CV_NEXT_SEQ_ELEM( elem_size, ptr ); + CV_NEXT_SEQ_ELEM( elem_size, right ); + while( ptr.ptr != right.ptr ) + { + ptr2.ptr = ptr.ptr; + if( ptr2.block != ptr.block ) + { + ptr2.block = ptr.block; + ptr2.block_min = ptr.block_min; + ptr2.block_max = ptr.block_max; + } + while( ptr2.ptr != left.ptr ) + { + schar* cur = ptr2.ptr; + CV_PREV_SEQ_ELEM( elem_size, ptr2 ); + if( cmp_func( ptr2.ptr, cur, aux ) <= 0 ) + break; + CV_SWAP_ELEMS( ptr2.ptr, cur, elem_size ); + } + CV_NEXT_SEQ_ELEM( elem_size, ptr ); + } + break; + } + else + { + CvSeqReader left0, left1, right0, right1; + CvSeqReader tmp0, tmp1; + schar *m1, *m2, *m3, *pivot; + int swap_cnt = 0; + int l, l0, l1, r, r0, r1; + + left0 = tmp0 = left; + right0 = right1 = right; + n /= elem_size; + + if( n > 40 ) + { + int d = n / 8; + schar *p1, *p2, *p3; + p1 = tmp0.ptr; + cvSetSeqReaderPos( &tmp0, d, 1 ); + p2 = tmp0.ptr; + cvSetSeqReaderPos( &tmp0, d, 1 ); + p3 = tmp0.ptr; + m1 = icvMed3( p1, p2, p3, cmp_func, aux ); + cvSetSeqReaderPos( &tmp0, (n/2) - d*3, 1 ); + p1 = tmp0.ptr; + cvSetSeqReaderPos( &tmp0, d, 1 ); + p2 = tmp0.ptr; + cvSetSeqReaderPos( &tmp0, d, 1 ); + p3 = tmp0.ptr; 
+ m2 = icvMed3( p1, p2, p3, cmp_func, aux ); + cvSetSeqReaderPos( &tmp0, n - 1 - d*3 - n/2, 1 ); + p1 = tmp0.ptr; + cvSetSeqReaderPos( &tmp0, d, 1 ); + p2 = tmp0.ptr; + cvSetSeqReaderPos( &tmp0, d, 1 ); + p3 = tmp0.ptr; + m3 = icvMed3( p1, p2, p3, cmp_func, aux ); + } + else + { + m1 = tmp0.ptr; + cvSetSeqReaderPos( &tmp0, n/2, 1 ); + m2 = tmp0.ptr; + cvSetSeqReaderPos( &tmp0, n - 1 - n/2, 1 ); + m3 = tmp0.ptr; + } + + pivot = icvMed3( m1, m2, m3, cmp_func, aux ); + left = left0; + if( pivot != left.ptr ) + { + CV_SWAP_ELEMS( pivot, left.ptr, elem_size ); + pivot = left.ptr; + } + CV_NEXT_SEQ_ELEM( elem_size, left ); + left1 = left; + + for(;;) + { + while( left.ptr != right.ptr && (r = cmp_func(left.ptr, pivot, aux)) <= 0 ) + { + if( r == 0 ) + { + if( left1.ptr != left.ptr ) + CV_SWAP_ELEMS( left1.ptr, left.ptr, elem_size ); + swap_cnt = 1; + CV_NEXT_SEQ_ELEM( elem_size, left1 ); + } + CV_NEXT_SEQ_ELEM( elem_size, left ); + } + + while( left.ptr != right.ptr && (r = cmp_func(right.ptr,pivot, aux)) >= 0 ) + { + if( r == 0 ) + { + if( right1.ptr != right.ptr ) + CV_SWAP_ELEMS( right1.ptr, right.ptr, elem_size ); + swap_cnt = 1; + CV_PREV_SEQ_ELEM( elem_size, right1 ); + } + CV_PREV_SEQ_ELEM( elem_size, right ); + } + + if( left.ptr == right.ptr ) + { + r = cmp_func(left.ptr, pivot, aux); + if( r == 0 ) + { + if( left1.ptr != left.ptr ) + CV_SWAP_ELEMS( left1.ptr, left.ptr, elem_size ); + swap_cnt = 1; + CV_NEXT_SEQ_ELEM( elem_size, left1 ); + } + if( r <= 0 ) + { + CV_NEXT_SEQ_ELEM( elem_size, left ); + } + else + { + CV_PREV_SEQ_ELEM( elem_size, right ); + } + break; + } + + CV_SWAP_ELEMS( left.ptr, right.ptr, elem_size ); + CV_NEXT_SEQ_ELEM( elem_size, left ); + r = left.ptr == right.ptr; + CV_PREV_SEQ_ELEM( elem_size, right ); + swap_cnt = 1; + if( r ) + break; + } + + if( swap_cnt == 0 ) + { + left = left0, right = right0; + goto insert_sort; + } + + l = cvGetSeqReaderPos( &left ); + if( l == 0 ) + l = seq->total; + l0 = cvGetSeqReaderPos( &left0 ); + l1 = 
cvGetSeqReaderPos( &left1 ); + if( l1 == 0 ) + l1 = seq->total; + + n = MIN( l - l1, l1 - l0 ); + if( n > 0 ) + { + tmp0 = left0; + tmp1 = left; + cvSetSeqReaderPos( &tmp1, 0-n, 1 ); + for( i = 0; i < n; i++ ) + { + CV_SWAP_ELEMS( tmp0.ptr, tmp1.ptr, elem_size ); + CV_NEXT_SEQ_ELEM( elem_size, tmp0 ); + CV_NEXT_SEQ_ELEM( elem_size, tmp1 ); + } + } + + r = cvGetSeqReaderPos( &right ); + r0 = cvGetSeqReaderPos( &right0 ); + r1 = cvGetSeqReaderPos( &right1 ); + m = MIN( r0 - r1, r1 - r ); + if( m > 0 ) + { + tmp0 = left; + tmp1 = right0; + cvSetSeqReaderPos( &tmp1, 1-m, 1 ); + for( i = 0; i < m; i++ ) + { + CV_SWAP_ELEMS( tmp0.ptr, tmp1.ptr, elem_size ); + CV_NEXT_SEQ_ELEM( elem_size, tmp0 ); + CV_NEXT_SEQ_ELEM( elem_size, tmp1 ); + } + } + + n = l - l1; + m = r1 - r; + if( n > 1 ) + { + if( m > 1 ) + { + if( n > m ) + { + sp++; + CV_SAVE_READER_POS( left0, stack[sp].lb ); + cvSetSeqReaderPos( &left0, n - 1, 1 ); + CV_SAVE_READER_POS( left0, stack[sp].ub ); + left = right = right0; + cvSetSeqReaderPos( &left, 1 - m, 1 ); + } + else + { + sp++; + CV_SAVE_READER_POS( right0, stack[sp].ub ); + cvSetSeqReaderPos( &right0, 1 - m, 1 ); + CV_SAVE_READER_POS( right0, stack[sp].lb ); + left = right = left0; + cvSetSeqReaderPos( &right, n - 1, 1 ); + } + } + else + { + left = right = left0; + cvSetSeqReaderPos( &right, n - 1, 1 ); + } + } + else if( m > 1 ) + { + left = right = right0; + cvSetSeqReaderPos( &left, 1 - m, 1 ); + } + else + break; + } + } + } +} + + +CV_IMPL schar* +cvSeqSearch( CvSeq* seq, const void* _elem, CvCmpFunc cmp_func, + int is_sorted, int* _idx, void* userdata ) +{ + schar* result = 0; + const schar* elem = (const schar*)_elem; + int idx = -1; + int i, j; + + if( _idx ) + *_idx = idx; + + if( !CV_IS_SEQ(seq) ) + CV_Error( !seq ? 
CV_StsNullPtr : CV_StsBadArg, "Bad input sequence" ); + + if( !elem ) + CV_Error( CV_StsNullPtr, "Null element pointer" ); + + int elem_size = seq->elem_size; + int total = seq->total; + + if( total == 0 ) + return 0; + + if( !is_sorted ) + { + CvSeqReader reader; + cvStartReadSeq( seq, &reader, 0 ); + + if( cmp_func ) + { + for( i = 0; i < total; i++ ) + { + if( cmp_func( elem, reader.ptr, userdata ) == 0 ) + break; + CV_NEXT_SEQ_ELEM( elem_size, reader ); + } + } + else if( (elem_size & (sizeof(int)-1)) == 0 ) + { + for( i = 0; i < total; i++ ) + { + for( j = 0; j < elem_size; j += sizeof(int) ) + { + if( *(const int*)(reader.ptr + j) != *(const int*)(elem + j) ) + break; + } + if( j == elem_size ) + break; + CV_NEXT_SEQ_ELEM( elem_size, reader ); + } + } + else + { + for( i = 0; i < total; i++ ) + { + for( j = 0; j < elem_size; j++ ) + { + if( reader.ptr[j] != elem[j] ) + break; + } + if( j == elem_size ) + break; + CV_NEXT_SEQ_ELEM( elem_size, reader ); + } + } + + idx = i; + if( i < total ) + result = reader.ptr; + } + else + { + if( !cmp_func ) + CV_Error( CV_StsNullPtr, "Null compare function" ); + + i = 0, j = total; + + while( j > i ) + { + int k = (i+j)>>1, code; + schar* ptr = cvGetSeqElem( seq, k ); + code = cmp_func( elem, ptr, userdata ); + if( !code ) + { + result = ptr; + idx = k; + if( _idx ) + *_idx = idx; + return result; + } + if( code < 0 ) + j = k; + else + i = k+1; + } + idx = j; + } + + if( _idx ) + *_idx = idx; + + return result; +} + + +CV_IMPL void +cvSeqInvert( CvSeq* seq ) +{ + CvSeqReader left_reader, right_reader; + int elem_size; + int i, count; + + cvStartReadSeq( seq, &left_reader, 0 ); + cvStartReadSeq( seq, &right_reader, 1 ); + elem_size = seq->elem_size; + count = seq->total >> 1; + + for( i = 0; i < count; i++ ) + { + CV_SWAP_ELEMS( left_reader.ptr, right_reader.ptr, elem_size ); + CV_NEXT_SEQ_ELEM( elem_size, left_reader ); + CV_PREV_SEQ_ELEM( elem_size, right_reader ); + } +} + + +typedef struct CvPTreeNode +{ + struct 
CvPTreeNode* parent; + schar* element; + int rank; +} +CvPTreeNode; + + +// This function splits the input sequence or set into one or more equivalence classes. +// is_equal(a,b,...) returns non-zero if the two sequence elements +// belong to the same class. The function returns sequence of integers - +// 0-based class indexes for each element. +// +// The algorithm is described in "Introduction to Algorithms" +// by Cormen, Leiserson and Rivest, chapter "Data structures for disjoint sets" +CV_IMPL int +cvSeqPartition( const CvSeq* seq, CvMemStorage* storage, CvSeq** labels, + CvCmpFunc is_equal, void* userdata ) +{ + CvSeq* result = 0; + CvMemStorage* temp_storage = 0; + int class_idx = 0; + + CvSeqWriter writer; + CvSeqReader reader, reader0; + CvSeq* nodes; + int i, j; + int is_set; + + if( !labels ) + CV_Error( CV_StsNullPtr, "" ); + + if( !seq || !is_equal ) + CV_Error( CV_StsNullPtr, "" ); + + if( !storage ) + storage = seq->storage; + + if( !storage ) + CV_Error( CV_StsNullPtr, "" ); + + is_set = CV_IS_SET(seq); + + temp_storage = cvCreateChildMemStorage( storage ); + + nodes = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPTreeNode), temp_storage ); + + cvStartReadSeq( seq, &reader ); + memset( &writer, 0, sizeof(writer)); + cvStartAppendToSeq( nodes, &writer ); + + // Initial O(N) pass. Make a forest of single-vertex trees. + for( i = 0; i < seq->total; i++ ) + { + CvPTreeNode node = { 0, 0, 0 }; + if( !is_set || CV_IS_SET_ELEM( reader.ptr )) + node.element = reader.ptr; + CV_WRITE_SEQ_ELEM( node, writer ); + CV_NEXT_SEQ_ELEM( seq->elem_size, reader ); + } + + cvEndWriteSeq( &writer ); + + // Because in the next loop we will iterate + // through all the sequence nodes each time, + // we do not need to initialize reader every time: + cvStartReadSeq( nodes, &reader ); + cvStartReadSeq( nodes, &reader0 ); + + // The main O(N^2) pass. Merge connected components. 
+ for( i = 0; i < nodes->total; i++ ) + { + CvPTreeNode* node = (CvPTreeNode*)(reader0.ptr); + CvPTreeNode* root = node; + CV_NEXT_SEQ_ELEM( nodes->elem_size, reader0 ); + + if( !node->element ) + continue; + + // find root + while( root->parent ) + root = root->parent; + + for( j = 0; j < nodes->total; j++ ) + { + CvPTreeNode* node2 = (CvPTreeNode*)reader.ptr; + + if( node2->element && node2 != node && + is_equal( node->element, node2->element, userdata )) + { + CvPTreeNode* root2 = node2; + + // unite both trees + while( root2->parent ) + root2 = root2->parent; + + if( root2 != root ) + { + if( root->rank > root2->rank ) + root2->parent = root; + else + { + root->parent = root2; + root2->rank += root->rank == root2->rank; + root = root2; + } + assert( root->parent == 0 ); + + // Compress path from node2 to the root: + while( node2->parent ) + { + CvPTreeNode* temp = node2; + node2 = node2->parent; + temp->parent = root; + } + + // Compress path from node to the root: + node2 = node; + while( node2->parent ) + { + CvPTreeNode* temp = node2; + node2 = node2->parent; + temp->parent = root; + } + } + } + + CV_NEXT_SEQ_ELEM( sizeof(*node), reader ); + } + } + + // Final O(N) pass (Enumerate classes) + // Reuse reader one more time + result = cvCreateSeq( 0, sizeof(CvSeq), sizeof(int), storage ); + cvStartAppendToSeq( result, &writer ); + + for( i = 0; i < nodes->total; i++ ) + { + CvPTreeNode* node = (CvPTreeNode*)reader.ptr; + int idx = -1; + + if( node->element ) + { + while( node->parent ) + node = node->parent; + if( node->rank >= 0 ) + node->rank = ~class_idx++; + idx = ~node->rank; + } + + CV_NEXT_SEQ_ELEM( sizeof(*node), reader ); + CV_WRITE_SEQ_ELEM( idx, writer ); + } + + cvEndWriteSeq( &writer ); + + if( labels ) + *labels = result; + + cvReleaseMemStorage( &temp_storage ); + return class_idx; +} + + +/****************************************************************************************\ +* Set implementation * 
+\****************************************************************************************/ + +/* Creates empty set: */ +CV_IMPL CvSet* +cvCreateSet( int set_flags, int header_size, int elem_size, CvMemStorage * storage ) +{ + if( !storage ) + CV_Error( CV_StsNullPtr, "" ); + if( header_size < (int)sizeof( CvSet ) || + elem_size < (int)sizeof(void*)*2 || + (elem_size & (sizeof(void*)-1)) != 0 ) + CV_Error( CV_StsBadSize, "" ); + + CvSet* set = (CvSet*) cvCreateSeq( set_flags, header_size, elem_size, storage ); + set->flags = (set->flags & ~CV_MAGIC_MASK) | CV_SET_MAGIC_VAL; + + return set; +} + + +/* Add new element to the set: */ +CV_IMPL int +cvSetAdd( CvSet* set, CvSetElem* element, CvSetElem** inserted_element ) +{ + int id = -1; + CvSetElem *free_elem; + + if( !set ) + CV_Error( CV_StsNullPtr, "" ); + + if( !(set->free_elems) ) + { + int count = set->total; + int elem_size = set->elem_size; + schar *ptr; + icvGrowSeq( (CvSeq *) set, 0 ); + + set->free_elems = (CvSetElem*) (ptr = set->ptr); + for( ; ptr + elem_size <= set->block_max; ptr += elem_size, count++ ) + { + ((CvSetElem*)ptr)->flags = count | CV_SET_ELEM_FREE_FLAG; + ((CvSetElem*)ptr)->next_free = (CvSetElem*)(ptr + elem_size); + } + assert( count <= CV_SET_ELEM_IDX_MASK+1 ); + ((CvSetElem*)(ptr - elem_size))->next_free = 0; + set->first->prev->count += count - set->total; + set->total = count; + set->ptr = set->block_max; + } + + free_elem = set->free_elems; + set->free_elems = free_elem->next_free; + + id = free_elem->flags & CV_SET_ELEM_IDX_MASK; + if( element ) + memcpy( free_elem, element, set->elem_size ); + + free_elem->flags = id; + set->active_count++; + + if( inserted_element ) + *inserted_element = free_elem; + + return id; +} + + +/* Remove element from a set given element index: */ +CV_IMPL void +cvSetRemove( CvSet* set, int index ) +{ + CvSetElem* elem = cvGetSetElem( set, index ); + if( elem ) + cvSetRemoveByPtr( set, elem ); + else if( !set ) + CV_Error( CV_StsNullPtr, "" ); +} + + +/* 
Remove all elements from a set: */ +CV_IMPL void +cvClearSet( CvSet* set ) +{ + cvClearSeq( (CvSeq*)set ); + set->free_elems = 0; + set->active_count = 0; +} + + +/****************************************************************************************\ +* Graph implementation * +\****************************************************************************************/ + +/* Create a new graph: */ +CV_IMPL CvGraph * +cvCreateGraph( int graph_type, int header_size, + int vtx_size, int edge_size, CvMemStorage * storage ) +{ + CvGraph *graph = 0; + CvSet *edges = 0; + CvSet *vertices = 0; + + if( header_size < (int) sizeof( CvGraph ) + || edge_size < (int) sizeof( CvGraphEdge ) + || vtx_size < (int) sizeof( CvGraphVtx ) + ){ + CV_Error( CV_StsBadSize, "" ); + } + + vertices = cvCreateSet( graph_type, header_size, vtx_size, storage ); + edges = cvCreateSet( CV_SEQ_KIND_GENERIC | CV_SEQ_ELTYPE_GRAPH_EDGE, + sizeof( CvSet ), edge_size, storage ); + + graph = (CvGraph*)vertices; + graph->edges = edges; + + return graph; +} + + +/* Remove all vertices and edges from a graph: */ +CV_IMPL void +cvClearGraph( CvGraph * graph ) +{ + if( !graph ) + CV_Error( CV_StsNullPtr, "" ); + + cvClearSet( graph->edges ); + cvClearSet( (CvSet*)graph ); +} + + +/* Add a vertex to a graph: */ +CV_IMPL int +cvGraphAddVtx( CvGraph* graph, const CvGraphVtx* _vertex, CvGraphVtx** _inserted_vertex ) +{ + CvGraphVtx *vertex = 0; + int index = -1; + + if( !graph ) + CV_Error( CV_StsNullPtr, "" ); + + vertex = (CvGraphVtx*)cvSetNew((CvSet*)graph); + if( vertex ) + { + if( _vertex ) + memcpy( vertex + 1, _vertex + 1, graph->elem_size - sizeof(CvGraphVtx) ); + vertex->first = 0; + index = vertex->flags; + } + + if( _inserted_vertex ) + *_inserted_vertex = vertex; + + return index; +} + + +/* Remove a vertex from the graph together with its incident edges: */ +CV_IMPL int +cvGraphRemoveVtxByPtr( CvGraph* graph, CvGraphVtx* vtx ) +{ + int count = -1; + + if( !graph || !vtx ) + CV_Error( CV_StsNullPtr, 
"" ); + + if( !CV_IS_SET_ELEM(vtx)) + CV_Error( CV_StsBadArg, "The vertex does not belong to the graph" ); + + count = graph->edges->active_count; + for( ;; ) + { + CvGraphEdge *edge = vtx->first; + if( !edge ) + break; + cvGraphRemoveEdgeByPtr( graph, edge->vtx[0], edge->vtx[1] ); + } + count -= graph->edges->active_count; + cvSetRemoveByPtr( (CvSet*)graph, vtx ); + + return count; +} + + +/* Remove a vertex from the graph together with its incident edges: */ +CV_IMPL int +cvGraphRemoveVtx( CvGraph* graph, int index ) +{ + int count = -1; + CvGraphVtx *vtx = 0; + + if( !graph ) + CV_Error( CV_StsNullPtr, "" ); + + vtx = cvGetGraphVtx( graph, index ); + if( !vtx ) + CV_Error( CV_StsBadArg, "The vertex is not found" ); + + count = graph->edges->active_count; + for( ;; ) + { + CvGraphEdge *edge = vtx->first; + count++; + + if( !edge ) + break; + cvGraphRemoveEdgeByPtr( graph, edge->vtx[0], edge->vtx[1] ); + } + count -= graph->edges->active_count; + cvSetRemoveByPtr( (CvSet*)graph, vtx ); + + return count; +} + + +/* Find a graph edge given pointers to the ending vertices: */ +CV_IMPL CvGraphEdge* +cvFindGraphEdgeByPtr( const CvGraph* graph, + const CvGraphVtx* start_vtx, + const CvGraphVtx* end_vtx ) +{ + int ofs = 0; + + if( !graph || !start_vtx || !end_vtx ) + CV_Error( CV_StsNullPtr, "" ); + + if( start_vtx == end_vtx ) + return 0; + + if( !CV_IS_GRAPH_ORIENTED( graph ) && + (start_vtx->flags & CV_SET_ELEM_IDX_MASK) > (end_vtx->flags & CV_SET_ELEM_IDX_MASK) ) + { + const CvGraphVtx* t; + CV_SWAP( start_vtx, end_vtx, t ); + } + + CvGraphEdge* edge = start_vtx->first; + for( ; edge; edge = edge->next[ofs] ) + { + ofs = start_vtx == edge->vtx[1]; + assert( ofs == 1 || start_vtx == edge->vtx[0] ); + if( edge->vtx[1] == end_vtx ) + break; + } + + return edge; +} + + +/* Find an edge in the graph given indices of the ending vertices: */ +CV_IMPL CvGraphEdge * +cvFindGraphEdge( const CvGraph* graph, int start_idx, int end_idx ) +{ + CvGraphVtx *start_vtx; + CvGraphVtx 
*end_vtx; + + if( !graph ) + CV_Error( CV_StsNullPtr, "graph pointer is NULL" ); + + start_vtx = cvGetGraphVtx( graph, start_idx ); + end_vtx = cvGetGraphVtx( graph, end_idx ); + + return cvFindGraphEdgeByPtr( graph, start_vtx, end_vtx ); +} + + +/* Given two vertices, return the edge + * connecting them, creating it if it + * did not already exist: + */ +CV_IMPL int +cvGraphAddEdgeByPtr( CvGraph* graph, + CvGraphVtx* start_vtx, CvGraphVtx* end_vtx, + const CvGraphEdge* _edge, + CvGraphEdge ** _inserted_edge ) +{ + CvGraphEdge *edge = 0; + int result = -1; + int delta; + + if( !graph ) + CV_Error( CV_StsNullPtr, "graph pointer is NULL" ); + + if( !CV_IS_GRAPH_ORIENTED( graph ) && + (start_vtx->flags & CV_SET_ELEM_IDX_MASK) > (end_vtx->flags & CV_SET_ELEM_IDX_MASK) ) + { + CvGraphVtx* t; + CV_SWAP( start_vtx, end_vtx, t ); + } + + edge = cvFindGraphEdgeByPtr( graph, start_vtx, end_vtx ); + if( edge ) + { + result = 0; + if( _inserted_edge ) + *_inserted_edge = edge; + return result; + } + + if( start_vtx == end_vtx ) + CV_Error( start_vtx ? 
CV_StsBadArg : CV_StsNullPtr, + "vertex pointers coinside (or set to NULL)" ); + + edge = (CvGraphEdge*)cvSetNew( (CvSet*)(graph->edges) ); + assert( edge->flags >= 0 ); + + edge->vtx[0] = start_vtx; + edge->vtx[1] = end_vtx; + edge->next[0] = start_vtx->first; + edge->next[1] = end_vtx->first; + start_vtx->first = end_vtx->first = edge; + + delta = graph->edges->elem_size - sizeof(*edge); + if( _edge ) + { + if( delta > 0 ) + memcpy( edge + 1, _edge + 1, delta ); + edge->weight = _edge->weight; + } + else + { + if( delta > 0 ) + memset( edge + 1, 0, delta ); + edge->weight = 1.f; + } + + result = 1; + + if( _inserted_edge ) + *_inserted_edge = edge; + + return result; +} + +/* Given two vertices, return the edge + * connecting them, creating it if it + * did not already exist: + */ +CV_IMPL int +cvGraphAddEdge( CvGraph* graph, + int start_idx, int end_idx, + const CvGraphEdge* _edge, + CvGraphEdge ** _inserted_edge ) +{ + CvGraphVtx *start_vtx; + CvGraphVtx *end_vtx; + + if( !graph ) + CV_Error( CV_StsNullPtr, "" ); + + start_vtx = cvGetGraphVtx( graph, start_idx ); + end_vtx = cvGetGraphVtx( graph, end_idx ); + + return cvGraphAddEdgeByPtr( graph, start_vtx, end_vtx, _edge, _inserted_edge ); +} + + +/* Remove the graph edge connecting two given vertices: */ +CV_IMPL void +cvGraphRemoveEdgeByPtr( CvGraph* graph, CvGraphVtx* start_vtx, CvGraphVtx* end_vtx ) +{ + int ofs, prev_ofs; + CvGraphEdge *edge, *next_edge, *prev_edge; + + if( !graph || !start_vtx || !end_vtx ) + CV_Error( CV_StsNullPtr, "" ); + + if( start_vtx == end_vtx ) + return; + + if( !CV_IS_GRAPH_ORIENTED( graph ) && + (start_vtx->flags & CV_SET_ELEM_IDX_MASK) > (end_vtx->flags & CV_SET_ELEM_IDX_MASK) ) + { + CvGraphVtx* t; + CV_SWAP( start_vtx, end_vtx, t ); + } + + for( ofs = prev_ofs = 0, prev_edge = 0, edge = start_vtx->first; edge != 0; + prev_ofs = ofs, prev_edge = edge, edge = edge->next[ofs] ) + { + ofs = start_vtx == edge->vtx[1]; + assert( ofs == 1 || start_vtx == edge->vtx[0] ); + if( 
edge->vtx[1] == end_vtx ) + break; + } + + if( !edge ) + return; + + next_edge = edge->next[ofs]; + if( prev_edge ) + prev_edge->next[prev_ofs] = next_edge; + else + start_vtx->first = next_edge; + + for( ofs = prev_ofs = 0, prev_edge = 0, edge = end_vtx->first; edge != 0; + prev_ofs = ofs, prev_edge = edge, edge = edge->next[ofs] ) + { + ofs = end_vtx == edge->vtx[1]; + assert( ofs == 1 || end_vtx == edge->vtx[0] ); + if( edge->vtx[0] == start_vtx ) + break; + } + + assert( edge != 0 ); + + next_edge = edge->next[ofs]; + if( prev_edge ) + prev_edge->next[prev_ofs] = next_edge; + else + end_vtx->first = next_edge; + + cvSetRemoveByPtr( graph->edges, edge ); +} + + +/* Remove the graph edge connecting two given vertices: */ +CV_IMPL void +cvGraphRemoveEdge( CvGraph* graph, int start_idx, int end_idx ) +{ + CvGraphVtx *start_vtx; + CvGraphVtx *end_vtx; + + if( !graph ) + CV_Error( CV_StsNullPtr, "" ); + + start_vtx = cvGetGraphVtx( graph, start_idx ); + end_vtx = cvGetGraphVtx( graph, end_idx ); + + cvGraphRemoveEdgeByPtr( graph, start_vtx, end_vtx ); +} + + +/* Count number of edges incident to a given vertex: */ +CV_IMPL int +cvGraphVtxDegreeByPtr( const CvGraph* graph, const CvGraphVtx* vertex ) +{ + CvGraphEdge *edge; + int count; + + if( !graph || !vertex ) + CV_Error( CV_StsNullPtr, "" ); + + for( edge = vertex->first, count = 0; edge; ) + { + count++; + edge = CV_NEXT_GRAPH_EDGE( edge, vertex ); + } + + return count; +} + + +/* Count number of edges incident to a given vertex: */ +CV_IMPL int +cvGraphVtxDegree( const CvGraph* graph, int vtx_idx ) +{ + CvGraphVtx *vertex; + CvGraphEdge *edge; + int count; + + if( !graph ) + CV_Error( CV_StsNullPtr, "" ); + + vertex = cvGetGraphVtx( graph, vtx_idx ); + if( !vertex ) + CV_Error( CV_StsObjectNotFound, "" ); + + for( edge = vertex->first, count = 0; edge; ) + { + count++; + edge = CV_NEXT_GRAPH_EDGE( edge, vertex ); + } + + return count; +} + + +typedef struct CvGraphItem +{ + CvGraphVtx* vtx; + CvGraphEdge* edge; 
+} +CvGraphItem; + + +static void +icvSeqElemsClearFlags( CvSeq* seq, int offset, int clear_mask ) +{ + CvSeqReader reader; + int i, total, elem_size; + + if( !seq ) + CV_Error( CV_StsNullPtr, "" ); + + elem_size = seq->elem_size; + total = seq->total; + + if( (unsigned)offset > (unsigned)elem_size ) + CV_Error( CV_StsBadArg, "" ); + + cvStartReadSeq( seq, &reader ); + + for( i = 0; i < total; i++ ) + { + int* flag_ptr = (int*)(reader.ptr + offset); + *flag_ptr &= ~clear_mask; + + CV_NEXT_SEQ_ELEM( elem_size, reader ); + } +} + + +static schar* +icvSeqFindNextElem( CvSeq* seq, int offset, int mask, + int value, int* start_index ) +{ + schar* elem_ptr = 0; + + CvSeqReader reader; + int total, elem_size, index; + + if( !seq || !start_index ) + CV_Error( CV_StsNullPtr, "" ); + + elem_size = seq->elem_size; + total = seq->total; + index = *start_index; + + if( (unsigned)offset > (unsigned)elem_size ) + CV_Error( CV_StsBadArg, "" ); + + if( total == 0 ) + return 0; + + if( (unsigned)index >= (unsigned)total ) + { + index %= total; + index += index < 0 ? total : 0; + } + + cvStartReadSeq( seq, &reader ); + + if( index != 0 ) + cvSetSeqReaderPos( &reader, index ); + + for( index = 0; index < total; index++ ) + { + int* flag_ptr = (int*)(reader.ptr + offset); + if( (*flag_ptr & mask) == value ) + break; + + CV_NEXT_SEQ_ELEM( elem_size, reader ); + } + + if( index < total ) + { + elem_ptr = reader.ptr; + *start_index = index; + } + + return elem_ptr; +} + +#define CV_FIELD_OFFSET( field, structtype ) ((int)(size_t)&((structtype*)0)->field) + +CV_IMPL CvGraphScanner* +cvCreateGraphScanner( CvGraph* graph, CvGraphVtx* vtx, int mask ) +{ + if( !graph ) + CV_Error( CV_StsNullPtr, "Null graph pointer" ); + + CV_Assert( graph->storage != 0 ); + + CvGraphScanner* scanner = (CvGraphScanner*)cvAlloc( sizeof(*scanner) ); + memset( scanner, 0, sizeof(*scanner)); + + scanner->graph = graph; + scanner->mask = mask; + scanner->vtx = vtx; + scanner->index = vtx == 0 ? 
0 : -1; + + CvMemStorage* child_storage = cvCreateChildMemStorage( graph->storage ); + + scanner->stack = cvCreateSeq( 0, sizeof(CvSet), + sizeof(CvGraphItem), child_storage ); + + icvSeqElemsClearFlags( (CvSeq*)graph, + CV_FIELD_OFFSET( flags, CvGraphVtx), + CV_GRAPH_ITEM_VISITED_FLAG| + CV_GRAPH_SEARCH_TREE_NODE_FLAG ); + + icvSeqElemsClearFlags( (CvSeq*)(graph->edges), + CV_FIELD_OFFSET( flags, CvGraphEdge), + CV_GRAPH_ITEM_VISITED_FLAG ); + + return scanner; +} + + +CV_IMPL void +cvReleaseGraphScanner( CvGraphScanner** scanner ) +{ + if( !scanner ) + CV_Error( CV_StsNullPtr, "Null double pointer to graph scanner" ); + + if( *scanner ) + { + if( (*scanner)->stack ) + cvReleaseMemStorage( &((*scanner)->stack->storage)); + cvFree( scanner ); + } +} + + +CV_IMPL int +cvNextGraphItem( CvGraphScanner* scanner ) +{ + int code = -1; + CvGraphVtx* vtx; + CvGraphVtx* dst; + CvGraphEdge* edge; + CvGraphItem item; + + if( !scanner || !(scanner->stack)) + CV_Error( CV_StsNullPtr, "Null graph scanner" ); + + dst = scanner->dst; + vtx = scanner->vtx; + edge = scanner->edge; + + for(;;) + { + for(;;) + { + if( dst && !CV_IS_GRAPH_VERTEX_VISITED(dst) ) + { + scanner->vtx = vtx = dst; + edge = vtx->first; + dst->flags |= CV_GRAPH_ITEM_VISITED_FLAG; + + if((scanner->mask & CV_GRAPH_VERTEX)) + { + scanner->vtx = vtx; + scanner->edge = vtx->first; + scanner->dst = 0; + code = CV_GRAPH_VERTEX; + return code; + } + } + + while( edge ) + { + dst = edge->vtx[vtx == edge->vtx[0]]; + + if( !CV_IS_GRAPH_EDGE_VISITED(edge) ) + { + // Check that the edge is outgoing: + if( !CV_IS_GRAPH_ORIENTED( scanner->graph ) || dst != edge->vtx[0] ) + { + edge->flags |= CV_GRAPH_ITEM_VISITED_FLAG; + + if( !CV_IS_GRAPH_VERTEX_VISITED(dst) ) + { + item.vtx = vtx; + item.edge = edge; + + vtx->flags |= CV_GRAPH_SEARCH_TREE_NODE_FLAG; + + cvSeqPush( scanner->stack, &item ); + + if( scanner->mask & CV_GRAPH_TREE_EDGE ) + { + code = CV_GRAPH_TREE_EDGE; + scanner->vtx = vtx; + scanner->dst = dst; + 
scanner->edge = edge; + return code; + } + break; + } + else + { + if( scanner->mask & (CV_GRAPH_BACK_EDGE| + CV_GRAPH_CROSS_EDGE| + CV_GRAPH_FORWARD_EDGE) ) + { + code = (dst->flags & CV_GRAPH_SEARCH_TREE_NODE_FLAG) ? + CV_GRAPH_BACK_EDGE : + (edge->flags & CV_GRAPH_FORWARD_EDGE_FLAG) ? + CV_GRAPH_FORWARD_EDGE : CV_GRAPH_CROSS_EDGE; + edge->flags &= ~CV_GRAPH_FORWARD_EDGE_FLAG; + if( scanner->mask & code ) + { + scanner->vtx = vtx; + scanner->dst = dst; + scanner->edge = edge; + return code; + } + } + } + } + else if( (dst->flags & (CV_GRAPH_ITEM_VISITED_FLAG| + CV_GRAPH_SEARCH_TREE_NODE_FLAG)) == + (CV_GRAPH_ITEM_VISITED_FLAG| + CV_GRAPH_SEARCH_TREE_NODE_FLAG)) + { + edge->flags |= CV_GRAPH_FORWARD_EDGE_FLAG; + } + } + + edge = CV_NEXT_GRAPH_EDGE( edge, vtx ); + } + + if( !edge ) /* need to backtrack */ + { + if( scanner->stack->total == 0 ) + { + if( scanner->index >= 0 ) + vtx = 0; + else + scanner->index = 0; + break; + } + cvSeqPop( scanner->stack, &item ); + vtx = item.vtx; + vtx->flags &= ~CV_GRAPH_SEARCH_TREE_NODE_FLAG; + edge = item.edge; + dst = 0; + + if( scanner->mask & CV_GRAPH_BACKTRACKING ) + { + scanner->vtx = vtx; + scanner->edge = edge; + scanner->dst = edge->vtx[vtx == edge->vtx[0]]; + code = CV_GRAPH_BACKTRACKING; + return code; + } + } + } + + if( !vtx ) + { + vtx = (CvGraphVtx*)icvSeqFindNextElem( (CvSeq*)(scanner->graph), + CV_FIELD_OFFSET( flags, CvGraphVtx ), CV_GRAPH_ITEM_VISITED_FLAG|INT_MIN, + 0, &(scanner->index) ); + + if( !vtx ) + { + code = CV_GRAPH_OVER; + break; + } + } + + dst = vtx; + if( scanner->mask & CV_GRAPH_NEW_TREE ) + { + scanner->dst = dst; + scanner->edge = 0; + scanner->vtx = 0; + code = CV_GRAPH_NEW_TREE; + break; + } + } + + return code; +} + + +CV_IMPL CvGraph* +cvCloneGraph( const CvGraph* graph, CvMemStorage* storage ) +{ + int* flag_buffer = 0; + CvGraphVtx** ptr_buffer = 0; + CvGraph* result = 0; + + int i, k; + int vtx_size, edge_size; + CvSeqReader reader; + + if( !CV_IS_GRAPH(graph)) + CV_Error( 
CV_StsBadArg, "Invalid graph pointer" ); + + if( !storage ) + storage = graph->storage; + + if( !storage ) + CV_Error( CV_StsNullPtr, "NULL storage pointer" ); + + vtx_size = graph->elem_size; + edge_size = graph->edges->elem_size; + + flag_buffer = (int*)cvAlloc( graph->total*sizeof(flag_buffer[0])); + ptr_buffer = (CvGraphVtx**)cvAlloc( graph->total*sizeof(ptr_buffer[0])); + result = cvCreateGraph( graph->flags, graph->header_size, + vtx_size, edge_size, storage ); + memcpy( result + sizeof(CvGraph), graph + sizeof(CvGraph), + graph->header_size - sizeof(CvGraph)); + + // Pass 1. Save flags, copy vertices: + cvStartReadSeq( (CvSeq*)graph, &reader ); + for( i = 0, k = 0; i < graph->total; i++ ) + { + if( CV_IS_SET_ELEM( reader.ptr )) + { + CvGraphVtx* vtx = (CvGraphVtx*)reader.ptr; + CvGraphVtx* dstvtx = 0; + cvGraphAddVtx( result, vtx, &dstvtx ); + flag_buffer[k] = dstvtx->flags = vtx->flags; + vtx->flags = k; + ptr_buffer[k++] = dstvtx; + } + CV_NEXT_SEQ_ELEM( vtx_size, reader ); + } + + // Pass 2. Copy edges: + cvStartReadSeq( (CvSeq*)graph->edges, &reader ); + for( i = 0; i < graph->edges->total; i++ ) + { + if( CV_IS_SET_ELEM( reader.ptr )) + { + CvGraphEdge* edge = (CvGraphEdge*)reader.ptr; + CvGraphEdge* dstedge = 0; + CvGraphVtx* new_org = ptr_buffer[edge->vtx[0]->flags]; + CvGraphVtx* new_dst = ptr_buffer[edge->vtx[1]->flags]; + cvGraphAddEdgeByPtr( result, new_org, new_dst, edge, &dstedge ); + dstedge->flags = edge->flags; + } + CV_NEXT_SEQ_ELEM( edge_size, reader ); + } + + // Pass 3. 
Restore flags: + cvStartReadSeq( (CvSeq*)graph, &reader ); + for( i = 0, k = 0; i < graph->edges->total; i++ ) + { + if( CV_IS_SET_ELEM( reader.ptr )) + { + CvGraphVtx* vtx = (CvGraphVtx*)reader.ptr; + vtx->flags = flag_buffer[k++]; + } + CV_NEXT_SEQ_ELEM( vtx_size, reader ); + } + + cvFree( &flag_buffer ); + cvFree( &ptr_buffer ); + + if( cvGetErrStatus() < 0 ) + result = 0; + + return result; +} + + +/****************************************************************************************\ +* Working with sequence tree * +\****************************************************************************************/ + +// Gather pointers to all the sequences, accessible from the , to the single sequence. +CV_IMPL CvSeq* +cvTreeToNodeSeq( const void* first, int header_size, CvMemStorage* storage ) +{ + CvSeq* allseq = 0; + CvTreeNodeIterator iterator; + + if( !storage ) + CV_Error( CV_StsNullPtr, "NULL storage pointer" ); + + allseq = cvCreateSeq( 0, header_size, sizeof(first), storage ); + + if( first ) + { + cvInitTreeNodeIterator( &iterator, first, INT_MAX ); + + for(;;) + { + void* node = cvNextTreeNode( &iterator ); + if( !node ) + break; + cvSeqPush( allseq, &node ); + } + } + + + + return allseq; +} + + +typedef struct CvTreeNode +{ + int flags; /* micsellaneous flags */ + int header_size; /* size of sequence header */ + struct CvTreeNode* h_prev; /* previous sequence */ + struct CvTreeNode* h_next; /* next sequence */ + struct CvTreeNode* v_prev; /* 2nd previous sequence */ + struct CvTreeNode* v_next; /* 2nd next sequence */ +} +CvTreeNode; + + + +// Insert contour into tree given certain parent sequence. 
+// If parent is equal to frame (the most external contour), +// then added contour will have null pointer to parent: +CV_IMPL void +cvInsertNodeIntoTree( void* _node, void* _parent, void* _frame ) +{ + CvTreeNode* node = (CvTreeNode*)_node; + CvTreeNode* parent = (CvTreeNode*)_parent; + + if( !node || !parent ) + CV_Error( CV_StsNullPtr, "" ); + + node->v_prev = _parent != _frame ? parent : 0; + node->h_next = parent->v_next; + + assert( parent->v_next != node ); + + if( parent->v_next ) + parent->v_next->h_prev = node; + parent->v_next = node; +} + + +// Remove contour from tree, together with the contour's children: +CV_IMPL void +cvRemoveNodeFromTree( void* _node, void* _frame ) +{ + CvTreeNode* node = (CvTreeNode*)_node; + CvTreeNode* frame = (CvTreeNode*)_frame; + + if( !node ) + CV_Error( CV_StsNullPtr, "" ); + + if( node == frame ) + CV_Error( CV_StsBadArg, "frame node could not be deleted" ); + + if( node->h_next ) + node->h_next->h_prev = node->h_prev; + + if( node->h_prev ) + node->h_prev->h_next = node->h_next; + else + { + CvTreeNode* parent = node->v_prev; + if( !parent ) + parent = frame; + + if( parent ) + { + assert( parent->v_next == node ); + parent->v_next = node->h_next; + } + } +} + + +CV_IMPL void +cvInitTreeNodeIterator( CvTreeNodeIterator* treeIterator, + const void* first, int max_level ) +{ + if( !treeIterator || !first ) + CV_Error( CV_StsNullPtr, "" ); + + if( max_level < 0 ) + CV_Error( CV_StsOutOfRange, "" ); + + treeIterator->node = (void*)first; + treeIterator->level = 0; + treeIterator->max_level = max_level; +} + + +CV_IMPL void* +cvNextTreeNode( CvTreeNodeIterator* treeIterator ) +{ + CvTreeNode* prevNode = 0; + CvTreeNode* node; + int level; + + if( !treeIterator ) + CV_Error( CV_StsNullPtr, "NULL iterator pointer" ); + + prevNode = node = (CvTreeNode*)treeIterator->node; + level = treeIterator->level; + + if( node ) + { + if( node->v_next && level+1 < treeIterator->max_level ) + { + node = node->v_next; + level++; + } + else + 
{ + while( node->h_next == 0 ) + { + node = node->v_prev; + if( --level < 0 ) + { + node = 0; + break; + } + } + node = node && treeIterator->max_level != 0 ? node->h_next : 0; + } + } + + treeIterator->node = node; + treeIterator->level = level; + return prevNode; +} + + +CV_IMPL void* +cvPrevTreeNode( CvTreeNodeIterator* treeIterator ) +{ + CvTreeNode* prevNode = 0; + CvTreeNode* node; + int level; + + if( !treeIterator ) + CV_Error( CV_StsNullPtr, "" ); + + prevNode = node = (CvTreeNode*)treeIterator->node; + level = treeIterator->level; + + if( node ) + { + if( !node->h_prev ) + { + node = node->v_prev; + if( --level < 0 ) + node = 0; + } + else + { + node = node->h_prev; + + while( node->v_next && level < treeIterator->max_level ) + { + node = node->v_next; + level++; + + while( node->h_next ) + node = node->h_next; + } + } + } + + treeIterator->node = node; + treeIterator->level = level; + return prevNode; +} + + +namespace cv +{ + +// This is reimplementation of kd-trees from cvkdtree*.* by Xavier Delacour, cleaned-up and +// adopted to work with the new OpenCV data structures. It's in cxcore to be shared by +// both cv (CvFeatureTree) and ml (kNN). + +// The algorithm is taken from: +// J.S. Beis and D.G. Lowe. Shape indexing using approximate nearest-neighbor search +// in highdimensional spaces. In Proc. IEEE Conf. Comp. Vision Patt. Recog., +// pages 1000--1006, 1997. 
http://citeseer.ist.psu.edu/beis97shape.html + +const int MAX_TREE_DEPTH = 32; + +KDTree::KDTree() +{ + maxDepth = -1; + normType = NORM_L2; +} + +KDTree::KDTree(InputArray _points, bool _copyData) +{ + maxDepth = -1; + normType = NORM_L2; + build(_points, _copyData); +} + +KDTree::KDTree(InputArray _points, InputArray _labels, bool _copyData) +{ + maxDepth = -1; + normType = NORM_L2; + build(_points, _labels, _copyData); +} + +struct SubTree +{ + SubTree() : first(0), last(0), nodeIdx(0), depth(0) {} + SubTree(int _first, int _last, int _nodeIdx, int _depth) + : first(_first), last(_last), nodeIdx(_nodeIdx), depth(_depth) {} + int first; + int last; + int nodeIdx; + int depth; +}; + + +static float +medianPartition( size_t* ofs, int a, int b, const float* vals ) +{ + int k, a0 = a, b0 = b; + int middle = (a + b)/2; + while( b > a ) + { + int i0 = a, i1 = (a+b)/2, i2 = b; + float v0 = vals[ofs[i0]], v1 = vals[ofs[i1]], v2 = vals[ofs[i2]]; + int ip = v0 < v1 ? (v1 < v2 ? i1 : v0 < v2 ? i2 : i0) : + v0 < v2 ? i0 : (v1 < v2 ? 
i2 : i1); + float pivot = vals[ofs[ip]]; + std::swap(ofs[ip], ofs[i2]); + + for( i1 = i0, i0--; i1 <= i2; i1++ ) + if( vals[ofs[i1]] <= pivot ) + { + i0++; + std::swap(ofs[i0], ofs[i1]); + } + if( i0 == middle ) + break; + if( i0 > middle ) + b = i0 - (b == i0); + else + a = i0; + } + + float pivot = vals[ofs[middle]]; + int less = 0, more = 0; + for( k = a0; k < middle; k++ ) + { + CV_Assert(vals[ofs[k]] <= pivot); + less += vals[ofs[k]] < pivot; + } + for( k = b0; k > middle; k-- ) + { + CV_Assert(vals[ofs[k]] >= pivot); + more += vals[ofs[k]] > pivot; + } + CV_Assert(std::abs(more - less) <= 1); + + return vals[ofs[middle]]; +} + +static void +computeSums( const Mat& points, const size_t* ofs, int a, int b, double* sums ) +{ + int i, j, dims = points.cols; + const float* data = points.ptr(0); + for( j = 0; j < dims; j++ ) + sums[j*2] = sums[j*2+1] = 0; + for( i = a; i <= b; i++ ) + { + const float* row = data + ofs[i]; + for( j = 0; j < dims; j++ ) + { + double t = row[j], s = sums[j*2] + t, s2 = sums[j*2+1] + t*t; + sums[j*2] = s; sums[j*2+1] = s2; + } + } +} + + +void KDTree::build(InputArray _points, bool _copyData) +{ + build(_points, noArray(), _copyData); +} + + +void KDTree::build(InputArray __points, InputArray __labels, bool _copyData) +{ + Mat _points = __points.getMat(), _labels = __labels.getMat(); + CV_Assert(_points.type() == CV_32F && !_points.empty()); + vector().swap(nodes); + + if( !_copyData ) + points = _points; + else + { + points.release(); + points.create(_points.size(), _points.type()); + } + + int i, j, n = _points.rows, dims = _points.cols, top = 0; + const float* data = _points.ptr(0); + float* dstdata = points.ptr(0); + size_t step = _points.step1(); + size_t dstep = points.step1(); + int ptpos = 0; + labels.resize(n); + const int* _labels_data = 0; + + if( !_labels.empty() ) + { + int nlabels = _labels.checkVector(1, CV_32S, true); + CV_Assert(nlabels == n); + _labels_data = (const int*)_labels.data; + } + + Mat 
sumstack(MAX_TREE_DEPTH*2, dims*2, CV_64F); + SubTree stack[MAX_TREE_DEPTH*2]; + + vector _ptofs(n); + size_t* ptofs = &_ptofs[0]; + + for( i = 0; i < n; i++ ) + ptofs[i] = i*step; + + nodes.push_back(Node()); + computeSums(points, ptofs, 0, n-1, sumstack.ptr(top)); + stack[top++] = SubTree(0, n-1, 0, 0); + int _maxDepth = 0; + + while( --top >= 0 ) + { + int first = stack[top].first, last = stack[top].last; + int depth = stack[top].depth, nidx = stack[top].nodeIdx; + int count = last - first + 1, dim = -1; + const double* sums = sumstack.ptr(top); + double invCount = 1./count, maxVar = -1.; + + if( count == 1 ) + { + int idx0 = (int)(ptofs[first]/step); + int idx = _copyData ? ptpos++ : idx0; + nodes[nidx].idx = ~idx; + if( _copyData ) + { + const float* src = data + ptofs[first]; + float* dst = dstdata + idx*dstep; + for( j = 0; j < dims; j++ ) + dst[j] = src[j]; + } + labels[idx] = _labels_data ? _labels_data[idx0] : idx0; + _maxDepth = std::max(_maxDepth, depth); + continue; + } + + // find the dimensionality with the biggest variance + for( j = 0; j < dims; j++ ) + { + double m = sums[j*2]*invCount; + double varj = sums[j*2+1]*invCount - m*m; + if( maxVar < varj ) + { + maxVar = varj; + dim = j; + } + } + + int left = (int)nodes.size(), right = left + 1; + nodes.push_back(Node()); + nodes.push_back(Node()); + nodes[nidx].idx = dim; + nodes[nidx].left = left; + nodes[nidx].right = right; + nodes[nidx].boundary = medianPartition(ptofs, first, last, data + dim); + + int middle = (first + last)/2; + double *lsums = (double*)sums, *rsums = lsums + dims*2; + computeSums(points, ptofs, middle+1, last, rsums); + for( j = 0; j < dims*2; j++ ) + lsums[j] = sums[j] - rsums[j]; + stack[top++] = SubTree(first, middle, left, depth+1); + stack[top++] = SubTree(middle+1, last, right, depth+1); + } + maxDepth = _maxDepth; +} + + +struct PQueueElem +{ + PQueueElem() : dist(0), idx(0) {} + PQueueElem(float _dist, int _idx) : dist(_dist), idx(_idx) {} + float dist; + int idx; +}; 
+ + +int KDTree::findNearest(InputArray _vec, int K, int emax, + OutputArray _neighborsIdx, OutputArray _neighbors, + OutputArray _dist, OutputArray _labels) const + +{ + Mat vecmat = _vec.getMat(); + CV_Assert( vecmat.isContinuous() && vecmat.type() == CV_32F && vecmat.total() == (size_t)points.cols ); + const float* vec = vecmat.ptr(); + K = std::min(K, points.rows); + int dims = points.cols; + + CV_Assert(K > 0 && (normType == NORM_L2 || normType == NORM_L1)); + + AutoBuffer _buf((K+1)*(sizeof(float) + sizeof(int))); + int* idx = (int*)(uchar*)_buf; + float* dist = (float*)(idx + K + 1); + int i, j, ncount = 0, e = 0; + + int qsize = 0, maxqsize = 1 << 10; + AutoBuffer _pqueue(maxqsize*sizeof(PQueueElem)); + PQueueElem* pqueue = (PQueueElem*)(uchar*)_pqueue; + emax = std::max(emax, 1); + + for( e = 0; e < emax; ) + { + float d, alt_d = 0.f; + int nidx; + + if( e == 0 ) + nidx = 0; + else + { + // take the next node from the priority queue + if( qsize == 0 ) + break; + nidx = pqueue[0].idx; + alt_d = pqueue[0].dist; + if( --qsize > 0 ) + { + std::swap(pqueue[0], pqueue[qsize]); + d = pqueue[0].dist; + for( i = 0;;) + { + int left = i*2 + 1, right = i*2 + 2; + if( left >= qsize ) + break; + if( right < qsize && pqueue[right].dist < pqueue[left].dist ) + left = right; + if( pqueue[left].dist >= d ) + break; + std::swap(pqueue[i], pqueue[left]); + i = left; + } + } + + if( ncount == K && alt_d > dist[ncount-1] ) + continue; + } + + for(;;) + { + if( nidx < 0 ) + break; + const Node& n = nodes[nidx]; + + if( n.idx < 0 ) + { + i = ~n.idx; + const float* row = points.ptr(i); + if( normType == NORM_L2 ) + for( j = 0, d = 0.f; j < dims; j++ ) + { + float t = vec[j] - row[j]; + d += t*t; + } + else + for( j = 0, d = 0.f; j < dims; j++ ) + d += std::abs(vec[j] - row[j]); + + dist[ncount] = d; + idx[ncount] = i; + for( i = ncount-1; i >= 0; i-- ) + { + if( dist[i] <= d ) + break; + std::swap(dist[i], dist[i+1]); + std::swap(idx[i], idx[i+1]); + } + ncount += ncount < K; + 
e++; + break; + } + + int alt; + if( vec[n.idx] <= n.boundary ) + { + nidx = n.left; + alt = n.right; + } + else + { + nidx = n.right; + alt = n.left; + } + + d = vec[n.idx] - n.boundary; + if( normType == NORM_L2 ) + d = d*d + alt_d; + else + d = std::abs(d) + alt_d; + // subtree prunning + if( ncount == K && d > dist[ncount-1] ) + continue; + // add alternative subtree to the priority queue + pqueue[qsize] = PQueueElem(d, alt); + for( i = qsize; i > 0; ) + { + int parent = (i-1)/2; + if( parent < 0 || pqueue[parent].dist <= d ) + break; + std::swap(pqueue[i], pqueue[parent]); + i = parent; + } + qsize += qsize+1 < maxqsize; + } + } + + K = std::min(K, ncount); + if( _neighborsIdx.needed() ) + { + _neighborsIdx.create(K, 1, CV_32S, -1, true); + Mat nidx = _neighborsIdx.getMat(); + Mat(nidx.size(), CV_32S, &idx[0]).copyTo(nidx); + } + if( _dist.needed() ) + sqrt(Mat(K, 1, CV_32F, dist), _dist); + + if( _neighbors.needed() || _labels.needed() ) + getPoints(Mat(K, 1, CV_32S, idx), _neighbors, _labels); + return K; +} + + +void KDTree::findOrthoRange(InputArray _lowerBound, + InputArray _upperBound, + OutputArray _neighborsIdx, + OutputArray _neighbors, + OutputArray _labels ) const +{ + int dims = points.cols; + Mat lowerBound = _lowerBound.getMat(), upperBound = _upperBound.getMat(); + CV_Assert( lowerBound.size == upperBound.size && + lowerBound.isContinuous() && + upperBound.isContinuous() && + lowerBound.type() == upperBound.type() && + lowerBound.type() == CV_32F && + lowerBound.total() == (size_t)dims ); + const float* L = lowerBound.ptr(); + const float* R = upperBound.ptr(); + + vector idx; + AutoBuffer _stack(MAX_TREE_DEPTH*2 + 1); + int* stack = _stack; + int top = 0; + + stack[top++] = 0; + + while( --top >= 0 ) + { + int nidx = stack[top]; + if( nidx < 0 ) + break; + const Node& n = nodes[nidx]; + if( n.idx < 0 ) + { + int j, i = ~n.idx; + const float* row = points.ptr(i); + for( j = 0; j < dims; j++ ) + if( row[j] < L[j] || row[j] >= R[j] ) + break; + 
if( j == dims ) + idx.push_back(i); + continue; + } + if( L[n.idx] <= n.boundary ) + stack[top++] = n.left; + if( R[n.idx] > n.boundary ) + stack[top++] = n.right; + } + + if( _neighborsIdx.needed() ) + { + _neighborsIdx.create((int)idx.size(), 1, CV_32S, -1, true); + Mat nidx = _neighborsIdx.getMat(); + Mat(nidx.size(), CV_32S, &idx[0]).copyTo(nidx); + } + getPoints( idx, _neighbors, _labels ); +} + + +void KDTree::getPoints(InputArray _idx, OutputArray _pts, OutputArray _labels) const +{ + Mat idxmat = _idx.getMat(), pts, labelsmat; + CV_Assert( idxmat.isContinuous() && idxmat.type() == CV_32S && + (idxmat.cols == 1 || idxmat.rows == 1) ); + const int* idx = idxmat.ptr(); + int* dstlabels = 0; + + int dims = points.cols; + int i, nidx = (int)idxmat.total(); + if( nidx == 0 ) + { + _pts.release(); + _labels.release(); + return; + } + + if( _pts.needed() ) + { + _pts.create( nidx, dims, points.type()); + pts = _pts.getMat(); + } + + if(_labels.needed()) + { + _labels.create(nidx, 1, CV_32S, -1, true); + labelsmat = _labels.getMat(); + CV_Assert( labelsmat.isContinuous() ); + dstlabels = labelsmat.ptr(); + } + const int* srclabels = !labels.empty() ? &labels[0] : 0; + + for( i = 0; i < nidx; i++ ) + { + int k = idx[i]; + CV_Assert( (unsigned)k < (unsigned)points.rows ); + const float* src = points.ptr(k); + if( pts.data ) + std::copy(src, src + dims, pts.ptr(i)); + if( dstlabels ) + dstlabels[i] = srclabels ? srclabels[k] : k; + } +} + + +const float* KDTree::getPoint(int ptidx, int* label) const +{ + CV_Assert( (unsigned)ptidx < (unsigned)points.rows); + if(label) + *label = labels[ptidx]; + return points.ptr(ptidx); +} + + +int KDTree::dims() const +{ + return !points.empty() ? 
points.cols : 0; +} + +//////////////////////////////////////////////////////////////////////////////// + +schar* seqPush( CvSeq* seq, const void* element ) +{ + return cvSeqPush(seq, element); +} + +schar* seqPushFront( CvSeq* seq, const void* element ) +{ + return cvSeqPushFront(seq, element); +} + +void seqPop( CvSeq* seq, void* element ) +{ + cvSeqPop(seq, element); +} + +void seqPopFront( CvSeq* seq, void* element ) +{ + cvSeqPopFront(seq, element); +} + +void seqRemove( CvSeq* seq, int index ) +{ + cvSeqRemove(seq, index); +} + +void clearSeq( CvSeq* seq ) +{ + cvClearSeq(seq); +} + +schar* getSeqElem( const CvSeq* seq, int index ) +{ + return cvGetSeqElem(seq, index); +} + +void seqRemoveSlice( CvSeq* seq, CvSlice slice ) +{ + return cvSeqRemoveSlice(seq, slice); +} + +void seqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr ) +{ + cvSeqInsertSlice(seq, before_index, from_arr); +} + +} + +/* End of file. */ diff --git a/opencv/core/drawing.cpp b/opencv/core/drawing.cpp new file mode 100644 index 0000000..f8e8a51 --- /dev/null +++ b/opencv/core/drawing.cpp @@ -0,0 +1,2408 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ +#include "precomp.hpp" + +namespace cv +{ + +enum { XY_SHIFT = 16, XY_ONE = 1 << XY_SHIFT, DRAWING_STORAGE_BLOCK = (1<<12) - 256 }; + +struct PolyEdge +{ + PolyEdge() : y0(0), y1(0), x(0), dx(0), next(0) {} + //PolyEdge(int _y0, int _y1, int _x, int _dx) : y0(_y0), y1(_y1), x(_x), dx(_dx) {} + + int y0, y1; + int x, dx; + PolyEdge *next; +}; + +static void +CollectPolyEdges( Mat& img, const Point* v, int npts, + vector& edges, const void* color, int line_type, + int shift, Point offset=Point() ); + +static void +FillEdgeCollection( Mat& img, vector& edges, const void* color ); + +static void +PolyLine( Mat& img, const Point* v, int npts, bool closed, + const void* color, int thickness, int line_type, int shift ); + +static void +FillConvexPoly( Mat& img, const Point* v, int npts, + const void* color, int line_type, int shift ); + +/****************************************************************************************\ +* Lines * +\****************************************************************************************/ + +bool clipLine( Size img_size, Point& pt1, Point& pt2 ) +{ + int x1, y1, x2, y2; + int c1, c2; + int right = img_size.width-1, bottom = img_size.height-1; + + if( img_size.width <= 0 || img_size.height <= 0 ) + return false; + + x1 = pt1.x; y1 = pt1.y; x2 = pt2.x; y2 = pt2.y; + c1 = (x1 < 0) + (x1 > right) * 2 + (y1 < 0) * 4 + (y1 > bottom) * 8; + c2 = (x2 < 0) + (x2 > right) * 2 + (y2 < 0) * 4 + (y2 > bottom) * 8; + + if( (c1 & c2) == 0 && (c1 | c2) != 0 ) + { + int a; + if( c1 & 12 ) + { + a = c1 < 8 ? 0 : bottom; + x1 += (int) (((int64) (a - y1)) * (x2 - x1) / (y2 - y1)); + y1 = a; + c1 = (x1 < 0) + (x1 > right) * 2; + } + if( c2 & 12 ) + { + a = c2 < 8 ? 0 : bottom; + x2 += (int) (((int64) (a - y2)) * (x2 - x1) / (y2 - y1)); + y2 = a; + c2 = (x2 < 0) + (x2 > right) * 2; + } + if( (c1 & c2) == 0 && (c1 | c2) != 0 ) + { + if( c1 ) + { + a = c1 == 1 ? 
0 : right; + y1 += (int) (((int64) (a - x1)) * (y2 - y1) / (x2 - x1)); + x1 = a; + c1 = 0; + } + if( c2 ) + { + a = c2 == 1 ? 0 : right; + y2 += (int) (((int64) (a - x2)) * (y2 - y1) / (x2 - x1)); + x2 = a; + c2 = 0; + } + } + + assert( (c1 & c2) != 0 || (x1 | y1 | x2 | y2) >= 0 ); + + pt1.x = x1; + pt1.y = y1; + pt2.x = x2; + pt2.y = y2; + } + + return (c1 | c2) == 0; +} + +bool clipLine( Rect img_rect, Point& pt1, Point& pt2 ) +{ + Point tl = img_rect.tl(); + pt1 -= tl; pt2 -= tl; + bool inside = clipLine(img_rect.size(), pt1, pt2); + pt1 += tl; pt2 += tl; + + return inside; +} + +/* + Initializes line iterator. + Returns number of points on the line or negative number if error. +*/ +LineIterator::LineIterator(const Mat& img, Point pt1, Point pt2, + int connectivity, bool left_to_right) +{ + count = -1; + + CV_Assert( connectivity == 8 || connectivity == 4 ); + + if( (unsigned)pt1.x >= (unsigned)(img.cols) || + (unsigned)pt2.x >= (unsigned)(img.cols) || + (unsigned)pt1.y >= (unsigned)(img.rows) || + (unsigned)pt2.y >= (unsigned)(img.rows) ) + { + if( !clipLine( img.size(), pt1, pt2 ) ) + { + ptr = img.data; + err = plusDelta = minusDelta = plusStep = minusStep = count = 0; + return; + } + } + + int bt_pix0 = (int)img.elemSize(), bt_pix = bt_pix0; + size_t step = img.step; + + int dx = pt2.x - pt1.x; + int dy = pt2.y - pt1.y; + int s = dx < 0 ? -1 : 0; + + if( left_to_right ) + { + dx = (dx ^ s) - s; + dy = (dy ^ s) - s; + pt1.x ^= (pt1.x ^ pt2.x) & s; + pt1.y ^= (pt1.y ^ pt2.y) & s; + } + else + { + dx = (dx ^ s) - s; + bt_pix = (bt_pix ^ s) - s; + } + + ptr = (uchar*)(img.data + pt1.y * step + pt1.x * bt_pix0); + + s = dy < 0 ? -1 : 0; + dy = (dy ^ s) - s; + step = (step ^ s) - s; + + s = dy > dx ? 
-1 : 0; + + /* conditional swaps */ + dx ^= dy & s; + dy ^= dx & s; + dx ^= dy & s; + + bt_pix ^= step & s; + step ^= bt_pix & s; + bt_pix ^= step & s; + + if( connectivity == 8 ) + { + assert( dx >= 0 && dy >= 0 ); + + err = dx - (dy + dy); + plusDelta = dx + dx; + minusDelta = -(dy + dy); + plusStep = (int)step; + minusStep = bt_pix; + count = dx + 1; + } + else /* connectivity == 4 */ + { + assert( dx >= 0 && dy >= 0 ); + + err = 0; + plusDelta = (dx + dx) + (dy + dy); + minusDelta = -(dy + dy); + plusStep = (int)step - bt_pix; + minusStep = bt_pix; + count = dx + dy + 1; + } + + this->ptr0 = img.data; + this->step = (int)img.step; + this->elemSize = bt_pix0; +} + +static void +Line( Mat& img, Point pt1, Point pt2, + const void* _color, int connectivity = 8 ) +{ + if( connectivity == 0 ) + connectivity = 8; + if( connectivity == 1 ) + connectivity = 4; + + LineIterator iterator(img, pt1, pt2, connectivity, true); + int i, count = iterator.count; + int pix_size = (int)img.elemSize(); + const uchar* color = (const uchar*)_color; + + for( i = 0; i < count; i++, ++iterator ) + { + uchar* ptr = *iterator; + if( pix_size == 1 ) + ptr[0] = color[0]; + else if( pix_size == 3 ) + { + ptr[0] = color[0]; + ptr[1] = color[1]; + ptr[2] = color[2]; + } + else + memcpy( *iterator, color, pix_size ); + } +} + + +/* Correction table depent on the slope */ +static const uchar SlopeCorrTable[] = { + 181, 181, 181, 182, 182, 183, 184, 185, 187, 188, 190, 192, 194, 196, 198, 201, + 203, 206, 209, 211, 214, 218, 221, 224, 227, 231, 235, 238, 242, 246, 250, 254 +}; + +/* Gaussian for antialiasing filter */ +static const int FilterTable[] = { + 168, 177, 185, 194, 202, 210, 218, 224, 231, 236, 241, 246, 249, 252, 254, 254, + 254, 254, 252, 249, 246, 241, 236, 231, 224, 218, 210, 202, 194, 185, 177, 168, + 158, 149, 140, 131, 122, 114, 105, 97, 89, 82, 75, 68, 62, 56, 50, 45, + 40, 36, 32, 28, 25, 22, 19, 16, 14, 12, 11, 9, 8, 7, 5, 5 +}; + +static void +LineAA( Mat& img, Point pt1, 
Point pt2, const void* color ) +{ + int dx, dy; + int ecount, scount = 0; + int slope; + int ax, ay; + int x_step, y_step; + int i, j; + int ep_table[9]; + int cb = ((uchar*)color)[0], cg = ((uchar*)color)[1], cr = ((uchar*)color)[2]; + int _cb, _cg, _cr; + int nch = img.channels(); + uchar* ptr = img.data; + size_t step = img.step; + Size size = img.size(); + + if( !((nch == 1 || nch == 3) && img.depth() == CV_8U) ) + { + Line(img, pt1, pt2, color); + return; + } + + pt1.x -= XY_ONE*2; + pt1.y -= XY_ONE*2; + pt2.x -= XY_ONE*2; + pt2.y -= XY_ONE*2; + ptr += img.step*2 + 2*nch; + + size.width = ((size.width - 5) << XY_SHIFT) + 1; + size.height = ((size.height - 5) << XY_SHIFT) + 1; + + if( !clipLine( size, pt1, pt2 )) + return; + + dx = pt2.x - pt1.x; + dy = pt2.y - pt1.y; + + j = dx < 0 ? -1 : 0; + ax = (dx ^ j) - j; + i = dy < 0 ? -1 : 0; + ay = (dy ^ i) - i; + + if( ax > ay ) + { + dx = ax; + dy = (dy ^ j) - j; + pt1.x ^= pt2.x & j; + pt2.x ^= pt1.x & j; + pt1.x ^= pt2.x & j; + pt1.y ^= pt2.y & j; + pt2.y ^= pt1.y & j; + pt1.y ^= pt2.y & j; + + x_step = XY_ONE; + y_step = (int) (((int64) dy << XY_SHIFT) / (ax | 1)); + pt2.x += XY_ONE; + ecount = (pt2.x >> XY_SHIFT) - (pt1.x >> XY_SHIFT); + j = -(pt1.x & (XY_ONE - 1)); + pt1.y += (int) ((((int64) y_step) * j) >> XY_SHIFT) + (XY_ONE >> 1); + slope = (y_step >> (XY_SHIFT - 5)) & 0x3f; + slope ^= (y_step < 0 ? 
0x3f : 0); + + /* Get 4-bit fractions for end-point adjustments */ + i = (pt1.x >> (XY_SHIFT - 7)) & 0x78; + j = (pt2.x >> (XY_SHIFT - 7)) & 0x78; + } + else + { + dy = ay; + dx = (dx ^ i) - i; + pt1.x ^= pt2.x & i; + pt2.x ^= pt1.x & i; + pt1.x ^= pt2.x & i; + pt1.y ^= pt2.y & i; + pt2.y ^= pt1.y & i; + pt1.y ^= pt2.y & i; + + x_step = (int) (((int64) dx << XY_SHIFT) / (ay | 1)); + y_step = XY_ONE; + pt2.y += XY_ONE; + ecount = (pt2.y >> XY_SHIFT) - (pt1.y >> XY_SHIFT); + j = -(pt1.y & (XY_ONE - 1)); + pt1.x += (int) ((((int64) x_step) * j) >> XY_SHIFT) + (XY_ONE >> 1); + slope = (x_step >> (XY_SHIFT - 5)) & 0x3f; + slope ^= (x_step < 0 ? 0x3f : 0); + + /* Get 4-bit fractions for end-point adjustments */ + i = (pt1.y >> (XY_SHIFT - 7)) & 0x78; + j = (pt2.y >> (XY_SHIFT - 7)) & 0x78; + } + + slope = (slope & 0x20) ? 0x100 : SlopeCorrTable[slope]; + + /* Calc end point correction table */ + { + int t0 = slope << 7; + int t1 = ((0x78 - i) | 4) * slope; + int t2 = (j | 4) * slope; + + ep_table[0] = 0; + ep_table[8] = slope; + ep_table[1] = ep_table[3] = ((((j - i) & 0x78) | 4) * slope >> 8) & 0x1ff; + ep_table[2] = (t1 >> 8) & 0x1ff; + ep_table[4] = ((((j - i) + 0x80) | 4) * slope >> 8) & 0x1ff; + ep_table[5] = ((t1 + t0) >> 8) & 0x1ff; + ep_table[6] = (t2 >> 8) & 0x1ff; + ep_table[7] = ((t2 + t0) >> 8) & 0x1ff; + } + + if( nch == 3 ) + { + #define ICV_PUT_POINT() \ + { \ + _cb = tptr[0]; \ + _cb += ((cb - _cb)*a + 127)>> 8;\ + _cg = tptr[1]; \ + _cg += ((cg - _cg)*a + 127)>> 8;\ + _cr = tptr[2]; \ + _cr += ((cr - _cr)*a + 127)>> 8;\ + tptr[0] = (uchar)_cb; \ + tptr[1] = (uchar)_cg; \ + tptr[2] = (uchar)_cr; \ + } + if( ax > ay ) + { + ptr += (pt1.x >> XY_SHIFT) * 3; + + while( ecount >= 0 ) + { + uchar *tptr = ptr + ((pt1.y >> XY_SHIFT) - 1) * step; + + int ep_corr = ep_table[(((scount >= 2) + 1) & (scount | 2)) * 3 + + (((ecount >= 2) + 1) & (ecount | 2))]; + int a, dist = (pt1.y >> (XY_SHIFT - 5)) & 31; + + a = (ep_corr * FilterTable[dist + 32] >> 8) & 0xff; + 
ICV_PUT_POINT(); + ICV_PUT_POINT(); + + tptr += step; + a = (ep_corr * FilterTable[dist] >> 8) & 0xff; + ICV_PUT_POINT(); + ICV_PUT_POINT(); + + tptr += step; + a = (ep_corr * FilterTable[63 - dist] >> 8) & 0xff; + ICV_PUT_POINT(); + ICV_PUT_POINT(); + + pt1.y += y_step; + ptr += 3; + scount++; + ecount--; + } + } + else + { + ptr += (pt1.y >> XY_SHIFT) * step; + + while( ecount >= 0 ) + { + uchar *tptr = ptr + ((pt1.x >> XY_SHIFT) - 1) * 3; + + int ep_corr = ep_table[(((scount >= 2) + 1) & (scount | 2)) * 3 + + (((ecount >= 2) + 1) & (ecount | 2))]; + int a, dist = (pt1.x >> (XY_SHIFT - 5)) & 31; + + a = (ep_corr * FilterTable[dist + 32] >> 8) & 0xff; + ICV_PUT_POINT(); + ICV_PUT_POINT(); + + tptr += 3; + a = (ep_corr * FilterTable[dist] >> 8) & 0xff; + ICV_PUT_POINT(); + ICV_PUT_POINT(); + + tptr += 3; + a = (ep_corr * FilterTable[63 - dist] >> 8) & 0xff; + ICV_PUT_POINT(); + ICV_PUT_POINT(); + + pt1.x += x_step; + ptr += step; + scount++; + ecount--; + } + } + #undef ICV_PUT_POINT + } + else + { + #define ICV_PUT_POINT() \ + { \ + _cb = tptr[0]; \ + _cb += ((cb - _cb)*a + 127)>> 8;\ + tptr[0] = (uchar)_cb; \ + } + + if( ax > ay ) + { + ptr += (pt1.x >> XY_SHIFT); + + while( ecount >= 0 ) + { + uchar *tptr = ptr + ((pt1.y >> XY_SHIFT) - 1) * step; + + int ep_corr = ep_table[(((scount >= 2) + 1) & (scount | 2)) * 3 + + (((ecount >= 2) + 1) & (ecount | 2))]; + int a, dist = (pt1.y >> (XY_SHIFT - 5)) & 31; + + a = (ep_corr * FilterTable[dist + 32] >> 8) & 0xff; + ICV_PUT_POINT(); + ICV_PUT_POINT(); + + tptr += step; + a = (ep_corr * FilterTable[dist] >> 8) & 0xff; + ICV_PUT_POINT(); + ICV_PUT_POINT(); + + tptr += step; + a = (ep_corr * FilterTable[63 - dist] >> 8) & 0xff; + ICV_PUT_POINT(); + ICV_PUT_POINT(); + + pt1.y += y_step; + ptr++; + scount++; + ecount--; + } + } + else + { + ptr += (pt1.y >> XY_SHIFT) * step; + + while( ecount >= 0 ) + { + uchar *tptr = ptr + ((pt1.x >> XY_SHIFT) - 1); + + int ep_corr = ep_table[(((scount >= 2) + 1) & (scount | 2)) * 3 + + 
(((ecount >= 2) + 1) & (ecount | 2))]; + int a, dist = (pt1.x >> (XY_SHIFT - 5)) & 31; + + a = (ep_corr * FilterTable[dist + 32] >> 8) & 0xff; + ICV_PUT_POINT(); + ICV_PUT_POINT(); + + tptr++; + a = (ep_corr * FilterTable[dist] >> 8) & 0xff; + ICV_PUT_POINT(); + ICV_PUT_POINT(); + + tptr++; + a = (ep_corr * FilterTable[63 - dist] >> 8) & 0xff; + ICV_PUT_POINT(); + ICV_PUT_POINT(); + + pt1.x += x_step; + ptr += step; + scount++; + ecount--; + } + } + #undef ICV_PUT_POINT + } +} + + +static void +Line2( Mat& img, Point pt1, Point pt2, const void* color ) +{ + int dx, dy; + int ecount; + int ax, ay; + int i, j; + int x_step, y_step; + int cb = ((uchar*)color)[0]; + int cg = ((uchar*)color)[1]; + int cr = ((uchar*)color)[2]; + int pix_size = (int)img.elemSize(); + uchar *ptr = img.data, *tptr; + size_t step = img.step; + Size size = img.size(); + + //assert( img && (nch == 1 || nch == 3) && img.depth() == CV_8U ); + + pt1.x -= XY_ONE*2; + pt1.y -= XY_ONE*2; + pt2.x -= XY_ONE*2; + pt2.y -= XY_ONE*2; + ptr += img.step*2 + 2*pix_size; + + size.width = ((size.width - 5) << XY_SHIFT) + 1; + size.height = ((size.height - 5) << XY_SHIFT) + 1; + + if( !clipLine( size, pt1, pt2 )) + return; + + dx = pt2.x - pt1.x; + dy = pt2.y - pt1.y; + + j = dx < 0 ? -1 : 0; + ax = (dx ^ j) - j; + i = dy < 0 ? 
-1 : 0; + ay = (dy ^ i) - i; + + if( ax > ay ) + { + dx = ax; + dy = (dy ^ j) - j; + pt1.x ^= pt2.x & j; + pt2.x ^= pt1.x & j; + pt1.x ^= pt2.x & j; + pt1.y ^= pt2.y & j; + pt2.y ^= pt1.y & j; + pt1.y ^= pt2.y & j; + + x_step = XY_ONE; + y_step = (int) (((int64) dy << XY_SHIFT) / (ax | 1)); + ecount = (pt2.x - pt1.x) >> XY_SHIFT; + } + else + { + dy = ay; + dx = (dx ^ i) - i; + pt1.x ^= pt2.x & i; + pt2.x ^= pt1.x & i; + pt1.x ^= pt2.x & i; + pt1.y ^= pt2.y & i; + pt2.y ^= pt1.y & i; + pt1.y ^= pt2.y & i; + + x_step = (int) (((int64) dx << XY_SHIFT) / (ay | 1)); + y_step = XY_ONE; + ecount = (pt2.y - pt1.y) >> XY_SHIFT; + } + + pt1.x += (XY_ONE >> 1); + pt1.y += (XY_ONE >> 1); + + if( pix_size == 3 ) + { + #define ICV_PUT_POINT() \ + { \ + tptr[0] = (uchar)cb; \ + tptr[1] = (uchar)cg; \ + tptr[2] = (uchar)cr; \ + } + + tptr = ptr + ((pt2.x + (XY_ONE >> 1))>> XY_SHIFT)*3 + + ((pt2.y + (XY_ONE >> 1)) >> XY_SHIFT)*step; + ICV_PUT_POINT(); + + if( ax > ay ) + { + ptr += (pt1.x >> XY_SHIFT) * 3; + + while( ecount >= 0 ) + { + tptr = ptr + (pt1.y >> XY_SHIFT) * step; + ICV_PUT_POINT(); + pt1.y += y_step; + ptr += 3; + ecount--; + } + } + else + { + ptr += (pt1.y >> XY_SHIFT) * step; + + while( ecount >= 0 ) + { + tptr = ptr + (pt1.x >> XY_SHIFT) * 3; + ICV_PUT_POINT(); + pt1.x += x_step; + ptr += step; + ecount--; + } + } + + #undef ICV_PUT_POINT + } + else if( pix_size == 1 ) + { + #define ICV_PUT_POINT() \ + { \ + tptr[0] = (uchar)cb; \ + } + + tptr = ptr + ((pt2.x + (XY_ONE >> 1))>> XY_SHIFT) + + ((pt2.y + (XY_ONE >> 1)) >> XY_SHIFT)*step; + ICV_PUT_POINT(); + + if( ax > ay ) + { + ptr += (pt1.x >> XY_SHIFT); + + while( ecount >= 0 ) + { + tptr = ptr + (pt1.y >> XY_SHIFT) * step; + ICV_PUT_POINT(); + pt1.y += y_step; + ptr++; + ecount--; + } + } + else + { + ptr += (pt1.y >> XY_SHIFT) * step; + + while( ecount >= 0 ) + { + tptr = ptr + (pt1.x >> XY_SHIFT); + ICV_PUT_POINT(); + pt1.x += x_step; + ptr += step; + ecount--; + } + } + #undef ICV_PUT_POINT + } + else + { + 
#define ICV_PUT_POINT() \ + for( j = 0; j < pix_size; j++ ) \ + tptr[j] = ((uchar*)color)[j]; + + tptr = ptr + ((pt2.x + (XY_ONE >> 1))>> XY_SHIFT)*pix_size + + ((pt2.y + (XY_ONE >> 1)) >> XY_SHIFT)*step; + ICV_PUT_POINT(); + + if( ax > ay ) + { + ptr += (pt1.x >> XY_SHIFT) * pix_size; + + while( ecount >= 0 ) + { + tptr = ptr + (pt1.y >> XY_SHIFT) * step; + ICV_PUT_POINT(); + pt1.y += y_step; + ptr += pix_size; + ecount--; + } + } + else + { + ptr += (pt1.y >> XY_SHIFT) * step; + + while( ecount >= 0 ) + { + tptr = ptr + (pt1.x >> XY_SHIFT) * pix_size; + ICV_PUT_POINT(); + pt1.x += x_step; + ptr += step; + ecount--; + } + } + + #undef ICV_PUT_POINT + } +} + + +/****************************************************************************************\ +* Antialiazed Elliptic Arcs via Antialiazed Lines * +\****************************************************************************************/ + +static const float SinTable[] = + { 0.0000000f, 0.0174524f, 0.0348995f, 0.0523360f, 0.0697565f, 0.0871557f, + 0.1045285f, 0.1218693f, 0.1391731f, 0.1564345f, 0.1736482f, 0.1908090f, + 0.2079117f, 0.2249511f, 0.2419219f, 0.2588190f, 0.2756374f, 0.2923717f, + 0.3090170f, 0.3255682f, 0.3420201f, 0.3583679f, 0.3746066f, 0.3907311f, + 0.4067366f, 0.4226183f, 0.4383711f, 0.4539905f, 0.4694716f, 0.4848096f, + 0.5000000f, 0.5150381f, 0.5299193f, 0.5446390f, 0.5591929f, 0.5735764f, + 0.5877853f, 0.6018150f, 0.6156615f, 0.6293204f, 0.6427876f, 0.6560590f, + 0.6691306f, 0.6819984f, 0.6946584f, 0.7071068f, 0.7193398f, 0.7313537f, + 0.7431448f, 0.7547096f, 0.7660444f, 0.7771460f, 0.7880108f, 0.7986355f, + 0.8090170f, 0.8191520f, 0.8290376f, 0.8386706f, 0.8480481f, 0.8571673f, + 0.8660254f, 0.8746197f, 0.8829476f, 0.8910065f, 0.8987940f, 0.9063078f, + 0.9135455f, 0.9205049f, 0.9271839f, 0.9335804f, 0.9396926f, 0.9455186f, + 0.9510565f, 0.9563048f, 0.9612617f, 0.9659258f, 0.9702957f, 0.9743701f, + 0.9781476f, 0.9816272f, 0.9848078f, 0.9876883f, 0.9902681f, 0.9925462f, + 0.9945219f, 
0.9961947f, 0.9975641f, 0.9986295f, 0.9993908f, 0.9998477f, + 1.0000000f, 0.9998477f, 0.9993908f, 0.9986295f, 0.9975641f, 0.9961947f, + 0.9945219f, 0.9925462f, 0.9902681f, 0.9876883f, 0.9848078f, 0.9816272f, + 0.9781476f, 0.9743701f, 0.9702957f, 0.9659258f, 0.9612617f, 0.9563048f, + 0.9510565f, 0.9455186f, 0.9396926f, 0.9335804f, 0.9271839f, 0.9205049f, + 0.9135455f, 0.9063078f, 0.8987940f, 0.8910065f, 0.8829476f, 0.8746197f, + 0.8660254f, 0.8571673f, 0.8480481f, 0.8386706f, 0.8290376f, 0.8191520f, + 0.8090170f, 0.7986355f, 0.7880108f, 0.7771460f, 0.7660444f, 0.7547096f, + 0.7431448f, 0.7313537f, 0.7193398f, 0.7071068f, 0.6946584f, 0.6819984f, + 0.6691306f, 0.6560590f, 0.6427876f, 0.6293204f, 0.6156615f, 0.6018150f, + 0.5877853f, 0.5735764f, 0.5591929f, 0.5446390f, 0.5299193f, 0.5150381f, + 0.5000000f, 0.4848096f, 0.4694716f, 0.4539905f, 0.4383711f, 0.4226183f, + 0.4067366f, 0.3907311f, 0.3746066f, 0.3583679f, 0.3420201f, 0.3255682f, + 0.3090170f, 0.2923717f, 0.2756374f, 0.2588190f, 0.2419219f, 0.2249511f, + 0.2079117f, 0.1908090f, 0.1736482f, 0.1564345f, 0.1391731f, 0.1218693f, + 0.1045285f, 0.0871557f, 0.0697565f, 0.0523360f, 0.0348995f, 0.0174524f, + 0.0000000f, -0.0174524f, -0.0348995f, -0.0523360f, -0.0697565f, -0.0871557f, + -0.1045285f, -0.1218693f, -0.1391731f, -0.1564345f, -0.1736482f, -0.1908090f, + -0.2079117f, -0.2249511f, -0.2419219f, -0.2588190f, -0.2756374f, -0.2923717f, + -0.3090170f, -0.3255682f, -0.3420201f, -0.3583679f, -0.3746066f, -0.3907311f, + -0.4067366f, -0.4226183f, -0.4383711f, -0.4539905f, -0.4694716f, -0.4848096f, + -0.5000000f, -0.5150381f, -0.5299193f, -0.5446390f, -0.5591929f, -0.5735764f, + -0.5877853f, -0.6018150f, -0.6156615f, -0.6293204f, -0.6427876f, -0.6560590f, + -0.6691306f, -0.6819984f, -0.6946584f, -0.7071068f, -0.7193398f, -0.7313537f, + -0.7431448f, -0.7547096f, -0.7660444f, -0.7771460f, -0.7880108f, -0.7986355f, + -0.8090170f, -0.8191520f, -0.8290376f, -0.8386706f, -0.8480481f, -0.8571673f, + -0.8660254f, -0.8746197f, 
-0.8829476f, -0.8910065f, -0.8987940f, -0.9063078f, + -0.9135455f, -0.9205049f, -0.9271839f, -0.9335804f, -0.9396926f, -0.9455186f, + -0.9510565f, -0.9563048f, -0.9612617f, -0.9659258f, -0.9702957f, -0.9743701f, + -0.9781476f, -0.9816272f, -0.9848078f, -0.9876883f, -0.9902681f, -0.9925462f, + -0.9945219f, -0.9961947f, -0.9975641f, -0.9986295f, -0.9993908f, -0.9998477f, + -1.0000000f, -0.9998477f, -0.9993908f, -0.9986295f, -0.9975641f, -0.9961947f, + -0.9945219f, -0.9925462f, -0.9902681f, -0.9876883f, -0.9848078f, -0.9816272f, + -0.9781476f, -0.9743701f, -0.9702957f, -0.9659258f, -0.9612617f, -0.9563048f, + -0.9510565f, -0.9455186f, -0.9396926f, -0.9335804f, -0.9271839f, -0.9205049f, + -0.9135455f, -0.9063078f, -0.8987940f, -0.8910065f, -0.8829476f, -0.8746197f, + -0.8660254f, -0.8571673f, -0.8480481f, -0.8386706f, -0.8290376f, -0.8191520f, + -0.8090170f, -0.7986355f, -0.7880108f, -0.7771460f, -0.7660444f, -0.7547096f, + -0.7431448f, -0.7313537f, -0.7193398f, -0.7071068f, -0.6946584f, -0.6819984f, + -0.6691306f, -0.6560590f, -0.6427876f, -0.6293204f, -0.6156615f, -0.6018150f, + -0.5877853f, -0.5735764f, -0.5591929f, -0.5446390f, -0.5299193f, -0.5150381f, + -0.5000000f, -0.4848096f, -0.4694716f, -0.4539905f, -0.4383711f, -0.4226183f, + -0.4067366f, -0.3907311f, -0.3746066f, -0.3583679f, -0.3420201f, -0.3255682f, + -0.3090170f, -0.2923717f, -0.2756374f, -0.2588190f, -0.2419219f, -0.2249511f, + -0.2079117f, -0.1908090f, -0.1736482f, -0.1564345f, -0.1391731f, -0.1218693f, + -0.1045285f, -0.0871557f, -0.0697565f, -0.0523360f, -0.0348995f, -0.0174524f, + -0.0000000f, 0.0174524f, 0.0348995f, 0.0523360f, 0.0697565f, 0.0871557f, + 0.1045285f, 0.1218693f, 0.1391731f, 0.1564345f, 0.1736482f, 0.1908090f, + 0.2079117f, 0.2249511f, 0.2419219f, 0.2588190f, 0.2756374f, 0.2923717f, + 0.3090170f, 0.3255682f, 0.3420201f, 0.3583679f, 0.3746066f, 0.3907311f, + 0.4067366f, 0.4226183f, 0.4383711f, 0.4539905f, 0.4694716f, 0.4848096f, + 0.5000000f, 0.5150381f, 0.5299193f, 0.5446390f, 
0.5591929f, 0.5735764f, + 0.5877853f, 0.6018150f, 0.6156615f, 0.6293204f, 0.6427876f, 0.6560590f, + 0.6691306f, 0.6819984f, 0.6946584f, 0.7071068f, 0.7193398f, 0.7313537f, + 0.7431448f, 0.7547096f, 0.7660444f, 0.7771460f, 0.7880108f, 0.7986355f, + 0.8090170f, 0.8191520f, 0.8290376f, 0.8386706f, 0.8480481f, 0.8571673f, + 0.8660254f, 0.8746197f, 0.8829476f, 0.8910065f, 0.8987940f, 0.9063078f, + 0.9135455f, 0.9205049f, 0.9271839f, 0.9335804f, 0.9396926f, 0.9455186f, + 0.9510565f, 0.9563048f, 0.9612617f, 0.9659258f, 0.9702957f, 0.9743701f, + 0.9781476f, 0.9816272f, 0.9848078f, 0.9876883f, 0.9902681f, 0.9925462f, + 0.9945219f, 0.9961947f, 0.9975641f, 0.9986295f, 0.9993908f, 0.9998477f, + 1.0000000f +}; + + +static void +sincos( int angle, float& cosval, float& sinval ) +{ + angle += (angle < 0 ? 360 : 0); + sinval = SinTable[angle]; + cosval = SinTable[450 - angle]; +} + +/* + constructs polygon that represents elliptic arc. +*/ +void ellipse2Poly( Point center, Size axes, int angle, + int arc_start, int arc_end, + int delta, vector& pts ) +{ + float alpha, beta; + double size_a = axes.width, size_b = axes.height; + double cx = center.x, cy = center.y; + Point prevPt(INT_MIN,INT_MIN); + int i; + + while( angle < 0 ) + angle += 360; + while( angle > 360 ) + angle -= 360; + + if( arc_start > arc_end ) + { + i = arc_start; + arc_start = arc_end; + arc_end = i; + } + while( arc_start < 0 ) + { + arc_start += 360; + arc_end += 360; + } + while( arc_end > 360 ) + { + arc_end -= 360; + arc_start -= 360; + } + if( arc_end - arc_start > 360 ) + { + arc_start = 0; + arc_end = 360; + } + sincos( angle, alpha, beta ); + pts.resize(0); + + for( i = arc_start; i < arc_end + delta; i += delta ) + { + double x, y; + angle = i; + if( angle > arc_end ) + angle = arc_end; + if( angle < 0 ) + angle += 360; + + x = size_a * SinTable[450-angle]; + y = size_b * SinTable[angle]; + Point pt; + pt.x = cvRound( cx + x * alpha - y * beta ); + pt.y = cvRound( cy + x * beta + y * alpha ); + if( pt 
!= prevPt ) + pts.push_back(pt); + } + + if( pts.size() < 2 ) + pts.push_back(pts[0]); +} + + +static void +EllipseEx( Mat& img, Point center, Size axes, + int angle, int arc_start, int arc_end, + const void* color, int thickness, int line_type ) +{ + axes.width = std::abs(axes.width), axes.height = std::abs(axes.height); + int delta = (std::max(axes.width,axes.height)+(XY_ONE>>1))>>XY_SHIFT; + delta = delta < 3 ? 90 : delta < 10 ? 30 : delta < 15 ? 18 : 5; + + vector v; + ellipse2Poly( center, axes, angle, arc_start, arc_end, delta, v ); + + if( thickness >= 0 ) + PolyLine( img, &v[0], (int)v.size(), false, color, thickness, line_type, XY_SHIFT ); + else if( arc_end - arc_start >= 360 ) + FillConvexPoly( img, &v[0], (int)v.size(), color, line_type, XY_SHIFT ); + else + { + v.push_back(center); + vector edges; + CollectPolyEdges( img, &v[0], (int)v.size(), edges, color, line_type, XY_SHIFT ); + FillEdgeCollection( img, edges, color ); + } +} + + +/****************************************************************************************\ +* Polygons filling * +\****************************************************************************************/ + +/* helper macros: filling horizontal row */ +#define ICV_HLINE( ptr, xl, xr, color, pix_size ) \ +{ \ + uchar* hline_ptr = (uchar*)(ptr) + (xl)*(pix_size); \ + uchar* hline_max_ptr = (uchar*)(ptr) + (xr)*(pix_size); \ + \ + for( ; hline_ptr <= hline_max_ptr; hline_ptr += (pix_size))\ + { \ + int hline_j; \ + for( hline_j = 0; hline_j < (pix_size); hline_j++ ) \ + { \ + hline_ptr[hline_j] = ((uchar*)color)[hline_j]; \ + } \ + } \ +} + + +/* filling convex polygon. v - array of vertices, ntps - number of points */ +static void +FillConvexPoly( Mat& img, const Point* v, int npts, const void* color, int line_type, int shift ) +{ + struct + { + int idx, di; + int x, dx, ye; + } + edge[2]; + + int delta = shift ? 
1 << (shift - 1) : 0; + int i, y, imin = 0, left = 0, right = 1, x1, x2; + int edges = npts; + int xmin, xmax, ymin, ymax; + uchar* ptr = img.data; + Size size = img.size(); + int pix_size = (int)img.elemSize(); + Point p0; + int delta1, delta2; + + if( line_type < CV_AA ) + delta1 = delta2 = XY_ONE >> 1; + else + delta1 = XY_ONE - 1, delta2 = 0; + + p0 = v[npts - 1]; + p0.x <<= XY_SHIFT - shift; + p0.y <<= XY_SHIFT - shift; + + assert( 0 <= shift && shift <= XY_SHIFT ); + xmin = xmax = v[0].x; + ymin = ymax = v[0].y; + + for( i = 0; i < npts; i++ ) + { + Point p = v[i]; + if( p.y < ymin ) + { + ymin = p.y; + imin = i; + } + + ymax = std::max( ymax, p.y ); + xmax = std::max( xmax, p.x ); + xmin = MIN( xmin, p.x ); + + p.x <<= XY_SHIFT - shift; + p.y <<= XY_SHIFT - shift; + + if( line_type <= 8 ) + { + if( shift == 0 ) + { + Point pt0, pt1; + pt0.x = p0.x >> XY_SHIFT; + pt0.y = p0.y >> XY_SHIFT; + pt1.x = p.x >> XY_SHIFT; + pt1.y = p.y >> XY_SHIFT; + Line( img, pt0, pt1, color, line_type ); + } + else + Line2( img, p0, p, color ); + } + else + LineAA( img, p0, p, color ); + p0 = p; + } + + xmin = (xmin + delta) >> shift; + xmax = (xmax + delta) >> shift; + ymin = (ymin + delta) >> shift; + ymax = (ymax + delta) >> shift; + + if( npts < 3 || xmax < 0 || ymax < 0 || xmin >= size.width || ymin >= size.height ) + return; + + ymax = MIN( ymax, size.height - 1 ); + edge[0].idx = edge[1].idx = imin; + + edge[0].ye = edge[1].ye = y = ymin; + edge[0].di = 1; + edge[1].di = npts - 1; + + ptr += img.step*y; + + do + { + if( line_type < CV_AA || y < ymax || y == ymin ) + { + for( i = 0; i < 2; i++ ) + { + if( y >= edge[i].ye ) + { + int idx = edge[i].idx, di = edge[i].di; + int xs = 0, xe, ye, ty = 0; + + for(;;) + { + ty = (v[idx].y + delta) >> shift; + if( ty > y || edges == 0 ) + break; + xs = v[idx].x; + idx += di; + idx -= ((idx < npts) - 1) & npts; /* idx -= idx >= npts ? 
npts : 0 */ + edges--; + } + + ye = ty; + xs <<= XY_SHIFT - shift; + xe = v[idx].x << (XY_SHIFT - shift); + + /* no more edges */ + if( y >= ye ) + return; + + edge[i].ye = ye; + edge[i].dx = ((xe - xs)*2 + (ye - y)) / (2 * (ye - y)); + edge[i].x = xs; + edge[i].idx = idx; + } + } + } + + if( edge[left].x > edge[right].x ) + { + left ^= 1; + right ^= 1; + } + + x1 = edge[left].x; + x2 = edge[right].x; + + if( y >= 0 ) + { + int xx1 = (x1 + delta1) >> XY_SHIFT; + int xx2 = (x2 + delta2) >> XY_SHIFT; + + if( xx2 >= 0 && xx1 < size.width ) + { + if( xx1 < 0 ) + xx1 = 0; + if( xx2 >= size.width ) + xx2 = size.width - 1; + ICV_HLINE( ptr, xx1, xx2, color, pix_size ); + } + } + + x1 += edge[left].dx; + x2 += edge[right].dx; + + edge[left].x = x1; + edge[right].x = x2; + ptr += img.step; + } + while( ++y <= ymax ); +} + + +/******** Arbitrary polygon **********/ + +static void +CollectPolyEdges( Mat& img, const Point* v, int count, vector& edges, + const void* color, int line_type, int shift, Point offset ) +{ + int i, delta = offset.y + (shift ? 
1 << (shift - 1) : 0); + Point pt0 = v[count-1], pt1; + pt0.x = (pt0.x + offset.x) << (XY_SHIFT - shift); + pt0.y = (pt0.y + delta) >> shift; + + edges.reserve( edges.size() + count ); + + for( i = 0; i < count; i++, pt0 = pt1 ) + { + Point t0, t1; + PolyEdge edge; + + pt1 = v[i]; + pt1.x = (pt1.x + offset.x) << (XY_SHIFT - shift); + pt1.y = (pt1.y + delta) >> shift; + + if( line_type < CV_AA ) + { + t0.y = pt0.y; t1.y = pt1.y; + t0.x = (pt0.x + (XY_ONE >> 1)) >> XY_SHIFT; + t1.x = (pt1.x + (XY_ONE >> 1)) >> XY_SHIFT; + Line( img, t0, t1, color, line_type ); + } + else + { + t0.x = pt0.x; t1.x = pt1.x; + t0.y = pt0.y << XY_SHIFT; + t1.y = pt1.y << XY_SHIFT; + LineAA( img, t0, t1, color ); + } + + if( pt0.y == pt1.y ) + continue; + + if( pt0.y < pt1.y ) + { + edge.y0 = pt0.y; + edge.y1 = pt1.y; + edge.x = pt0.x; + } + else + { + edge.y0 = pt1.y; + edge.y1 = pt0.y; + edge.x = pt1.x; + } + edge.dx = (pt1.x - pt0.x) / (pt1.y - pt0.y); + edges.push_back(edge); + } +} + +struct CmpEdges +{ + bool operator ()(const PolyEdge& e1, const PolyEdge& e2) + { + return e1.y0 - e2.y0 ? e1.y0 < e2.y0 : + e1.x - e2.x ? 
e1.x < e2.x : e1.dx < e2.dx; + } +}; + +/**************** helper macros and functions for sequence/contour processing ***********/ + +static void +FillEdgeCollection( Mat& img, vector& edges, const void* color ) +{ + PolyEdge tmp; + int i, y, total = (int)edges.size(); + Size size = img.size(); + PolyEdge* e; + int y_max = INT_MIN, x_max = INT_MIN, y_min = INT_MAX, x_min = INT_MAX; + int pix_size = (int)img.elemSize(); + + if( total < 2 ) + return; + + for( i = 0; i < total; i++ ) + { + PolyEdge& e1 = edges[i]; + assert( e1.y0 < e1.y1 ); + y_min = std::min( y_min, e1.y0 ); + y_max = std::max( y_max, e1.y1 ); + x_min = std::min( x_min, e1.x ); + x_max = std::max( x_max, e1.x ); + } + + if( y_max < 0 || y_min >= size.height || x_max < 0 || x_min >= (size.width<y0; y < y_max; y++ ) + { + PolyEdge *last, *prelast, *keep_prelast; + int sort_flag = 0; + int draw = 0; + int clipline = y < 0; + + prelast = &tmp; + last = tmp.next; + while( last || e->y0 == y ) + { + if( last && last->y1 == y ) + { + // exclude edge if y reachs its lower point + prelast->next = last->next; + last = last->next; + continue; + } + keep_prelast = prelast; + if( last && (e->y0 > y || last->x < e->x) ) + { + // go to the next edge in active list + prelast = last; + last = last->next; + } + else if( i < total ) + { + // insert new edge into active list if y reachs its upper point + prelast->next = e; + e->next = last; + prelast = e; + e = &edges[++i]; + } + else + break; + + if( draw ) + { + if( !clipline ) + { + // convert x's from fixed-point to image coordinates + uchar *timg = img.data + y * img.step; + int x1 = keep_prelast->x; + int x2 = prelast->x; + + if( x1 > x2 ) + { + int t = x1; + + x1 = x2; + x2 = t; + } + + x1 = (x1 + XY_ONE - 1) >> XY_SHIFT; + x2 = x2 >> XY_SHIFT; + + // clip and draw the line + if( x1 < size.width && x2 >= 0 ) + { + if( x1 < 0 ) + x1 = 0; + if( x2 >= size.width ) + x2 = size.width - 1; + ICV_HLINE( timg, x1, x2, color, pix_size ); + } + } + keep_prelast->x += 
keep_prelast->dx; + prelast->x += prelast->dx; + } + draw ^= 1; + } + + // sort edges (using bubble sort) + keep_prelast = 0; + + do + { + prelast = &tmp; + last = tmp.next; + + while( last != keep_prelast && last->next != 0 ) + { + PolyEdge *te = last->next; + + // swap edges + if( last->x > te->x ) + { + prelast->next = te; + last->next = te->next; + te->next = last; + prelast = te; + sort_flag = 1; + } + else + { + prelast = last; + last = te; + } + } + keep_prelast = prelast; + } + while( sort_flag && keep_prelast != tmp.next && keep_prelast != &tmp ); + } +} + + +/* draws simple or filled circle */ +static void +Circle( Mat& img, Point center, int radius, const void* color, int fill ) +{ + Size size = img.size(); + size_t step = img.step; + int pix_size = (int)img.elemSize(); + uchar* ptr = img.data; + int err = 0, dx = radius, dy = 0, plus = 1, minus = (radius << 1) - 1; + int inside = center.x >= radius && center.x < size.width - radius && + center.y >= radius && center.y < size.height - radius; + + #define ICV_PUT_POINT( ptr, x ) \ + memcpy( ptr + (x)*pix_size, color, pix_size ); + + while( dx >= dy ) + { + int mask; + int y11 = center.y - dy, y12 = center.y + dy, y21 = center.y - dx, y22 = center.y + dx; + int x11 = center.x - dx, x12 = center.x + dx, x21 = center.x - dy, x22 = center.x + dy; + + if( inside ) + { + uchar *tptr0 = ptr + y11 * step; + uchar *tptr1 = ptr + y12 * step; + + if( !fill ) + { + ICV_PUT_POINT( tptr0, x11 ); + ICV_PUT_POINT( tptr1, x11 ); + ICV_PUT_POINT( tptr0, x12 ); + ICV_PUT_POINT( tptr1, x12 ); + } + else + { + ICV_HLINE( tptr0, x11, x12, color, pix_size ); + ICV_HLINE( tptr1, x11, x12, color, pix_size ); + } + + tptr0 = ptr + y21 * step; + tptr1 = ptr + y22 * step; + + if( !fill ) + { + ICV_PUT_POINT( tptr0, x21 ); + ICV_PUT_POINT( tptr1, x21 ); + ICV_PUT_POINT( tptr0, x22 ); + ICV_PUT_POINT( tptr1, x22 ); + } + else + { + ICV_HLINE( tptr0, x21, x22, color, pix_size ); + ICV_HLINE( tptr1, x21, x22, color, pix_size ); + } + } + 
else if( x11 < size.width && x12 >= 0 && y21 < size.height && y22 >= 0 ) + { + if( fill ) + { + x11 = std::max( x11, 0 ); + x12 = MIN( x12, size.width - 1 ); + } + + if( (unsigned)y11 < (unsigned)size.height ) + { + uchar *tptr = ptr + y11 * step; + + if( !fill ) + { + if( x11 >= 0 ) + ICV_PUT_POINT( tptr, x11 ); + if( x12 < size.width ) + ICV_PUT_POINT( tptr, x12 ); + } + else + ICV_HLINE( tptr, x11, x12, color, pix_size ); + } + + if( (unsigned)y12 < (unsigned)size.height ) + { + uchar *tptr = ptr + y12 * step; + + if( !fill ) + { + if( x11 >= 0 ) + ICV_PUT_POINT( tptr, x11 ); + if( x12 < size.width ) + ICV_PUT_POINT( tptr, x12 ); + } + else + ICV_HLINE( tptr, x11, x12, color, pix_size ); + } + + if( x21 < size.width && x22 >= 0 ) + { + if( fill ) + { + x21 = std::max( x21, 0 ); + x22 = MIN( x22, size.width - 1 ); + } + + if( (unsigned)y21 < (unsigned)size.height ) + { + uchar *tptr = ptr + y21 * step; + + if( !fill ) + { + if( x21 >= 0 ) + ICV_PUT_POINT( tptr, x21 ); + if( x22 < size.width ) + ICV_PUT_POINT( tptr, x22 ); + } + else + ICV_HLINE( tptr, x21, x22, color, pix_size ); + } + + if( (unsigned)y22 < (unsigned)size.height ) + { + uchar *tptr = ptr + y22 * step; + + if( !fill ) + { + if( x21 >= 0 ) + ICV_PUT_POINT( tptr, x21 ); + if( x22 < size.width ) + ICV_PUT_POINT( tptr, x22 ); + } + else + ICV_HLINE( tptr, x21, x22, color, pix_size ); + } + } + } + dy++; + err += plus; + plus += 2; + + mask = (err <= 0) - 1; + + err -= minus & mask; + dx += mask; + minus -= mask & 2; + } + + #undef ICV_PUT_POINT +} + + +static void +ThickLine( Mat& img, Point p0, Point p1, const void* color, + int thickness, int line_type, int flags, int shift ) +{ + static const double INV_XY_ONE = 1./XY_ONE; + + p0.x <<= XY_SHIFT - shift; + p0.y <<= XY_SHIFT - shift; + p1.x <<= XY_SHIFT - shift; + p1.y <<= XY_SHIFT - shift; + + if( thickness <= 1 ) + { + if( line_type < CV_AA ) + { + if( line_type == 1 || line_type == 4 || shift == 0 ) + { + p0.x = (p0.x + (XY_ONE>>1)) >> XY_SHIFT; + 
p0.y = (p0.y + (XY_ONE>>1)) >> XY_SHIFT; + p1.x = (p1.x + (XY_ONE>>1)) >> XY_SHIFT; + p1.y = (p1.y + (XY_ONE>>1)) >> XY_SHIFT; + Line( img, p0, p1, color, line_type ); + } + else + Line2( img, p0, p1, color ); + } + else + LineAA( img, p0, p1, color ); + } + else + { + Point pt[4], dp = Point(0,0); + double dx = (p0.x - p1.x)*INV_XY_ONE, dy = (p1.y - p0.y)*INV_XY_ONE; + double r = dx * dx + dy * dy; + int i, oddThickness = thickness & 1; + thickness <<= XY_SHIFT - 1; + + if( fabs(r) > DBL_EPSILON ) + { + r = (thickness + oddThickness*XY_ONE*0.5)/std::sqrt(r); + dp.x = cvRound( dy * r ); + dp.y = cvRound( dx * r ); + + pt[0].x = p0.x + dp.x; + pt[0].y = p0.y + dp.y; + pt[1].x = p0.x - dp.x; + pt[1].y = p0.y - dp.y; + pt[2].x = p1.x - dp.x; + pt[2].y = p1.y - dp.y; + pt[3].x = p1.x + dp.x; + pt[3].y = p1.y + dp.y; + + FillConvexPoly( img, pt, 4, color, line_type, XY_SHIFT ); + } + + for( i = 0; i < 2; i++ ) + { + if( flags & (i+1) ) + { + if( line_type < CV_AA ) + { + Point center; + center.x = (p0.x + (XY_ONE>>1)) >> XY_SHIFT; + center.y = (p0.y + (XY_ONE>>1)) >> XY_SHIFT; + Circle( img, center, (thickness + (XY_ONE>>1)) >> XY_SHIFT, color, 1 ); + } + else + { + EllipseEx( img, p0, cvSize(thickness, thickness), + 0, 0, 360, color, -1, line_type ); + } + } + p0 = p1; + } + } +} + + +static void +PolyLine( Mat& img, const Point* v, int count, bool is_closed, + const void* color, int thickness, + int line_type, int shift ) +{ + if( !v || count <= 0 ) + return; + + int i = is_closed ? 
count - 1 : 0; + int flags = 2 + !is_closed; + Point p0; + CV_Assert( 0 <= shift && shift <= XY_SHIFT && thickness >= 0 ); + + p0 = v[i]; + for( i = !is_closed; i < count; i++ ) + { + Point p = v[i]; + ThickLine( img, p0, p, color, thickness, line_type, flags, shift ); + p0 = p; + flags = 2; + } +} + +/****************************************************************************************\ +* External functions * +\****************************************************************************************/ + +void line( Mat& img, Point pt1, Point pt2, const Scalar& color, + int thickness, int line_type, int shift ) +{ + if( line_type == CV_AA && img.depth() != CV_8U ) + line_type = 8; + + CV_Assert( 0 <= thickness && thickness <= 255 ); + CV_Assert( 0 <= shift && shift <= XY_SHIFT ); + + double buf[4]; + scalarToRawData( color, buf, img.type(), 0 ); + ThickLine( img, pt1, pt2, buf, thickness, line_type, 3, shift ); +} + +void rectangle( Mat& img, Point pt1, Point pt2, + const Scalar& color, int thickness, + int lineType, int shift ) +{ + if( lineType == CV_AA && img.depth() != CV_8U ) + lineType = 8; + + CV_Assert( thickness <= 255 ); + CV_Assert( 0 <= shift && shift <= XY_SHIFT ); + + double buf[4]; + scalarToRawData(color, buf, img.type(), 0); + + Point pt[4]; + + pt[0] = pt1; + pt[1].x = pt2.x; + pt[1].y = pt1.y; + pt[2] = pt2; + pt[3].x = pt1.x; + pt[3].y = pt2.y; + + if( thickness >= 0 ) + PolyLine( img, pt, 4, true, buf, thickness, lineType, shift ); + else + FillConvexPoly( img, pt, 4, buf, lineType, shift ); +} + + +void rectangle( Mat& img, Rect rec, + const Scalar& color, int thickness, + int lineType, int shift ) +{ + CV_Assert( 0 <= shift && shift <= XY_SHIFT ); + if( rec.area() > 0 ) + rectangle( img, rec.tl(), rec.br() - Point(1<= 0 && thickness <= 255 && + 0 <= shift && shift <= XY_SHIFT ); + + double buf[4]; + scalarToRawData(color, buf, img.type(), 0); + + if( thickness > 1 || line_type >= CV_AA ) + { + center.x <<= XY_SHIFT - shift; + center.y <<= 
XY_SHIFT - shift; + radius <<= XY_SHIFT - shift; + EllipseEx( img, center, Size(radius, radius), + 0, 0, 360, buf, thickness, line_type ); + } + else + Circle( img, center, radius, buf, thickness < 0 ); +} + + +void ellipse( Mat& img, Point center, Size axes, + double angle, double start_angle, double end_angle, + const Scalar& color, int thickness, int line_type, int shift ) +{ + if( line_type == CV_AA && img.depth() != CV_8U ) + line_type = 8; + + CV_Assert( axes.width >= 0 && axes.height >= 0 && + thickness <= 255 && 0 <= shift && shift <= XY_SHIFT ); + + double buf[4]; + scalarToRawData(color, buf, img.type(), 0); + + int _angle = cvRound(angle); + int _start_angle = cvRound(start_angle); + int _end_angle = cvRound(end_angle); + center.x <<= XY_SHIFT - shift; + center.y <<= XY_SHIFT - shift; + axes.width <<= XY_SHIFT - shift; + axes.height <<= XY_SHIFT - shift; + + EllipseEx( img, center, axes, _angle, _start_angle, + _end_angle, buf, thickness, line_type ); +} + +void ellipse(Mat& img, const RotatedRect& box, const Scalar& color, + int thickness, int lineType) +{ + if( lineType == CV_AA && img.depth() != CV_8U ) + lineType = 8; + + CV_Assert( box.size.width >= 0 && box.size.height >= 0 && + thickness <= 255 ); + + double buf[4]; + scalarToRawData(color, buf, img.type(), 0); + + int _angle = cvRound(box.angle); + Point center(cvRound(box.center.x*(1 << XY_SHIFT)), + cvRound(box.center.y*(1 << XY_SHIFT))); + Size axes(cvRound(box.size.width*(1 << (XY_SHIFT - 1))), + cvRound(box.size.height*(1 << (XY_SHIFT - 1)))); + EllipseEx( img, center, axes, _angle, 0, 360, buf, thickness, lineType ); +} + +void fillConvexPoly( Mat& img, const Point* pts, int npts, + const Scalar& color, int line_type, int shift ) +{ + if( !pts || npts <= 0 ) + return; + + if( line_type == CV_AA && img.depth() != CV_8U ) + line_type = 8; + + double buf[4]; + CV_Assert( 0 <= shift && shift <= XY_SHIFT ); + scalarToRawData(color, buf, img.type(), 0); + FillConvexPoly( img, pts, npts, buf, 
line_type, shift ); +} + + +void fillPoly( Mat& img, const Point** pts, const int* npts, int ncontours, + const Scalar& color, int line_type, + int shift, Point offset ) +{ + if( line_type == CV_AA && img.depth() != CV_8U ) + line_type = 8; + + CV_Assert( pts && npts && ncontours >= 0 && 0 <= shift && shift <= XY_SHIFT ); + + double buf[4]; + scalarToRawData(color, buf, img.type(), 0); + + vector edges; + + int i, total = 0; + for( i = 0; i < ncontours; i++ ) + total += npts[i]; + + edges.reserve( total + 1 ); + for( i = 0; i < ncontours; i++ ) + CollectPolyEdges( img, pts[i], npts[i], edges, buf, line_type, shift, offset ); + + FillEdgeCollection(img, edges, buf); +} + + +void polylines( Mat& img, const Point** pts, const int* npts, int ncontours, bool isClosed, + const Scalar& color, int thickness, int line_type, int shift ) +{ + if( line_type == CV_AA && img.depth() != CV_8U ) + line_type = 8; + + CV_Assert( pts && npts && ncontours >= 0 && + 0 <= thickness && thickness <= 255 && + 0 <= shift && shift <= XY_SHIFT ); + + double buf[4]; + scalarToRawData( color, buf, img.type(), 0 ); + + for( int i = 0; i < ncontours; i++ ) + PolyLine( img, pts[i], npts[i], isClosed, buf, thickness, line_type, shift ); +} + + +enum { FONT_SIZE_SHIFT=8, FONT_ITALIC_ALPHA=(1 << 8), + FONT_ITALIC_DIGIT=(2 << 8), FONT_ITALIC_PUNCT=(4 << 8), + FONT_ITALIC_BRACES=(8 << 8), FONT_HAVE_GREEK=(16 << 8), + FONT_HAVE_CYRILLIC=(32 << 8) }; + +static const int HersheyPlain[] = { +(5 + 4*16) + FONT_HAVE_GREEK, +199, 214, 217, 233, 219, 197, 234, 216, 221, 222, 228, 225, 211, 224, 210, 220, +200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 212, 213, 191, 226, 192, +215, 190, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, +14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 193, 84, +194, 85, 86, 87, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, +112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, +195, 223, 196, 88 }; + +static const int HersheyPlainItalic[] = { +(5 + 
4*16) + FONT_ITALIC_ALPHA + FONT_HAVE_GREEK, +199, 214, 217, 233, 219, 197, 234, 216, 221, 222, 228, 225, 211, 224, 210, 220, +200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 212, 213, 191, 226, 192, +215, 190, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, +64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 193, 84, +194, 85, 86, 87, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, +162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, +195, 223, 196, 88 }; + +static const int HersheyComplexSmall[] = { +(6 + 7*16) + FONT_HAVE_GREEK, +1199, 1214, 1217, 1275, 1274, 1271, 1272, 1216, 1221, 1222, 1219, 1232, 1211, 1231, 1210, 1220, +1200, 1201, 1202, 1203, 1204, 1205, 1206, 1207, 1208, 1209, 1212, 2213, 1241, 1238, 1242, +1215, 1273, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, +1014, 1015, 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026, 1223, 1084, +1224, 1247, 586, 1249, 1101, 1102, 1103, 1104, 1105, 1106, 1107, 1108, 1109, 1110, 1111, +1112, 1113, 1114, 1115, 1116, 1117, 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 1126, +1225, 1229, 1226, 1246 }; + +static const int HersheyComplexSmallItalic[] = { +(6 + 7*16) + FONT_ITALIC_ALPHA + FONT_HAVE_GREEK, +1199, 1214, 1217, 1275, 1274, 1271, 1272, 1216, 1221, 1222, 1219, 1232, 1211, 1231, 1210, 1220, +1200, 1201, 1202, 1203, 1204, 1205, 1206, 1207, 1208, 1209, 1212, 1213, 1241, 1238, 1242, +1215, 1273, 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, +1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, 1076, 1223, 1084, +1224, 1247, 586, 1249, 1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161, +1162, 1163, 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, +1225, 1229, 1226, 1246 }; + +static const int HersheySimplex[] = { +(9 + 12*16) + FONT_HAVE_GREEK, +2199, 714, 717, 733, 719, 697, 734, 716, 721, 722, 728, 725, 711, 724, 710, 720, +700, 701, 
702, 703, 704, 705, 706, 707, 708, 709, 712, 713, 691, 726, 692, +715, 690, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, +514, 515, 516, 517, 518, 519, 520, 521, 522, 523, 524, 525, 526, 693, 584, +694, 2247, 586, 2249, 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, +612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, +695, 723, 696, 2246 }; + +static const int HersheyDuplex[] = { +(9 + 12*16) + FONT_HAVE_GREEK, +2199, 2714, 2728, 2732, 2719, 2733, 2718, 2727, 2721, 2722, 2723, 2725, 2711, 2724, 2710, 2720, +2700, 2701, 2702, 2703, 2704, 2705, 2706, 2707, 2708, 2709, 2712, 2713, 2730, 2726, 2731, +2715, 2734, 2501, 2502, 2503, 2504, 2505, 2506, 2507, 2508, 2509, 2510, 2511, 2512, 2513, +2514, 2515, 2516, 2517, 2518, 2519, 2520, 2521, 2522, 2523, 2524, 2525, 2526, 2223, 2084, +2224, 2247, 587, 2249, 2601, 2602, 2603, 2604, 2605, 2606, 2607, 2608, 2609, 2610, 2611, +2612, 2613, 2614, 2615, 2616, 2617, 2618, 2619, 2620, 2621, 2622, 2623, 2624, 2625, 2626, +2225, 2229, 2226, 2246 }; + +static const int HersheyComplex[] = { +(9 + 12*16) + FONT_HAVE_GREEK + FONT_HAVE_CYRILLIC, +2199, 2214, 2217, 2275, 2274, 2271, 2272, 2216, 2221, 2222, 2219, 2232, 2211, 2231, 2210, 2220, +2200, 2201, 2202, 2203, 2204, 2205, 2206, 2207, 2208, 2209, 2212, 2213, 2241, 2238, 2242, +2215, 2273, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, +2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2223, 2084, +2224, 2247, 587, 2249, 2101, 2102, 2103, 2104, 2105, 2106, 2107, 2108, 2109, 2110, 2111, +2112, 2113, 2114, 2115, 2116, 2117, 2118, 2119, 2120, 2121, 2122, 2123, 2124, 2125, 2126, +2225, 2229, 2226, 2246 }; + +static const int HersheyComplexItalic[] = { +(9 + 12*16) + FONT_ITALIC_ALPHA + FONT_ITALIC_DIGIT + FONT_ITALIC_PUNCT + +FONT_HAVE_GREEK + FONT_HAVE_CYRILLIC, +2199, 2764, 2778, 2782, 2769, 2783, 2768, 2777, 2771, 2772, 2219, 2232, 2211, 2231, 2210, 2220, +2750, 2751, 2752, 2753, 
2754, 2755, 2756, 2757, 2758, 2759, 2212, 2213, 2241, 2238, 2242, +2765, 2273, 2051, 2052, 2053, 2054, 2055, 2056, 2057, 2058, 2059, 2060, 2061, 2062, 2063, +2064, 2065, 2066, 2067, 2068, 2069, 2070, 2071, 2072, 2073, 2074, 2075, 2076, 2223, 2084, +2224, 2247, 587, 2249, 2151, 2152, 2153, 2154, 2155, 2156, 2157, 2158, 2159, 2160, 2161, +2162, 2163, 2164, 2165, 2166, 2167, 2168, 2169, 2170, 2171, 2172, 2173, 2174, 2175, 2176, +2225, 2229, 2226, 2246 }; + +static const int HersheyTriplex[] = { +(9 + 12*16) + FONT_HAVE_GREEK, +2199, 3214, 3228, 3232, 3219, 3233, 3218, 3227, 3221, 3222, 3223, 3225, 3211, 3224, 3210, 3220, +3200, 3201, 3202, 3203, 3204, 3205, 3206, 3207, 3208, 3209, 3212, 3213, 3230, 3226, 3231, +3215, 3234, 3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008, 3009, 3010, 3011, 3012, 3013, +2014, 3015, 3016, 3017, 3018, 3019, 3020, 3021, 3022, 3023, 3024, 3025, 3026, 2223, 2084, +2224, 2247, 587, 2249, 3101, 3102, 3103, 3104, 3105, 3106, 3107, 3108, 3109, 3110, 3111, +3112, 3113, 3114, 3115, 3116, 3117, 3118, 3119, 3120, 3121, 3122, 3123, 3124, 3125, 3126, +2225, 2229, 2226, 2246 }; + +static const int HersheyTriplexItalic[] = { +(9 + 12*16) + FONT_ITALIC_ALPHA + FONT_ITALIC_DIGIT + +FONT_ITALIC_PUNCT + FONT_HAVE_GREEK, +2199, 3264, 3278, 3282, 3269, 3233, 3268, 3277, 3271, 3272, 3223, 3225, 3261, 3224, 3260, 3270, +3250, 3251, 3252, 3253, 3254, 3255, 3256, 3257, 3258, 3259, 3262, 3263, 3230, 3226, 3231, +3265, 3234, 3051, 3052, 3053, 3054, 3055, 3056, 3057, 3058, 3059, 3060, 3061, 3062, 3063, +2064, 3065, 3066, 3067, 3068, 3069, 3070, 3071, 3072, 3073, 3074, 3075, 3076, 2223, 2084, +2224, 2247, 587, 2249, 3151, 3152, 3153, 3154, 3155, 3156, 3157, 3158, 3159, 3160, 3161, +3162, 3163, 3164, 3165, 3166, 3167, 3168, 3169, 3170, 3171, 3172, 3173, 3174, 3175, 3176, +2225, 2229, 2226, 2246 }; + +static const int HersheyScriptSimplex[] = { +(9 + 12*16) + FONT_ITALIC_ALPHA + FONT_HAVE_GREEK, +2199, 714, 717, 733, 719, 697, 734, 716, 721, 722, 728, 725, 711, 724, 
710, 720, +700, 701, 702, 703, 704, 705, 706, 707, 708, 709, 712, 713, 691, 726, 692, +715, 690, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 561, 562, 563, +564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576, 693, 584, +694, 2247, 586, 2249, 651, 652, 653, 654, 655, 656, 657, 658, 659, 660, 661, +662, 663, 664, 665, 666, 667, 668, 669, 670, 671, 672, 673, 674, 675, 676, +695, 723, 696, 2246 }; + +static const int HersheyScriptComplex[] = { +(9 + 12*16) + FONT_ITALIC_ALPHA + FONT_ITALIC_DIGIT + FONT_ITALIC_PUNCT + FONT_HAVE_GREEK, +2199, 2764, 2778, 2782, 2769, 2783, 2768, 2777, 2771, 2772, 2219, 2232, 2211, 2231, 2210, 2220, +2750, 2751, 2752, 2753, 2754, 2755, 2756, 2757, 2758, 2759, 2212, 2213, 2241, 2238, 2242, +2215, 2273, 2551, 2552, 2553, 2554, 2555, 2556, 2557, 2558, 2559, 2560, 2561, 2562, 2563, +2564, 2565, 2566, 2567, 2568, 2569, 2570, 2571, 2572, 2573, 2574, 2575, 2576, 2223, 2084, +2224, 2247, 586, 2249, 2651, 2652, 2653, 2654, 2655, 2656, 2657, 2658, 2659, 2660, 2661, +2662, 2663, 2664, 2665, 2666, 2667, 2668, 2669, 2670, 2671, 2672, 2673, 2674, 2675, 2676, +2225, 2229, 2226, 2246 }; + + +static const int* getFontData(int fontFace) +{ + bool isItalic = (fontFace & FONT_ITALIC) != 0; + const int* ascii = 0; + + switch( fontFace & 15 ) + { + case FONT_HERSHEY_SIMPLEX: + ascii = HersheySimplex; + break; + case FONT_HERSHEY_PLAIN: + ascii = !isItalic ? HersheyPlain : HersheyPlainItalic; + break; + case FONT_HERSHEY_DUPLEX: + ascii = HersheyDuplex; + break; + case FONT_HERSHEY_COMPLEX: + ascii = !isItalic ? HersheyComplex : HersheyComplexItalic; + break; + case FONT_HERSHEY_TRIPLEX: + ascii = !isItalic ? HersheyTriplex : HersheyTriplexItalic; + break; + case FONT_HERSHEY_COMPLEX_SMALL: + ascii = !isItalic ? 
HersheyComplexSmall : HersheyComplexSmallItalic; + break; + case FONT_HERSHEY_SCRIPT_SIMPLEX: + ascii = HersheyScriptSimplex; + break; + case FONT_HERSHEY_SCRIPT_COMPLEX: + ascii = HersheyScriptComplex; + break; + default: + CV_Error( CV_StsOutOfRange, "Unknown font type" ); + } + return ascii; +} + + +void putText( Mat& img, const string& text, Point org, + int fontFace, double fontScale, Scalar color, + int thickness, int line_type, bool bottomLeftOrigin ) + +{ + const int* ascii = getFontData(fontFace); + + double buf[4]; + scalarToRawData(color, buf, img.type(), 0); + + int base_line = -(ascii[0] & 15); + int hscale = cvRound(fontScale*XY_ONE), vscale = hscale; + + if( line_type == CV_AA && img.depth() != CV_8U ) + line_type = 8; + + if( bottomLeftOrigin ) + vscale = -vscale; + + int view_x = org.x << XY_SHIFT; + int view_y = (org.y << XY_SHIFT) + base_line*vscale; + vector pts; + pts.reserve(1 << 10); + const char **faces = cv::g_HersheyGlyphs; + + for( int i = 0; text[i] != '\0'; i++ ) + { + int c = (uchar)text[i]; + Point p; + + if( c >= 127 || c < ' ' ) + c = '?'; + + const char* ptr = faces[ascii[(c-' ')+1]]; + p.x = (uchar)ptr[0] - 'R'; + p.y = (uchar)ptr[1] - 'R'; + int dx = p.y*hscale; + view_x -= p.x*hscale; + pts.resize(0); + + for( ptr += 2;; ) + { + if( *ptr == ' ' || !*ptr ) + { + if( pts.size() > 1 ) + PolyLine( img, &pts[0], (int)pts.size(), false, buf, thickness, line_type, XY_SHIFT ); + if( !*ptr++ ) + break; + pts.resize(0); + } + else + { + p.x = (uchar)ptr[0] - 'R'; + p.y = (uchar)ptr[1] - 'R'; + ptr += 2; + pts.push_back(Point(p.x*hscale + view_x, p.y*vscale + view_y)); + } + } + view_x += dx; + } +} + +Size getTextSize( const string& text, int fontFace, double fontScale, int thickness, int* _base_line) +{ + Size size; + double view_x = 0; + const char **faces = cv::g_HersheyGlyphs; + const int* ascii = getFontData(fontFace); + + int base_line = (ascii[0] & 15); + int cap_line = (ascii[0] >> 4) & 15; + size.height = cvRound((cap_line + 
base_line)*fontScale + (thickness+1)/2); + + for( int i = 0; text[i] != '\0'; i++ ) + { + int c = (uchar)text[i]; + Point p; + + if( c >= 127 || c < ' ' ) + c = '?'; + + const char* ptr = faces[ascii[(c-' ')+1]]; + p.x = (uchar)ptr[0] - 'R'; + p.y = (uchar)ptr[1] - 'R'; + view_x += (p.y - p.x)*fontScale; + } + + size.width = cvRound(view_x + thickness); + if( _base_line ) + *_base_line = cvRound(base_line*fontScale + thickness*0.5); + return size; +} + +} + + +void cv::fillConvexPoly(InputOutputArray _img, InputArray _points, + const Scalar& color, int lineType, int shift) +{ + Mat img = _img.getMat(), points = _points.getMat(); + CV_Assert(points.checkVector(2, CV_32S) >= 0); + fillConvexPoly(img, (const Point*)points.data, points.rows*points.cols*points.channels()/2, color, lineType, shift); +} + + +void cv::fillPoly(InputOutputArray _img, InputArrayOfArrays pts, + const Scalar& color, int lineType, int shift, Point offset) +{ + Mat img = _img.getMat(); + int i, ncontours = (int)pts.total(); + if( ncontours == 0 ) + return; + AutoBuffer _ptsptr(ncontours); + AutoBuffer _npts(ncontours); + Point** ptsptr = _ptsptr; + int* npts = _npts; + + for( i = 0; i < ncontours; i++ ) + { + Mat p = pts.getMat(i); + CV_Assert(p.checkVector(2, CV_32S) >= 0); + ptsptr[i] = (Point*)p.data; + npts[i] = p.rows*p.cols*p.channels()/2; + } + fillPoly(img, (const Point**)ptsptr, npts, (int)ncontours, color, lineType, shift, offset); +} + + +void cv::polylines(InputOutputArray _img, InputArrayOfArrays pts, + bool isClosed, const Scalar& color, + int thickness, int lineType, int shift ) +{ + Mat img = _img.getMat(); + int i, ncontours = (int)pts.total(); + if( ncontours == 0 ) + return; + AutoBuffer _ptsptr(ncontours); + AutoBuffer _npts(ncontours); + Point** ptsptr = _ptsptr; + int* npts = _npts; + + for( i = 0; i < ncontours; i++ ) + { + Mat p = pts.getMat(i); + CV_Assert(p.checkVector(2, CV_32S) >= 0); + ptsptr[i] = (Point*)p.data; + npts[i] = p.rows*p.cols*p.channels()/2; + } + 
polylines(img, (const Point**)ptsptr, npts, (int)ncontours, isClosed, color, thickness, lineType, shift); +} + + +static const int CodeDeltas[8][2] = +{ {1, 0}, {1, -1}, {0, -1}, {-1, -1}, {-1, 0}, {-1, 1}, {0, 1}, {1, 1} }; + +#define CV_ADJUST_EDGE_COUNT( count, seq ) \ + ((count) -= ((count) == (seq)->total && !CV_IS_SEQ_CLOSED(seq))) + +CV_IMPL void +cvDrawContours( void* _img, CvSeq* contour, + CvScalar _externalColor, CvScalar _holeColor, + int maxLevel, int thickness, + int line_type, CvPoint _offset ) +{ + CvSeq *contour0 = contour, *h_next = 0; + CvTreeNodeIterator iterator; + cv::vector edges; + cv::vector pts; + cv::Scalar externalColor = _externalColor, holeColor = _holeColor; + cv::Mat img = cv::cvarrToMat(_img); + cv::Point offset = _offset; + double ext_buf[4], hole_buf[4]; + + if( line_type == CV_AA && img.depth() != CV_8U ) + line_type = 8; + + if( !contour ) + return; + + CV_Assert( thickness <= 255 ); + + scalarToRawData( externalColor, ext_buf, img.type(), 0 ); + scalarToRawData( holeColor, hole_buf, img.type(), 0 ); + + maxLevel = MAX(maxLevel, INT_MIN+2); + maxLevel = MIN(maxLevel, INT_MAX-1); + + if( maxLevel < 0 ) + { + h_next = contour->h_next; + contour->h_next = 0; + maxLevel = -maxLevel+1; + } + + cvInitTreeNodeIterator( &iterator, contour, maxLevel ); + while( (contour = (CvSeq*)cvNextTreeNode( &iterator )) != 0 ) + { + CvSeqReader reader; + int i, count = contour->total; + int elem_type = CV_MAT_TYPE(contour->flags); + void* clr = (contour->flags & CV_SEQ_FLAG_HOLE) == 0 ? ext_buf : hole_buf; + + cvStartReadSeq( contour, &reader, 0 ); + if( thickness < 0 ) + pts.resize(0); + + if( CV_IS_SEQ_CHAIN_CONTOUR( contour )) + { + cv::Point pt = ((CvChain*)contour)->origin; + cv::Point prev_pt = pt; + char prev_code = reader.ptr ? 
reader.ptr[0] : '\0'; + + prev_pt += offset; + + for( i = 0; i < count; i++ ) + { + char code; + CV_READ_SEQ_ELEM( code, reader ); + + assert( (code & ~7) == 0 ); + + if( code != prev_code ) + { + prev_code = code; + if( thickness >= 0 ) + cv::ThickLine( img, prev_pt, pt, clr, thickness, line_type, 2, 0 ); + else + pts.push_back(pt); + prev_pt = pt; + } + + pt.x += CodeDeltas[(int)code][0]; + pt.y += CodeDeltas[(int)code][1]; + } + + if( thickness >= 0 ) + cv::ThickLine( img, prev_pt, + cv::Point(((CvChain*)contour)->origin) + offset, + clr, thickness, line_type, 2, 0 ); + else + cv::CollectPolyEdges(img, &pts[0], (int)pts.size(), + edges, ext_buf, line_type, 0, offset); + } + else if( CV_IS_SEQ_POLYLINE( contour )) + { + CV_Assert( elem_type == CV_32SC2 ); + cv::Point pt1, pt2; + int shift = 0; + + count -= !CV_IS_SEQ_CLOSED(contour); + CV_READ_SEQ_ELEM( pt1, reader ); + pt1 += offset; + if( thickness < 0 ) + pts.push_back(pt1); + + for( i = 0; i < count; i++ ) + { + CV_READ_SEQ_ELEM( pt2, reader ); + pt2 += offset; + if( thickness >= 0 ) + cv::ThickLine( img, pt1, pt2, clr, thickness, line_type, 2, shift ); + else + pts.push_back(pt2); + pt1 = pt2; + } + if( thickness < 0 ) + cv::CollectPolyEdges( img, &pts[0], (int)pts.size(), + edges, ext_buf, line_type, 0, cv::Point() ); + } + } + + if( thickness < 0 ) + cv::FillEdgeCollection( img, edges, ext_buf ); + + if( h_next && contour0 ) + contour0->h_next = h_next; +} + +CV_IMPL int +cvClipLine( CvSize size, CvPoint* pt1, CvPoint* pt2 ) +{ + CV_Assert( pt1 && pt2 ); + return cv::clipLine( size, *(cv::Point*)pt1, *(cv::Point*)pt2 ); +} + + +CV_IMPL int +cvEllipse2Poly( CvPoint center, CvSize axes, int angle, + int arc_start, int arc_end, CvPoint* _pts, int delta ) +{ + cv::vector pts; + cv::ellipse2Poly( center, axes, angle, arc_start, arc_end, delta, pts ); + memcpy( _pts, &pts[0], pts.size()*sizeof(_pts[0]) ); + return (int)pts.size(); +} + +CV_IMPL CvScalar +cvColorToScalar( double packed_color, int type ) +{ + 
CvScalar scalar; + + if( CV_MAT_DEPTH( type ) == CV_8U ) + { + int icolor = cvRound( packed_color ); + if( CV_MAT_CN( type ) > 1 ) + { + scalar.val[0] = icolor & 255; + scalar.val[1] = (icolor >> 8) & 255; + scalar.val[2] = (icolor >> 16) & 255; + scalar.val[3] = (icolor >> 24) & 255; + } + else + { + scalar.val[0] = CV_CAST_8U( icolor ); + scalar.val[1] = scalar.val[2] = scalar.val[3] = 0; + } + } + else if( CV_MAT_DEPTH( type ) == CV_8S ) + { + int icolor = cvRound( packed_color ); + if( CV_MAT_CN( type ) > 1 ) + { + scalar.val[0] = (char)icolor; + scalar.val[1] = (char)(icolor >> 8); + scalar.val[2] = (char)(icolor >> 16); + scalar.val[3] = (char)(icolor >> 24); + } + else + { + scalar.val[0] = CV_CAST_8S( icolor ); + scalar.val[1] = scalar.val[2] = scalar.val[3] = 0; + } + } + else + { + int cn = CV_MAT_CN( type ); + switch( cn ) + { + case 1: + scalar.val[0] = packed_color; + scalar.val[1] = scalar.val[2] = scalar.val[3] = 0; + break; + case 2: + scalar.val[0] = scalar.val[1] = packed_color; + scalar.val[2] = scalar.val[3] = 0; + break; + case 3: + scalar.val[0] = scalar.val[1] = scalar.val[2] = packed_color; + scalar.val[3] = 0; + break; + default: + scalar.val[0] = scalar.val[1] = + scalar.val[2] = scalar.val[3] = packed_color; + break; + } + } + + return scalar; +} + +CV_IMPL int +cvInitLineIterator( const CvArr* img, CvPoint pt1, CvPoint pt2, + CvLineIterator* iterator, int connectivity, + int left_to_right ) +{ + CV_Assert( iterator != 0 ); + cv::LineIterator li(cv::cvarrToMat(img), pt1, pt2, connectivity, left_to_right!=0); + + iterator->err = li.err; + iterator->minus_delta = li.minusDelta; + iterator->plus_delta = li.plusDelta; + iterator->minus_step = li.minusStep; + iterator->plus_step = li.plusStep; + iterator->ptr = li.ptr; + + return li.count; +} + +CV_IMPL void +cvLine( CvArr* _img, CvPoint pt1, CvPoint pt2, CvScalar color, + int thickness, int line_type, int shift ) +{ + cv::Mat img = cv::cvarrToMat(_img); + cv::line( img, pt1, pt2, color, 
thickness, line_type, shift ); +} + +CV_IMPL void +cvRectangle( CvArr* _img, CvPoint pt1, CvPoint pt2, + CvScalar color, int thickness, + int line_type, int shift ) +{ + cv::Mat img = cv::cvarrToMat(_img); + cv::rectangle( img, pt1, pt2, color, thickness, line_type, shift ); +} + +CV_IMPL void +cvRectangleR( CvArr* _img, CvRect rec, + CvScalar color, int thickness, + int line_type, int shift ) +{ + cv::Mat img = cv::cvarrToMat(_img); + cv::rectangle( img, rec, color, thickness, line_type, shift ); +} + +CV_IMPL void +cvCircle( CvArr* _img, CvPoint center, int radius, + CvScalar color, int thickness, int line_type, int shift ) +{ + cv::Mat img = cv::cvarrToMat(_img); + cv::circle( img, center, radius, color, thickness, line_type, shift ); +} + +CV_IMPL void +cvEllipse( CvArr* _img, CvPoint center, CvSize axes, + double angle, double start_angle, double end_angle, + CvScalar color, int thickness, int line_type, int shift ) +{ + cv::Mat img = cv::cvarrToMat(_img); + cv::ellipse( img, center, axes, angle, start_angle, end_angle, + color, thickness, line_type, shift ); +} + +CV_IMPL void +cvFillConvexPoly( CvArr* _img, const CvPoint *pts, int npts, + CvScalar color, int line_type, int shift ) +{ + cv::Mat img = cv::cvarrToMat(_img); + cv::fillConvexPoly( img, (const cv::Point*)pts, npts, + color, line_type, shift ); +} + +CV_IMPL void +cvFillPoly( CvArr* _img, CvPoint **pts, const int *npts, int ncontours, + CvScalar color, int line_type, int shift ) +{ + cv::Mat img = cv::cvarrToMat(_img); + + cv::fillPoly( img, (const cv::Point**)pts, npts, ncontours, color, line_type, shift ); +} + +CV_IMPL void +cvPolyLine( CvArr* _img, CvPoint **pts, const int *npts, + int ncontours, int closed, CvScalar color, + int thickness, int line_type, int shift ) +{ + cv::Mat img = cv::cvarrToMat(_img); + + cv::polylines( img, (const cv::Point**)pts, npts, ncontours, + closed != 0, color, thickness, line_type, shift ); +} + +CV_IMPL void +cvPutText( CvArr* _img, const char *text, CvPoint 
org, const CvFont *_font, CvScalar color ) +{ + cv::Mat img = cv::cvarrToMat(_img); + CV_Assert( text != 0 && _font != 0); + cv::putText( img, text, org, _font->font_face, (_font->hscale+_font->vscale)*0.5, + color, _font->thickness, _font->line_type, + CV_IS_IMAGE(_img) && ((IplImage*)_img)->origin != 0 ); +} + + +CV_IMPL void +cvInitFont( CvFont *font, int font_face, double hscale, double vscale, + double shear, int thickness, int line_type ) +{ + CV_Assert( font != 0 && hscale > 0 && vscale > 0 && thickness >= 0 ); + + font->ascii = cv::getFontData(font_face); + font->font_face = font_face; + font->hscale = (float)hscale; + font->vscale = (float)vscale; + font->thickness = thickness; + font->shear = (float)shear; + font->greek = font->cyrillic = 0; + font->line_type = line_type; +} + +CV_IMPL void +cvGetTextSize( const char *text, const CvFont *_font, CvSize *_size, int *_base_line ) +{ + CV_Assert(text != 0 && _font != 0); + cv::Size size = cv::getTextSize( text, _font->font_face, (_font->hscale + _font->vscale)*0.5, + _font->thickness, _base_line ); + if( _size ) + *_size = size; +} + +/* End of file. */ diff --git a/opencv/core/dxt.cpp b/opencv/core/dxt.cpp new file mode 100644 index 0000000..d1cf00a --- /dev/null +++ b/opencv/core/dxt.cpp @@ -0,0 +1,2640 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#include "precomp.hpp" + +namespace cv +{ + +// On Win64 optimized versions of DFT and DCT fail the tests (fixed in VS2010) +#if defined _MSC_VER && !defined CV_ICC && defined _M_X64 && _MSC_VER < 1600 +#pragma optimize("", off) +#endif + +/****************************************************************************************\ + Discrete Fourier Transform +\****************************************************************************************/ + +#define CV_MAX_LOCAL_DFT_SIZE (1 << 15) + +static unsigned char bitrevTab[] = +{ + 0x00,0x80,0x40,0xc0,0x20,0xa0,0x60,0xe0,0x10,0x90,0x50,0xd0,0x30,0xb0,0x70,0xf0, + 0x08,0x88,0x48,0xc8,0x28,0xa8,0x68,0xe8,0x18,0x98,0x58,0xd8,0x38,0xb8,0x78,0xf8, + 0x04,0x84,0x44,0xc4,0x24,0xa4,0x64,0xe4,0x14,0x94,0x54,0xd4,0x34,0xb4,0x74,0xf4, + 0x0c,0x8c,0x4c,0xcc,0x2c,0xac,0x6c,0xec,0x1c,0x9c,0x5c,0xdc,0x3c,0xbc,0x7c,0xfc, + 0x02,0x82,0x42,0xc2,0x22,0xa2,0x62,0xe2,0x12,0x92,0x52,0xd2,0x32,0xb2,0x72,0xf2, + 0x0a,0x8a,0x4a,0xca,0x2a,0xaa,0x6a,0xea,0x1a,0x9a,0x5a,0xda,0x3a,0xba,0x7a,0xfa, + 0x06,0x86,0x46,0xc6,0x26,0xa6,0x66,0xe6,0x16,0x96,0x56,0xd6,0x36,0xb6,0x76,0xf6, + 0x0e,0x8e,0x4e,0xce,0x2e,0xae,0x6e,0xee,0x1e,0x9e,0x5e,0xde,0x3e,0xbe,0x7e,0xfe, + 0x01,0x81,0x41,0xc1,0x21,0xa1,0x61,0xe1,0x11,0x91,0x51,0xd1,0x31,0xb1,0x71,0xf1, + 0x09,0x89,0x49,0xc9,0x29,0xa9,0x69,0xe9,0x19,0x99,0x59,0xd9,0x39,0xb9,0x79,0xf9, + 0x05,0x85,0x45,0xc5,0x25,0xa5,0x65,0xe5,0x15,0x95,0x55,0xd5,0x35,0xb5,0x75,0xf5, + 0x0d,0x8d,0x4d,0xcd,0x2d,0xad,0x6d,0xed,0x1d,0x9d,0x5d,0xdd,0x3d,0xbd,0x7d,0xfd, + 0x03,0x83,0x43,0xc3,0x23,0xa3,0x63,0xe3,0x13,0x93,0x53,0xd3,0x33,0xb3,0x73,0xf3, + 0x0b,0x8b,0x4b,0xcb,0x2b,0xab,0x6b,0xeb,0x1b,0x9b,0x5b,0xdb,0x3b,0xbb,0x7b,0xfb, + 0x07,0x87,0x47,0xc7,0x27,0xa7,0x67,0xe7,0x17,0x97,0x57,0xd7,0x37,0xb7,0x77,0xf7, + 0x0f,0x8f,0x4f,0xcf,0x2f,0xaf,0x6f,0xef,0x1f,0x9f,0x5f,0xdf,0x3f,0xbf,0x7f,0xff +}; + +static const double DFTTab[][2] = +{ +{ 1.00000000000000000, 0.00000000000000000 }, +{-1.00000000000000000, 
0.00000000000000000 }, +{ 0.00000000000000000, 1.00000000000000000 }, +{ 0.70710678118654757, 0.70710678118654746 }, +{ 0.92387953251128674, 0.38268343236508978 }, +{ 0.98078528040323043, 0.19509032201612825 }, +{ 0.99518472667219693, 0.09801714032956060 }, +{ 0.99879545620517241, 0.04906767432741802 }, +{ 0.99969881869620425, 0.02454122852291229 }, +{ 0.99992470183914450, 0.01227153828571993 }, +{ 0.99998117528260111, 0.00613588464915448 }, +{ 0.99999529380957619, 0.00306795676296598 }, +{ 0.99999882345170188, 0.00153398018628477 }, +{ 0.99999970586288223, 0.00076699031874270 }, +{ 0.99999992646571789, 0.00038349518757140 }, +{ 0.99999998161642933, 0.00019174759731070 }, +{ 0.99999999540410733, 0.00009587379909598 }, +{ 0.99999999885102686, 0.00004793689960307 }, +{ 0.99999999971275666, 0.00002396844980842 }, +{ 0.99999999992818922, 0.00001198422490507 }, +{ 0.99999999998204725, 0.00000599211245264 }, +{ 0.99999999999551181, 0.00000299605622633 }, +{ 0.99999999999887801, 0.00000149802811317 }, +{ 0.99999999999971945, 0.00000074901405658 }, +{ 0.99999999999992983, 0.00000037450702829 }, +{ 0.99999999999998246, 0.00000018725351415 }, +{ 0.99999999999999567, 0.00000009362675707 }, +{ 0.99999999999999889, 0.00000004681337854 }, +{ 0.99999999999999978, 0.00000002340668927 }, +{ 0.99999999999999989, 0.00000001170334463 }, +{ 1.00000000000000000, 0.00000000585167232 }, +{ 1.00000000000000000, 0.00000000292583616 } +}; + +#define BitRev(i,shift) \ + ((int)((((unsigned)bitrevTab[(i)&255] << 24)+ \ + ((unsigned)bitrevTab[((i)>> 8)&255] << 16)+ \ + ((unsigned)bitrevTab[((i)>>16)&255] << 8)+ \ + ((unsigned)bitrevTab[((i)>>24)])) >> (shift))) + +static int +DFTFactorize( int n, int* factors ) +{ + int nf = 0, f, i, j; + + if( n <= 5 ) + { + factors[0] = n; + return 1; + } + + f = (((n - 1)^n)+1) >> 1; + if( f > 1 ) + { + factors[nf++] = f; + n = f == n ? 
1 : n/f; + } + + for( f = 3; n > 1; ) + { + int d = n/f; + if( d*f == n ) + { + factors[nf++] = f; + n = d; + } + else + { + f += 2; + if( f*f > n ) + break; + } + } + + if( n > 1 ) + factors[nf++] = n; + + f = (factors[0] & 1) == 0; + for( i = f; i < (nf+f)/2; i++ ) + CV_SWAP( factors[i], factors[nf-i-1+f], j ); + + return nf; +} + +static void +DFTInit( int n0, int nf, int* factors, int* itab, int elem_size, void* _wave, int inv_itab ) +{ + int digits[34], radix[34]; + int n = factors[0], m = 0; + int* itab0 = itab; + int i, j, k; + Complex w, w1; + double t; + + if( n0 <= 5 ) + { + itab[0] = 0; + itab[n0-1] = n0-1; + + if( n0 != 4 ) + { + for( i = 1; i < n0-1; i++ ) + itab[i] = i; + } + else + { + itab[1] = 2; + itab[2] = 1; + } + if( n0 == 5 ) + { + if( elem_size == sizeof(Complex) ) + ((Complex*)_wave)[0] = Complex(1.,0.); + else + ((Complex*)_wave)[0] = Complex(1.f,0.f); + } + if( n0 != 4 ) + return; + m = 2; + } + else + { + // radix[] is initialized from index 'nf' down to zero + assert (nf < 34); + radix[nf] = 1; + digits[nf] = 0; + for( i = 0; i < nf; i++ ) + { + digits[i] = 0; + radix[nf-i-1] = radix[nf-i]*factors[nf-i-1]; + } + + if( inv_itab && factors[0] != factors[nf-1] ) + itab = (int*)_wave; + + if( (n & 1) == 0 ) + { + int a = radix[1], na2 = n*a>>1, na4 = na2 >> 1; + for( m = 0; (unsigned)(1 << m) < (unsigned)n; m++ ) + ; + if( n <= 2 ) + { + itab[0] = 0; + itab[1] = na2; + } + else if( n <= 256 ) + { + int shift = 10 - m; + for( i = 0; i <= n - 4; i += 4 ) + { + j = (bitrevTab[i>>2]>>shift)*a; + itab[i] = j; + itab[i+1] = j + na2; + itab[i+2] = j + na4; + itab[i+3] = j + na2 + na4; + } + } + else + { + int shift = 34 - m; + for( i = 0; i < n; i += 4 ) + { + int i4 = i >> 2; + j = BitRev(i4,shift)*a; + itab[i] = j; + itab[i+1] = j + na2; + itab[i+2] = j + na4; + itab[i+3] = j + na2 + na4; + } + } + + digits[1]++; + + if( nf >= 2 ) + { + for( i = n, j = radix[2]; i < n0; ) + { + for( k = 0; k < n; k++ ) + itab[i+k] = itab[k] + j; + if( (i += n) >= 
n0 ) + break; + j += radix[2]; + for( k = 1; ++digits[k] >= factors[k]; k++ ) + { + digits[k] = 0; + j += radix[k+2] - radix[k]; + } + } + } + } + else + { + for( i = 0, j = 0;; ) + { + itab[i] = j; + if( ++i >= n0 ) + break; + j += radix[1]; + for( k = 0; ++digits[k] >= factors[k]; k++ ) + { + digits[k] = 0; + j += radix[k+2] - radix[k]; + } + } + } + + if( itab != itab0 ) + { + itab0[0] = 0; + for( i = n0 & 1; i < n0; i += 2 ) + { + int k0 = itab[i]; + int k1 = itab[i+1]; + itab0[k0] = i; + itab0[k1] = i+1; + } + } + } + + if( (n0 & (n0-1)) == 0 ) + { + w.re = w1.re = DFTTab[m][0]; + w.im = w1.im = -DFTTab[m][1]; + } + else + { + t = -CV_PI*2/n0; + w.im = w1.im = sin(t); + w.re = w1.re = std::sqrt(1. - w1.im*w1.im); + } + n = (n0+1)/2; + + if( elem_size == sizeof(Complex) ) + { + Complex* wave = (Complex*)_wave; + + wave[0].re = 1.; + wave[0].im = 0.; + + if( (n0 & 1) == 0 ) + { + wave[n].re = -1.; + wave[n].im = 0; + } + + for( i = 1; i < n; i++ ) + { + wave[i] = w; + wave[n0-i].re = w.re; + wave[n0-i].im = -w.im; + + t = w.re*w1.re - w.im*w1.im; + w.im = w.re*w1.im + w.im*w1.re; + w.re = t; + } + } + else + { + Complex* wave = (Complex*)_wave; + assert( elem_size == sizeof(Complex) ); + + wave[0].re = 1.f; + wave[0].im = 0.f; + + if( (n0 & 1) == 0 ) + { + wave[n].re = -1.f; + wave[n].im = 0.f; + } + + for( i = 1; i < n; i++ ) + { + wave[i].re = (float)w.re; + wave[i].im = (float)w.im; + wave[n0-i].re = (float)w.re; + wave[n0-i].im = (float)-w.im; + + t = w.re*w1.re - w.im*w1.im; + w.im = w.re*w1.im + w.im*w1.re; + w.re = t; + } + } +} + +template struct DFT_VecR4 +{ + int operator()(Complex*, int, int, int&, const Complex*) const { return 1; } +}; + +#if CV_SSE3 + +// optimized radix-4 transform +template<> struct DFT_VecR4 +{ + int operator()(Complex* dst, int N, int n0, int& _dw0, const Complex* wave) const + { + int n = 1, i, j, nx, dw, dw0 = _dw0; + __m128 z = _mm_setzero_ps(), x02=z, x13=z, w01=z, w23=z, y01, y23, t0, t1; + Cv32suf t; t.i = 0x80000000; + 
__m128 neg0_mask = _mm_load_ss(&t.f); + __m128 neg3_mask = _mm_shuffle_ps(neg0_mask, neg0_mask, _MM_SHUFFLE(0,1,2,3)); + + for( ; n*4 <= N; ) + { + nx = n; + n *= 4; + dw0 /= 4; + + for( i = 0; i < n0; i += n ) + { + Complexf *v0, *v1; + + v0 = dst + i; + v1 = v0 + nx*2; + + x02 = _mm_loadl_pi(x02, (const __m64*)&v0[0]); + x13 = _mm_loadl_pi(x13, (const __m64*)&v0[nx]); + x02 = _mm_loadh_pi(x02, (const __m64*)&v1[0]); + x13 = _mm_loadh_pi(x13, (const __m64*)&v1[nx]); + + y01 = _mm_add_ps(x02, x13); + y23 = _mm_sub_ps(x02, x13); + t1 = _mm_xor_ps(_mm_shuffle_ps(y01, y23, _MM_SHUFFLE(2,3,3,2)), neg3_mask); + t0 = _mm_movelh_ps(y01, y23); + y01 = _mm_add_ps(t0, t1); + y23 = _mm_sub_ps(t0, t1); + + _mm_storel_pi((__m64*)&v0[0], y01); + _mm_storeh_pi((__m64*)&v0[nx], y01); + _mm_storel_pi((__m64*)&v1[0], y23); + _mm_storeh_pi((__m64*)&v1[nx], y23); + + for( j = 1, dw = dw0; j < nx; j++, dw += dw0 ) + { + v0 = dst + i + j; + v1 = v0 + nx*2; + + x13 = _mm_loadl_pi(x13, (const __m64*)&v0[nx]); + w23 = _mm_loadl_pi(w23, (const __m64*)&wave[dw*2]); + x13 = _mm_loadh_pi(x13, (const __m64*)&v1[nx]); // x1, x3 = r1 i1 r3 i3 + w23 = _mm_loadh_pi(w23, (const __m64*)&wave[dw*3]); // w2, w3 = wr2 wi2 wr3 wi3 + + t0 = _mm_mul_ps(_mm_moveldup_ps(x13), w23); + t1 = _mm_mul_ps(_mm_movehdup_ps(x13), _mm_shuffle_ps(w23, w23, _MM_SHUFFLE(2,3,0,1))); + x13 = _mm_addsub_ps(t0, t1); + // re(x1*w2), im(x1*w2), re(x3*w3), im(x3*w3) + x02 = _mm_loadl_pi(x02, (const __m64*)&v1[0]); // x2 = r2 i2 + w01 = _mm_loadl_pi(w01, (const __m64*)&wave[dw]); // w1 = wr1 wi1 + x02 = _mm_shuffle_ps(x02, x02, _MM_SHUFFLE(0,0,1,1)); + w01 = _mm_shuffle_ps(w01, w01, _MM_SHUFFLE(1,0,0,1)); + x02 = _mm_mul_ps(x02, w01); + x02 = _mm_addsub_ps(x02, _mm_movelh_ps(x02, x02)); + // re(x0) im(x0) re(x2*w1), im(x2*w1) + x02 = _mm_loadl_pi(x02, (const __m64*)&v0[0]); + + y01 = _mm_add_ps(x02, x13); + y23 = _mm_sub_ps(x02, x13); + t1 = _mm_xor_ps(_mm_shuffle_ps(y01, y23, _MM_SHUFFLE(2,3,3,2)), neg3_mask); + t0 = 
_mm_movelh_ps(y01, y23); + y01 = _mm_add_ps(t0, t1); + y23 = _mm_sub_ps(t0, t1); + + _mm_storel_pi((__m64*)&v0[0], y01); + _mm_storeh_pi((__m64*)&v0[nx], y01); + _mm_storel_pi((__m64*)&v1[0], y23); + _mm_storeh_pi((__m64*)&v1[nx], y23); + } + } + } + + _dw0 = dw0; + return n; + } +}; + +#endif + +#ifdef HAVE_IPP +static void ippsDFTFwd_CToC( const Complex* src, Complex* dst, + const void* spec, uchar* buf) +{ + ippsDFTFwd_CToC_32fc( (const Ipp32fc*)src, (Ipp32fc*)dst, + (const IppsDFTSpec_C_32fc*)spec, buf); +} + +static void ippsDFTFwd_CToC( const Complex* src, Complex* dst, + const void* spec, uchar* buf) +{ + ippsDFTFwd_CToC_64fc( (const Ipp64fc*)src, (Ipp64fc*)dst, + (const IppsDFTSpec_C_64fc*)spec, buf); +} + +static void ippsDFTInv_CToC( const Complex* src, Complex* dst, + const void* spec, uchar* buf) +{ + ippsDFTInv_CToC_32fc( (const Ipp32fc*)src, (Ipp32fc*)dst, + (const IppsDFTSpec_C_32fc*)spec, buf); +} + +static void ippsDFTInv_CToC( const Complex* src, Complex* dst, + const void* spec, uchar* buf) +{ + ippsDFTInv_CToC_64fc( (const Ipp64fc*)src, (Ipp64fc*)dst, + (const IppsDFTSpec_C_64fc*)spec, buf); +} + +static void ippsDFTFwd_RToPack( const float* src, float* dst, + const void* spec, uchar* buf) +{ + ippsDFTFwd_RToPack_32f( src, dst, (const IppsDFTSpec_R_32f*)spec, buf); +} + +static void ippsDFTFwd_RToPack( const double* src, double* dst, + const void* spec, uchar* buf) +{ + ippsDFTFwd_RToPack_64f( src, dst, (const IppsDFTSpec_R_64f*)spec, buf); +} + +static void ippsDFTInv_PackToR( const float* src, float* dst, + const void* spec, uchar* buf) +{ + ippsDFTInv_PackToR_32f( src, dst, (const IppsDFTSpec_R_32f*)spec, buf); +} + +static void ippsDFTInv_PackToR( const double* src, double* dst, + const void* spec, uchar* buf) +{ + ippsDFTInv_PackToR_64f( src, dst, (const IppsDFTSpec_R_64f*)spec, buf); +} +#endif + +enum { DFT_NO_PERMUTE=256, DFT_COMPLEX_INPUT_OR_OUTPUT=512 }; + +// mixed-radix complex discrete Fourier transform: double-precision version 
+template static void +DFT( const Complex* src, Complex* dst, int n, + int nf, const int* factors, const int* itab, + const Complex* wave, int tab_size, + const void* +#ifdef HAVE_IPP + spec +#endif + , Complex* buf, + int flags, double _scale ) +{ + static const T sin_120 = (T)0.86602540378443864676372317075294; + static const T fft5_2 = (T)0.559016994374947424102293417182819; + static const T fft5_3 = (T)-0.951056516295153572116439333379382; + static const T fft5_4 = (T)-1.538841768587626701285145288018455; + static const T fft5_5 = (T)0.363271264002680442947733378740309; + + int n0 = n, f_idx, nx; + int inv = flags & DFT_INVERSE; + int dw0 = tab_size, dw; + int i, j, k; + Complex t; + T scale = (T)_scale; + int tab_step; + +#ifdef HAVE_IPP + if( spec ) + { + if( !inv ) + ippsDFTFwd_CToC( src, dst, spec, (uchar*)buf ); + else + ippsDFTInv_CToC( src, dst, spec, (uchar*)buf ); + return; + } +#endif + + tab_step = tab_size == n ? 1 : tab_size == n*2 ? 2 : tab_size/n; + + // 0. shuffle data + if( dst != src ) + { + assert( (flags & DFT_NO_PERMUTE) == 0 ); + if( !inv ) + { + for( i = 0; i <= n - 2; i += 2, itab += 2*tab_step ) + { + int k0 = itab[0], k1 = itab[tab_step]; + assert( (unsigned)k0 < (unsigned)n && (unsigned)k1 < (unsigned)n ); + dst[i] = src[k0]; dst[i+1] = src[k1]; + } + + if( i < n ) + dst[n-1] = src[n-1]; + } + else + { + for( i = 0; i <= n - 2; i += 2, itab += 2*tab_step ) + { + int k0 = itab[0], k1 = itab[tab_step]; + assert( (unsigned)k0 < (unsigned)n && (unsigned)k1 < (unsigned)n ); + t.re = src[k0].re; t.im = -src[k0].im; + dst[i] = t; + t.re = src[k1].re; t.im = -src[k1].im; + dst[i+1] = t; + } + + if( i < n ) + { + t.re = src[n-1].re; t.im = -src[n-1].im; + dst[i] = t; + } + } + } + else + { + if( (flags & DFT_NO_PERMUTE) == 0 ) + { + CV_Assert( factors[0] == factors[nf-1] ); + if( nf == 1 ) + { + if( (n & 3) == 0 ) + { + int n2 = n/2; + Complex* dsth = dst + n2; + + for( i = 0; i < n2; i += 2, itab += tab_step*2 ) + { + j = itab[0]; + assert( 
(unsigned)j < (unsigned)n2 ); + + CV_SWAP(dst[i+1], dsth[j], t); + if( j > i ) + { + CV_SWAP(dst[i], dst[j], t); + CV_SWAP(dsth[i+1], dsth[j+1], t); + } + } + } + // else do nothing + } + else + { + for( i = 0; i < n; i++, itab += tab_step ) + { + j = itab[0]; + assert( (unsigned)j < (unsigned)n ); + if( j > i ) + CV_SWAP(dst[i], dst[j], t); + } + } + } + + if( inv ) + { + for( i = 0; i <= n - 2; i += 2 ) + { + T t0 = -dst[i].im; + T t1 = -dst[i+1].im; + dst[i].im = t0; dst[i+1].im = t1; + } + + if( i < n ) + dst[n-1].im = -dst[n-1].im; + } + } + + n = 1; + // 1. power-2 transforms + if( (factors[0] & 1) == 0 ) + { + if( factors[0] >= 4 && checkHardwareSupport(CV_CPU_SSE3)) + { + DFT_VecR4 vr4; + n = vr4(dst, factors[0], n0, dw0, wave); + } + + // radix-4 transform + for( ; n*4 <= factors[0]; ) + { + nx = n; + n *= 4; + dw0 /= 4; + + for( i = 0; i < n0; i += n ) + { + Complex *v0, *v1; + T r0, i0, r1, i1, r2, i2, r3, i3, r4, i4; + + v0 = dst + i; + v1 = v0 + nx*2; + + r0 = v1[0].re; i0 = v1[0].im; + r4 = v1[nx].re; i4 = v1[nx].im; + + r1 = r0 + r4; i1 = i0 + i4; + r3 = i0 - i4; i3 = r4 - r0; + + r2 = v0[0].re; i2 = v0[0].im; + r4 = v0[nx].re; i4 = v0[nx].im; + + r0 = r2 + r4; i0 = i2 + i4; + r2 -= r4; i2 -= i4; + + v0[0].re = r0 + r1; v0[0].im = i0 + i1; + v1[0].re = r0 - r1; v1[0].im = i0 - i1; + v0[nx].re = r2 + r3; v0[nx].im = i2 + i3; + v1[nx].re = r2 - r3; v1[nx].im = i2 - i3; + + for( j = 1, dw = dw0; j < nx; j++, dw += dw0 ) + { + v0 = dst + i + j; + v1 = v0 + nx*2; + + r2 = v0[nx].re*wave[dw*2].re - v0[nx].im*wave[dw*2].im; + i2 = v0[nx].re*wave[dw*2].im + v0[nx].im*wave[dw*2].re; + r0 = v1[0].re*wave[dw].im + v1[0].im*wave[dw].re; + i0 = v1[0].re*wave[dw].re - v1[0].im*wave[dw].im; + r3 = v1[nx].re*wave[dw*3].im + v1[nx].im*wave[dw*3].re; + i3 = v1[nx].re*wave[dw*3].re - v1[nx].im*wave[dw*3].im; + + r1 = i0 + i3; i1 = r0 + r3; + r3 = r0 - r3; i3 = i3 - i0; + r4 = v0[0].re; i4 = v0[0].im; + + r0 = r4 + r2; i0 = i4 + i2; + r2 = r4 - r2; i2 = i4 - i2; + + 
v0[0].re = r0 + r1; v0[0].im = i0 + i1; + v1[0].re = r0 - r1; v1[0].im = i0 - i1; + v0[nx].re = r2 + r3; v0[nx].im = i2 + i3; + v1[nx].re = r2 - r3; v1[nx].im = i2 - i3; + } + } + } + + for( ; n < factors[0]; ) + { + // do the remaining radix-2 transform + nx = n; + n *= 2; + dw0 /= 2; + + for( i = 0; i < n0; i += n ) + { + Complex* v = dst + i; + T r0 = v[0].re + v[nx].re; + T i0 = v[0].im + v[nx].im; + T r1 = v[0].re - v[nx].re; + T i1 = v[0].im - v[nx].im; + v[0].re = r0; v[0].im = i0; + v[nx].re = r1; v[nx].im = i1; + + for( j = 1, dw = dw0; j < nx; j++, dw += dw0 ) + { + v = dst + i + j; + r1 = v[nx].re*wave[dw].re - v[nx].im*wave[dw].im; + i1 = v[nx].im*wave[dw].re + v[nx].re*wave[dw].im; + r0 = v[0].re; i0 = v[0].im; + + v[0].re = r0 + r1; v[0].im = i0 + i1; + v[nx].re = r0 - r1; v[nx].im = i0 - i1; + } + } + } + } + + // 2. all the other transforms + for( f_idx = (factors[0]&1) ? 0 : 1; f_idx < nf; f_idx++ ) + { + int factor = factors[f_idx]; + nx = n; + n *= factor; + dw0 /= factor; + + if( factor == 3 ) + { + // radix-3 + for( i = 0; i < n0; i += n ) + { + Complex* v = dst + i; + + T r1 = v[nx].re + v[nx*2].re; + T i1 = v[nx].im + v[nx*2].im; + T r0 = v[0].re; + T i0 = v[0].im; + T r2 = sin_120*(v[nx].im - v[nx*2].im); + T i2 = sin_120*(v[nx*2].re - v[nx].re); + v[0].re = r0 + r1; v[0].im = i0 + i1; + r0 -= (T)0.5*r1; i0 -= (T)0.5*i1; + v[nx].re = r0 + r2; v[nx].im = i0 + i2; + v[nx*2].re = r0 - r2; v[nx*2].im = i0 - i2; + + for( j = 1, dw = dw0; j < nx; j++, dw += dw0 ) + { + v = dst + i + j; + r0 = v[nx].re*wave[dw].re - v[nx].im*wave[dw].im; + i0 = v[nx].re*wave[dw].im + v[nx].im*wave[dw].re; + i2 = v[nx*2].re*wave[dw*2].re - v[nx*2].im*wave[dw*2].im; + r2 = v[nx*2].re*wave[dw*2].im + v[nx*2].im*wave[dw*2].re; + r1 = r0 + i2; i1 = i0 + r2; + + r2 = sin_120*(i0 - r2); i2 = sin_120*(i2 - r0); + r0 = v[0].re; i0 = v[0].im; + v[0].re = r0 + r1; v[0].im = i0 + i1; + r0 -= (T)0.5*r1; i0 -= (T)0.5*i1; + v[nx].re = r0 + r2; v[nx].im = i0 + i2; + v[nx*2].re = 
r0 - r2; v[nx*2].im = i0 - i2; + } + } + } + else if( factor == 5 ) + { + // radix-5 + for( i = 0; i < n0; i += n ) + { + for( j = 0, dw = 0; j < nx; j++, dw += dw0 ) + { + Complex* v0 = dst + i + j; + Complex* v1 = v0 + nx*2; + Complex* v2 = v1 + nx*2; + + T r0, i0, r1, i1, r2, i2, r3, i3, r4, i4, r5, i5; + + r3 = v0[nx].re*wave[dw].re - v0[nx].im*wave[dw].im; + i3 = v0[nx].re*wave[dw].im + v0[nx].im*wave[dw].re; + r2 = v2[0].re*wave[dw*4].re - v2[0].im*wave[dw*4].im; + i2 = v2[0].re*wave[dw*4].im + v2[0].im*wave[dw*4].re; + + r1 = r3 + r2; i1 = i3 + i2; + r3 -= r2; i3 -= i2; + + r4 = v1[nx].re*wave[dw*3].re - v1[nx].im*wave[dw*3].im; + i4 = v1[nx].re*wave[dw*3].im + v1[nx].im*wave[dw*3].re; + r0 = v1[0].re*wave[dw*2].re - v1[0].im*wave[dw*2].im; + i0 = v1[0].re*wave[dw*2].im + v1[0].im*wave[dw*2].re; + + r2 = r4 + r0; i2 = i4 + i0; + r4 -= r0; i4 -= i0; + + r0 = v0[0].re; i0 = v0[0].im; + r5 = r1 + r2; i5 = i1 + i2; + + v0[0].re = r0 + r5; v0[0].im = i0 + i5; + + r0 -= (T)0.25*r5; i0 -= (T)0.25*i5; + r1 = fft5_2*(r1 - r2); i1 = fft5_2*(i1 - i2); + r2 = -fft5_3*(i3 + i4); i2 = fft5_3*(r3 + r4); + + i3 *= -fft5_5; r3 *= fft5_5; + i4 *= -fft5_4; r4 *= fft5_4; + + r5 = r2 + i3; i5 = i2 + r3; + r2 -= i4; i2 -= r4; + + r3 = r0 + r1; i3 = i0 + i1; + r0 -= r1; i0 -= i1; + + v0[nx].re = r3 + r2; v0[nx].im = i3 + i2; + v2[0].re = r3 - r2; v2[0].im = i3 - i2; + + v1[0].re = r0 + r5; v1[0].im = i0 + i5; + v1[nx].re = r0 - r5; v1[nx].im = i0 - i5; + } + } + } + else + { + // radix-"factor" - an odd number + int p, q, factor2 = (factor - 1)/2; + int d, dd, dw_f = tab_size/factor; + Complex* a = buf; + Complex* b = buf + factor2; + + for( i = 0; i < n0; i += n ) + { + for( j = 0, dw = 0; j < nx; j++, dw += dw0 ) + { + Complex* v = dst + i + j; + Complex v_0 = v[0]; + Complex vn_0 = v_0; + + if( j == 0 ) + { + for( p = 1, k = nx; p <= factor2; p++, k += nx ) + { + T r0 = v[k].re + v[n-k].re; + T i0 = v[k].im - v[n-k].im; + T r1 = v[k].re - v[n-k].re; + T i1 = v[k].im + 
v[n-k].im; + + vn_0.re += r0; vn_0.im += i1; + a[p-1].re = r0; a[p-1].im = i0; + b[p-1].re = r1; b[p-1].im = i1; + } + } + else + { + const Complex* wave_ = wave + dw*factor; + d = dw; + + for( p = 1, k = nx; p <= factor2; p++, k += nx, d += dw ) + { + T r2 = v[k].re*wave[d].re - v[k].im*wave[d].im; + T i2 = v[k].re*wave[d].im + v[k].im*wave[d].re; + + T r1 = v[n-k].re*wave_[-d].re - v[n-k].im*wave_[-d].im; + T i1 = v[n-k].re*wave_[-d].im + v[n-k].im*wave_[-d].re; + + T r0 = r2 + r1; + T i0 = i2 - i1; + r1 = r2 - r1; + i1 = i2 + i1; + + vn_0.re += r0; vn_0.im += i1; + a[p-1].re = r0; a[p-1].im = i0; + b[p-1].re = r1; b[p-1].im = i1; + } + } + + v[0] = vn_0; + + for( p = 1, k = nx; p <= factor2; p++, k += nx ) + { + Complex s0 = v_0, s1 = v_0; + d = dd = dw_f*p; + + for( q = 0; q < factor2; q++ ) + { + T r0 = wave[d].re * a[q].re; + T i0 = wave[d].im * a[q].im; + T r1 = wave[d].re * b[q].im; + T i1 = wave[d].im * b[q].re; + + s1.re += r0 + i0; s0.re += r0 - i0; + s1.im += r1 - i1; s0.im += r1 + i1; + + d += dd; + d -= -(d >= tab_size) & tab_size; + } + + v[k] = s0; + v[n-k] = s1; + } + } + } + } + } + + if( scale != 1 ) + { + T re_scale = scale, im_scale = scale; + if( inv ) + im_scale = -im_scale; + + for( i = 0; i < n0; i++ ) + { + T t0 = dst[i].re*re_scale; + T t1 = dst[i].im*im_scale; + dst[i].re = t0; + dst[i].im = t1; + } + } + else if( inv ) + { + for( i = 0; i <= n0 - 2; i += 2 ) + { + T t0 = -dst[i].im; + T t1 = -dst[i+1].im; + dst[i].im = t0; + dst[i+1].im = t1; + } + + if( i < n0 ) + dst[n0-1].im = -dst[n0-1].im; + } +} + + +/* FFT of real vector + output vector format: + re(0), re(1), im(1), ... , re(n/2-1), im((n+1)/2-1) [, re((n+1)/2)] OR ... 
+ re(0), 0, re(1), im(1), ..., re(n/2-1), im((n+1)/2-1) [, re((n+1)/2), 0] */ +template static void +RealDFT( const T* src, T* dst, int n, int nf, int* factors, const int* itab, + const Complex* wave, int tab_size, const void* +#ifdef HAVE_IPP + spec +#endif + , + Complex* buf, int flags, double _scale ) +{ + int complex_output = (flags & DFT_COMPLEX_INPUT_OR_OUTPUT) != 0; + T scale = (T)_scale; + int j, n2 = n >> 1; + dst += complex_output; + +#ifdef HAVE_IPP + if( spec ) + { + ippsDFTFwd_RToPack( src, dst, spec, (uchar*)buf ); + goto finalize; + } +#endif + assert( tab_size == n ); + + if( n == 1 ) + { + dst[0] = src[0]*scale; + } + else if( n == 2 ) + { + T t = (src[0] + src[1])*scale; + dst[1] = (src[0] - src[1])*scale; + dst[0] = t; + } + else if( n & 1 ) + { + dst -= complex_output; + Complex* _dst = (Complex*)dst; + _dst[0].re = src[0]*scale; + _dst[0].im = 0; + for( j = 1; j < n; j += 2 ) + { + T t0 = src[itab[j]]*scale; + T t1 = src[itab[j+1]]*scale; + _dst[j].re = t0; + _dst[j].im = 0; + _dst[j+1].re = t1; + _dst[j+1].im = 0; + } + DFT( _dst, _dst, n, nf, factors, itab, wave, + tab_size, 0, buf, DFT_NO_PERMUTE, 1 ); + if( !complex_output ) + dst[1] = dst[0]; + } + else + { + T t0, t; + T h1_re, h1_im, h2_re, h2_im; + T scale2 = scale*(T)0.5; + factors[0] >>= 1; + + DFT( (Complex*)src, (Complex*)dst, n2, nf - (factors[0] == 1), + factors + (factors[0] == 1), + itab, wave, tab_size, 0, buf, 0, 1 ); + factors[0] <<= 1; + + t = dst[0] - dst[1]; + dst[0] = (dst[0] + dst[1])*scale; + dst[1] = t*scale; + + t0 = dst[n2]; + t = dst[n-1]; + dst[n-1] = dst[1]; + + for( j = 2, wave++; j < n2; j += 2, wave++ ) + { + /* calc odd */ + h2_re = scale2*(dst[j+1] + t); + h2_im = scale2*(dst[n-j] - dst[j]); + + /* calc even */ + h1_re = scale2*(dst[j] + dst[n-j]); + h1_im = scale2*(dst[j+1] - t); + + /* rotate */ + t = h2_re*wave->re - h2_im*wave->im; + h2_im = h2_re*wave->im + h2_im*wave->re; + h2_re = t; + t = dst[n-j-1]; + + dst[j-1] = h1_re + h2_re; + dst[n-j-1] = h1_re 
- h2_re; + dst[j] = h1_im + h2_im; + dst[n-j] = h2_im - h1_im; + } + + if( j <= n2 ) + { + dst[n2-1] = t0*scale; + dst[n2] = -t*scale; + } + } + +#ifdef HAVE_IPP +finalize: +#endif + if( complex_output && (n & 1) == 0 ) + { + dst[-1] = dst[0]; + dst[0] = 0; + if( (n & 1) == 0 ) + dst[n] = 0; + } +} + +/* Inverse FFT of complex conjugate-symmetric vector + input vector format: + re[0], re[1], im[1], ... , re[n/2-1], im[n/2-1], re[n/2] OR + re(0), 0, re(1), im(1), ..., re(n/2-1), im((n+1)/2-1) [, re((n+1)/2), 0] */ +template static void +CCSIDFT( const T* src, T* dst, int n, int nf, int* factors, const int* itab, + const Complex* wave, int tab_size, + const void* +#ifdef HAVE_IPP + spec +#endif + , Complex* buf, + int flags, double _scale ) +{ + int complex_input = (flags & DFT_COMPLEX_INPUT_OR_OUTPUT) != 0; + int j, k, n2 = (n+1) >> 1; + T scale = (T)_scale; + T save_s1 = 0.; + T t0, t1, t2, t3, t; + + assert( tab_size == n ); + + if( complex_input ) + { + assert( src != dst ); + save_s1 = src[1]; + ((T*)src)[1] = src[0]; + src++; + } +#ifdef HAVE_IPP + if( spec ) + { + ippsDFTInv_PackToR( src, dst, spec, (uchar*)buf ); + goto finalize; + } +#endif + if( n == 1 ) + { + dst[0] = (T)(src[0]*scale); + } + else if( n == 2 ) + { + t = (src[0] + src[1])*scale; + dst[1] = (src[0] - src[1])*scale; + dst[0] = t; + } + else if( n & 1 ) + { + Complex* _src = (Complex*)(src-1); + Complex* _dst = (Complex*)dst; + + _dst[0].re = src[0]; + _dst[0].im = 0; + for( j = 1; j < n2; j++ ) + { + int k0 = itab[j], k1 = itab[n-j]; + t0 = _src[j].re; t1 = _src[j].im; + _dst[k0].re = t0; _dst[k0].im = -t1; + _dst[k1].re = t0; _dst[k1].im = t1; + } + + DFT( _dst, _dst, n, nf, factors, itab, wave, + tab_size, 0, buf, DFT_NO_PERMUTE, 1. 
); + dst[0] *= scale; + for( j = 1; j < n; j += 2 ) + { + t0 = dst[j*2]*scale; + t1 = dst[j*2+2]*scale; + dst[j] = t0; + dst[j+1] = t1; + } + } + else + { + int inplace = src == dst; + const Complex* w = wave; + + t = src[1]; + t0 = (src[0] + src[n-1]); + t1 = (src[n-1] - src[0]); + dst[0] = t0; + dst[1] = t1; + + for( j = 2, w++; j < n2; j += 2, w++ ) + { + T h1_re, h1_im, h2_re, h2_im; + + h1_re = (t + src[n-j-1]); + h1_im = (src[j] - src[n-j]); + + h2_re = (t - src[n-j-1]); + h2_im = (src[j] + src[n-j]); + + t = h2_re*w->re + h2_im*w->im; + h2_im = h2_im*w->re - h2_re*w->im; + h2_re = t; + + t = src[j+1]; + t0 = h1_re - h2_im; + t1 = -h1_im - h2_re; + t2 = h1_re + h2_im; + t3 = h1_im - h2_re; + + if( inplace ) + { + dst[j] = t0; + dst[j+1] = t1; + dst[n-j] = t2; + dst[n-j+1]= t3; + } + else + { + int j2 = j >> 1; + k = itab[j2]; + dst[k] = t0; + dst[k+1] = t1; + k = itab[n2-j2]; + dst[k] = t2; + dst[k+1]= t3; + } + } + + if( j <= n2 ) + { + t0 = t*2; + t1 = src[n2]*2; + + if( inplace ) + { + dst[n2] = t0; + dst[n2+1] = t1; + } + else + { + k = itab[n2]; + dst[k*2] = t0; + dst[k*2+1] = t1; + } + } + + factors[0] >>= 1; + DFT( (Complex*)dst, (Complex*)dst, n2, + nf - (factors[0] == 1), + factors + (factors[0] == 1), itab, + wave, tab_size, 0, buf, + inplace ? 0 : DFT_NO_PERMUTE, 1. 
); + factors[0] <<= 1; + + for( j = 0; j < n; j += 2 ) + { + t0 = dst[j]*scale; + t1 = dst[j+1]*(-scale); + dst[j] = t0; + dst[j+1] = t1; + } + } + +#ifdef HAVE_IPP +finalize: +#endif + if( complex_input ) + ((T*)src)[0] = (T)save_s1; +} + +static void +CopyColumn( const uchar* _src, size_t src_step, + uchar* _dst, size_t dst_step, + int len, size_t elem_size ) +{ + int i, t0, t1; + const int* src = (const int*)_src; + int* dst = (int*)_dst; + src_step /= sizeof(src[0]); + dst_step /= sizeof(dst[0]); + + if( elem_size == sizeof(int) ) + { + for( i = 0; i < len; i++, src += src_step, dst += dst_step ) + dst[0] = src[0]; + } + else if( elem_size == sizeof(int)*2 ) + { + for( i = 0; i < len; i++, src += src_step, dst += dst_step ) + { + t0 = src[0]; t1 = src[1]; + dst[0] = t0; dst[1] = t1; + } + } + else if( elem_size == sizeof(int)*4 ) + { + for( i = 0; i < len; i++, src += src_step, dst += dst_step ) + { + t0 = src[0]; t1 = src[1]; + dst[0] = t0; dst[1] = t1; + t0 = src[2]; t1 = src[3]; + dst[2] = t0; dst[3] = t1; + } + } +} + + +static void +CopyFrom2Columns( const uchar* _src, size_t src_step, + uchar* _dst0, uchar* _dst1, + int len, size_t elem_size ) +{ + int i, t0, t1; + const int* src = (const int*)_src; + int* dst0 = (int*)_dst0; + int* dst1 = (int*)_dst1; + src_step /= sizeof(src[0]); + + if( elem_size == sizeof(int) ) + { + for( i = 0; i < len; i++, src += src_step ) + { + t0 = src[0]; t1 = src[1]; + dst0[i] = t0; dst1[i] = t1; + } + } + else if( elem_size == sizeof(int)*2 ) + { + for( i = 0; i < len*2; i += 2, src += src_step ) + { + t0 = src[0]; t1 = src[1]; + dst0[i] = t0; dst0[i+1] = t1; + t0 = src[2]; t1 = src[3]; + dst1[i] = t0; dst1[i+1] = t1; + } + } + else if( elem_size == sizeof(int)*4 ) + { + for( i = 0; i < len*4; i += 4, src += src_step ) + { + t0 = src[0]; t1 = src[1]; + dst0[i] = t0; dst0[i+1] = t1; + t0 = src[2]; t1 = src[3]; + dst0[i+2] = t0; dst0[i+3] = t1; + t0 = src[4]; t1 = src[5]; + dst1[i] = t0; dst1[i+1] = t1; + t0 = src[6]; t1 = 
src[7]; + dst1[i+2] = t0; dst1[i+3] = t1; + } + } +} + + +static void +CopyTo2Columns( const uchar* _src0, const uchar* _src1, + uchar* _dst, size_t dst_step, + int len, size_t elem_size ) +{ + int i, t0, t1; + const int* src0 = (const int*)_src0; + const int* src1 = (const int*)_src1; + int* dst = (int*)_dst; + dst_step /= sizeof(dst[0]); + + if( elem_size == sizeof(int) ) + { + for( i = 0; i < len; i++, dst += dst_step ) + { + t0 = src0[i]; t1 = src1[i]; + dst[0] = t0; dst[1] = t1; + } + } + else if( elem_size == sizeof(int)*2 ) + { + for( i = 0; i < len*2; i += 2, dst += dst_step ) + { + t0 = src0[i]; t1 = src0[i+1]; + dst[0] = t0; dst[1] = t1; + t0 = src1[i]; t1 = src1[i+1]; + dst[2] = t0; dst[3] = t1; + } + } + else if( elem_size == sizeof(int)*4 ) + { + for( i = 0; i < len*4; i += 4, dst += dst_step ) + { + t0 = src0[i]; t1 = src0[i+1]; + dst[0] = t0; dst[1] = t1; + t0 = src0[i+2]; t1 = src0[i+3]; + dst[2] = t0; dst[3] = t1; + t0 = src1[i]; t1 = src1[i+1]; + dst[4] = t0; dst[5] = t1; + t0 = src1[i+2]; t1 = src1[i+3]; + dst[6] = t0; dst[7] = t1; + } + } +} + + +static void +ExpandCCS( uchar* _ptr, int len, int elem_size ) +{ + int i; + _ptr -= elem_size; + memcpy( _ptr, _ptr + elem_size, elem_size ); + memset( _ptr + elem_size, 0, elem_size ); + if( (len & 1) == 0 ) + memset( _ptr + (len+1)*elem_size, 0, elem_size ); + + if( elem_size == sizeof(float) ) + { + Complex* ptr = (Complex*)_ptr; + + for( i = 1; i < (len+1)/2; i++ ) + { + Complex t; + t.re = ptr[i].re; + t.im = -ptr[i].im; + ptr[len-i] = t; + } + } + else + { + Complex* ptr = (Complex*)_ptr; + + for( i = 1; i < (len+1)/2; i++ ) + { + Complex t; + t.re = ptr[i].re; + t.im = -ptr[i].im; + ptr[len-i] = t; + } + } +} + + +typedef void (*DFTFunc)( + const void* src, void* dst, int n, int nf, int* factors, + const int* itab, const void* wave, int tab_size, + const void* spec, void* buf, int inv, double scale ); + +static void DFT_32f( const Complexf* src, Complexf* dst, int n, + int nf, const int* factors, 
const int* itab, + const Complexf* wave, int tab_size, + const void* spec, Complexf* buf, + int flags, double scale ) +{ + DFT(src, dst, n, nf, factors, itab, wave, tab_size, spec, buf, flags, scale); +} + +static void DFT_64f( const Complexd* src, Complexd* dst, int n, + int nf, const int* factors, const int* itab, + const Complexd* wave, int tab_size, + const void* spec, Complexd* buf, + int flags, double scale ) +{ + DFT(src, dst, n, nf, factors, itab, wave, tab_size, spec, buf, flags, scale); +} + + +static void RealDFT_32f( const float* src, float* dst, int n, int nf, int* factors, + const int* itab, const Complexf* wave, int tab_size, const void* spec, + Complexf* buf, int flags, double scale ) +{ + RealDFT( src, dst, n, nf, factors, itab, wave, tab_size, spec, buf, flags, scale); +} + +static void RealDFT_64f( const double* src, double* dst, int n, int nf, int* factors, + const int* itab, const Complexd* wave, int tab_size, const void* spec, + Complexd* buf, int flags, double scale ) +{ + RealDFT( src, dst, n, nf, factors, itab, wave, tab_size, spec, buf, flags, scale); +} + +static void CCSIDFT_32f( const float* src, float* dst, int n, int nf, int* factors, + const int* itab, const Complexf* wave, int tab_size, const void* spec, + Complexf* buf, int flags, double scale ) +{ + CCSIDFT( src, dst, n, nf, factors, itab, wave, tab_size, spec, buf, flags, scale); +} + +static void CCSIDFT_64f( const double* src, double* dst, int n, int nf, int* factors, + const int* itab, const Complexd* wave, int tab_size, const void* spec, + Complexd* buf, int flags, double scale ) +{ + CCSIDFT( src, dst, n, nf, factors, itab, wave, tab_size, spec, buf, flags, scale); +} + +} + + +void cv::dft( InputArray _src0, OutputArray _dst, int flags, int nonzero_rows ) +{ + static DFTFunc dft_tbl[6] = + { + (DFTFunc)DFT_32f, + (DFTFunc)RealDFT_32f, + (DFTFunc)CCSIDFT_32f, + (DFTFunc)DFT_64f, + (DFTFunc)RealDFT_64f, + (DFTFunc)CCSIDFT_64f + }; + + AutoBuffer buf; + void *spec = 0; + + Mat 
src0 = _src0.getMat(), src = src0; + int prev_len = 0, stage = 0; + bool inv = (flags & DFT_INVERSE) != 0; + int nf = 0, real_transform = src.channels() == 1 || (inv && (flags & DFT_REAL_OUTPUT)!=0); + int type = src.type(), depth = src.depth(); + int elem_size = (int)src.elemSize1(), complex_elem_size = elem_size*2; + int factors[34]; + bool inplace_transform = false; + int ipp_norm_flag = 0; +#ifdef HAVE_IPP + void *spec_r = 0, *spec_c = 0; +#endif + + CV_Assert( type == CV_32FC1 || type == CV_32FC2 || type == CV_64FC1 || type == CV_64FC2 ); + + if( !inv && src.channels() == 1 && (flags & DFT_COMPLEX_OUTPUT) ) + _dst.create( src.size(), CV_MAKETYPE(depth, 2) ); + else if( inv && src.channels() == 2 && (flags & DFT_REAL_OUTPUT) ) + _dst.create( src.size(), depth ); + else + _dst.create( src.size(), type ); + + Mat dst = _dst.getMat(); + + if( !real_transform ) + elem_size = complex_elem_size; + + if( src.cols == 1 && nonzero_rows > 0 ) + CV_Error( CV_StsNotImplemented, + "This mode (using nonzero_rows with a single-column matrix) breaks the function's logic, so it is prohibited.\n" + "For fast convolution/correlation use 2-column matrix or single-row matrix instead" ); + + // determine, which transform to do first - row-wise + // (stage 0) or column-wise (stage 1) transform + if( !(flags & DFT_ROWS) && src.rows > 1 && + ((src.cols == 1 && (!src.isContinuous() || !dst.isContinuous())) || + (src.cols > 1 && inv && real_transform)) ) + stage = 1; + + ipp_norm_flag = !(flags & DFT_SCALE) ? 8 : inv ? 2 : 1; + + for(;;) + { + double scale = 1; + uchar* wave = 0; + int* itab = 0; + uchar* ptr; + int i, len, count, sz = 0; + int use_buf = 0, odd_real = 0; + DFTFunc dft_func; + + if( stage == 0 ) // row-wise transform + { + len = !inv ? src.cols : dst.cols; + count = src.rows; + if( len == 1 && !(flags & DFT_ROWS) ) + { + len = !inv ? src.rows : dst.rows; + count = 1; + } + odd_real = real_transform && (len & 1); + } + else + { + len = dst.rows; + count = !inv ? 
src0.cols : dst.cols; + sz = 2*len*complex_elem_size; + } + + spec = 0; +#ifdef HAVE_IPP + if( len*count >= 64 ) // use IPP DFT if available + { + int ipp_sz = 0; + + if( real_transform && stage == 0 ) + { + if( depth == CV_32F ) + { + if( spec_r ) + IPPI_CALL( ippsDFTFree_R_32f( (IppsDFTSpec_R_32f*)spec_r )); + IPPI_CALL( ippsDFTInitAlloc_R_32f( + (IppsDFTSpec_R_32f**)&spec_r, len, ipp_norm_flag, ippAlgHintNone )); + IPPI_CALL( ippsDFTGetBufSize_R_32f( (IppsDFTSpec_R_32f*)spec_r, &ipp_sz )); + } + else + { + if( spec_r ) + IPPI_CALL( ippsDFTFree_R_64f( (IppsDFTSpec_R_64f*)spec_r )); + IPPI_CALL( ippsDFTInitAlloc_R_64f( + (IppsDFTSpec_R_64f**)&spec_r, len, ipp_norm_flag, ippAlgHintNone )); + IPPI_CALL( ippsDFTGetBufSize_R_64f( (IppsDFTSpec_R_64f*)spec_r, &ipp_sz )); + } + spec = spec_r; + } + else + { + if( depth == CV_32F ) + { + if( spec_c ) + IPPI_CALL( ippsDFTFree_C_32fc( (IppsDFTSpec_C_32fc*)spec_c )); + IPPI_CALL( ippsDFTInitAlloc_C_32fc( + (IppsDFTSpec_C_32fc**)&spec_c, len, ipp_norm_flag, ippAlgHintNone )); + IPPI_CALL( ippsDFTGetBufSize_C_32fc( (IppsDFTSpec_C_32fc*)spec_c, &ipp_sz )); + } + else + { + if( spec_c ) + IPPI_CALL( ippsDFTFree_C_64fc( (IppsDFTSpec_C_64fc*)spec_c )); + IPPI_CALL( ippsDFTInitAlloc_C_64fc( + (IppsDFTSpec_C_64fc**)&spec_c, len, ipp_norm_flag, ippAlgHintNone )); + IPPI_CALL( ippsDFTGetBufSize_C_64fc( (IppsDFTSpec_C_64fc*)spec_c, &ipp_sz )); + } + spec = spec_c; + } + + sz += ipp_sz; + } + else +#endif + { + if( len != prev_len ) + nf = DFTFactorize( len, factors ); + + inplace_transform = factors[0] == factors[nf-1]; + sz += len*(complex_elem_size + sizeof(int)); + i = nf > 1 && (factors[0] & 1) == 0; + if( (factors[i] & 1) != 0 && factors[i] > 5 ) + sz += (factors[i]+1)*complex_elem_size; + + if( (stage == 0 && ((src.data == dst.data && !inplace_transform) || odd_real)) || + (stage == 1 && !inplace_transform) ) + { + use_buf = 1; + sz += len*complex_elem_size; + } + } + + ptr = (uchar*)buf; + buf.allocate( sz + 32 ); + if( ptr != 
(uchar*)buf ) + prev_len = 0; // because we release the buffer, + // force recalculation of + // twiddle factors and permutation table + ptr = (uchar*)buf; + if( !spec ) + { + wave = ptr; + ptr += len*complex_elem_size; + itab = (int*)ptr; + ptr = (uchar*)cvAlignPtr( ptr + len*sizeof(int), 16 ); + + if( len != prev_len || (!inplace_transform && inv && real_transform)) + DFTInit( len, nf, factors, itab, complex_elem_size, + wave, stage == 0 && inv && real_transform ); + // otherwise reuse the tables calculated on the previous stage + } + + if( stage == 0 ) + { + uchar* tmp_buf = 0; + int dptr_offset = 0; + int dst_full_len = len*elem_size; + int _flags = inv + (src.channels() != dst.channels() ? + DFT_COMPLEX_INPUT_OR_OUTPUT : 0); + if( use_buf ) + { + tmp_buf = ptr; + ptr += len*complex_elem_size; + if( odd_real && !inv && len > 1 && + !(_flags & DFT_COMPLEX_INPUT_OR_OUTPUT)) + dptr_offset = elem_size; + } + + if( !inv && (_flags & DFT_COMPLEX_INPUT_OR_OUTPUT) ) + dst_full_len += (len & 1) ? elem_size : complex_elem_size; + + dft_func = dft_tbl[(!real_transform ? 0 : !inv ? 1 : 2) + (depth == CV_64F)*3]; + + if( count > 1 && !(flags & DFT_ROWS) && (!inv || !real_transform) ) + stage = 1; + else if( flags & CV_DXT_SCALE ) + scale = 1./(len * (flags & DFT_ROWS ? 
1 : count)); + + if( nonzero_rows <= 0 || nonzero_rows > count ) + nonzero_rows = count; + + for( i = 0; i < nonzero_rows; i++ ) + { + uchar* sptr = src.data + i*src.step; + uchar* dptr0 = dst.data + i*dst.step; + uchar* dptr = dptr0; + + if( tmp_buf ) + dptr = tmp_buf; + + dft_func( sptr, dptr, len, nf, factors, itab, wave, len, spec, ptr, _flags, scale ); + if( dptr != dptr0 ) + memcpy( dptr0, dptr + dptr_offset, dst_full_len ); + } + + for( ; i < count; i++ ) + { + uchar* dptr0 = dst.data + i*dst.step; + memset( dptr0, 0, dst_full_len ); + } + + if( stage != 1 ) + break; + src = dst; + } + else + { + int a = 0, b = count; + uchar *buf0, *buf1, *dbuf0, *dbuf1; + uchar* sptr0 = src.data; + uchar* dptr0 = dst.data; + buf0 = ptr; + ptr += len*complex_elem_size; + buf1 = ptr; + ptr += len*complex_elem_size; + dbuf0 = buf0, dbuf1 = buf1; + + if( use_buf ) + { + dbuf1 = ptr; + dbuf0 = buf1; + ptr += len*complex_elem_size; + } + + dft_func = dft_tbl[(depth == CV_64F)*3]; + + if( real_transform && inv && src.cols > 1 ) + stage = 0; + else if( flags & CV_DXT_SCALE ) + scale = 1./(len * count); + + if( real_transform ) + { + int even; + a = 1; + even = (count & 1) == 0; + b = (count+1)/2; + if( !inv ) + { + memset( buf0, 0, len*complex_elem_size ); + CopyColumn( sptr0, src.step, buf0, complex_elem_size, len, elem_size ); + sptr0 += dst.channels()*elem_size; + if( even ) + { + memset( buf1, 0, len*complex_elem_size ); + CopyColumn( sptr0 + (count-2)*elem_size, src.step, + buf1, complex_elem_size, len, elem_size ); + } + } + else if( src.channels() == 1 ) + { + CopyColumn( sptr0, src.step, buf0 + elem_size, elem_size, len, elem_size ); + ExpandCCS( buf0 + elem_size, len, elem_size ); + if( even ) + { + CopyColumn( sptr0 + (count-1)*elem_size, src.step, + buf1 + elem_size, elem_size, len, elem_size ); + ExpandCCS( buf1 + elem_size, len, elem_size ); + } + sptr0 += elem_size; + } + else + { + CopyColumn( sptr0, src.step, buf0, complex_elem_size, len, complex_elem_size ); + if( 
even ) + { + CopyColumn( sptr0 + b*complex_elem_size, src.step, + buf1, complex_elem_size, len, complex_elem_size ); + } + sptr0 += complex_elem_size; + } + + if( even ) + dft_func( buf1, dbuf1, len, nf, factors, itab, + wave, len, spec, ptr, inv, scale ); + dft_func( buf0, dbuf0, len, nf, factors, itab, + wave, len, spec, ptr, inv, scale ); + + if( dst.channels() == 1 ) + { + if( !inv ) + { + // copy the half of output vector to the first/last column. + // before doing that, defgragment the vector + memcpy( dbuf0 + elem_size, dbuf0, elem_size ); + CopyColumn( dbuf0 + elem_size, elem_size, dptr0, + dst.step, len, elem_size ); + if( even ) + { + memcpy( dbuf1 + elem_size, dbuf1, elem_size ); + CopyColumn( dbuf1 + elem_size, elem_size, + dptr0 + (count-1)*elem_size, + dst.step, len, elem_size ); + } + dptr0 += elem_size; + } + else + { + // copy the real part of the complex vector to the first/last column + CopyColumn( dbuf0, complex_elem_size, dptr0, dst.step, len, elem_size ); + if( even ) + CopyColumn( dbuf1, complex_elem_size, dptr0 + (count-1)*elem_size, + dst.step, len, elem_size ); + dptr0 += elem_size; + } + } + else + { + assert( !inv ); + CopyColumn( dbuf0, complex_elem_size, dptr0, + dst.step, len, complex_elem_size ); + if( even ) + CopyColumn( dbuf1, complex_elem_size, + dptr0 + b*complex_elem_size, + dst.step, len, complex_elem_size ); + dptr0 += complex_elem_size; + } + } + + for( i = a; i < b; i += 2 ) + { + if( i+1 < b ) + { + CopyFrom2Columns( sptr0, src.step, buf0, buf1, len, complex_elem_size ); + dft_func( buf1, dbuf1, len, nf, factors, itab, + wave, len, spec, ptr, inv, scale ); + } + else + CopyColumn( sptr0, src.step, buf0, complex_elem_size, len, complex_elem_size ); + + dft_func( buf0, dbuf0, len, nf, factors, itab, + wave, len, spec, ptr, inv, scale ); + + if( i+1 < b ) + CopyTo2Columns( dbuf0, dbuf1, dptr0, dst.step, len, complex_elem_size ); + else + CopyColumn( dbuf0, complex_elem_size, dptr0, dst.step, len, complex_elem_size ); + sptr0 
+= 2*complex_elem_size; + dptr0 += 2*complex_elem_size; + } + + if( stage != 0 ) + break; + src = dst; + } + } + +#ifdef HAVE_IPP + if( spec_c ) + { + if( depth == CV_32F ) + ippsDFTFree_C_32fc( (IppsDFTSpec_C_32fc*)spec_c ); + else + ippsDFTFree_C_64fc( (IppsDFTSpec_C_64fc*)spec_c ); + } + + if( spec_r ) + { + if( depth == CV_32F ) + ippsDFTFree_R_32f( (IppsDFTSpec_R_32f*)spec_r ); + else + ippsDFTFree_R_64f( (IppsDFTSpec_R_64f*)spec_r ); + } +#endif +} + + +void cv::idft( InputArray src, OutputArray dst, int flags, int nonzero_rows ) +{ + dft( src, dst, flags | DFT_INVERSE, nonzero_rows ); +} + +void cv::mulSpectrums( InputArray _srcA, InputArray _srcB, + OutputArray _dst, int flags, bool conjB ) +{ + Mat srcA = _srcA.getMat(), srcB = _srcB.getMat(); + int depth = srcA.depth(), cn = srcA.channels(), type = srcA.type(); + int rows = srcA.rows, cols = srcA.cols; + int j, k; + + CV_Assert( type == srcB.type() && srcA.size() == srcB.size() ); + CV_Assert( type == CV_32FC1 || type == CV_32FC2 || type == CV_64FC1 || type == CV_64FC2 ); + + _dst.create( srcA.rows, srcA.cols, type ); + Mat dst = _dst.getMat(); + + bool is_1d = (flags & DFT_ROWS) || (rows == 1 || (cols == 1 && + srcA.isContinuous() && srcB.isContinuous() && dst.isContinuous())); + + if( is_1d && !(flags & DFT_ROWS) ) + cols = cols + rows - 1, rows = 1; + + int ncols = cols*cn; + int j0 = cn == 1; + int j1 = ncols - (cols % 2 == 0 && cn == 1); + + if( depth == CV_32F ) + { + const float* dataA = (const float*)srcA.data; + const float* dataB = (const float*)srcB.data; + float* dataC = (float*)dst.data; + + size_t stepA = srcA.step/sizeof(dataA[0]); + size_t stepB = srcB.step/sizeof(dataB[0]); + size_t stepC = dst.step/sizeof(dataC[0]); + + if( !is_1d && cn == 1 ) + { + for( k = 0; k < (cols % 2 ? 
1 : 2); k++ ) + { + if( k == 1 ) + dataA += cols - 1, dataB += cols - 1, dataC += cols - 1; + dataC[0] = dataA[0]*dataB[0]; + if( rows % 2 == 0 ) + dataC[(rows-1)*stepC] = dataA[(rows-1)*stepA]*dataB[(rows-1)*stepB]; + if( !conjB ) + for( j = 1; j <= rows - 2; j += 2 ) + { + double re = (double)dataA[j*stepA]*dataB[j*stepB] - + (double)dataA[(j+1)*stepA]*dataB[(j+1)*stepB]; + double im = (double)dataA[j*stepA]*dataB[(j+1)*stepB] + + (double)dataA[(j+1)*stepA]*dataB[j*stepB]; + dataC[j*stepC] = (float)re; dataC[(j+1)*stepC] = (float)im; + } + else + for( j = 1; j <= rows - 2; j += 2 ) + { + double re = (double)dataA[j*stepA]*dataB[j*stepB] + + (double)dataA[(j+1)*stepA]*dataB[(j+1)*stepB]; + double im = (double)dataA[(j+1)*stepA]*dataB[j*stepB] - + (double)dataA[j*stepA]*dataB[(j+1)*stepB]; + dataC[j*stepC] = (float)re; dataC[(j+1)*stepC] = (float)im; + } + if( k == 1 ) + dataA -= cols - 1, dataB -= cols - 1, dataC -= cols - 1; + } + } + + for( ; rows--; dataA += stepA, dataB += stepB, dataC += stepC ) + { + if( is_1d && cn == 1 ) + { + dataC[0] = dataA[0]*dataB[0]; + if( cols % 2 == 0 ) + dataC[j1] = dataA[j1]*dataB[j1]; + } + + if( !conjB ) + for( j = j0; j < j1; j += 2 ) + { + double re = (double)dataA[j]*dataB[j] - (double)dataA[j+1]*dataB[j+1]; + double im = (double)dataA[j+1]*dataB[j] + (double)dataA[j]*dataB[j+1]; + dataC[j] = (float)re; dataC[j+1] = (float)im; + } + else + for( j = j0; j < j1; j += 2 ) + { + double re = (double)dataA[j]*dataB[j] + (double)dataA[j+1]*dataB[j+1]; + double im = (double)dataA[j+1]*dataB[j] - (double)dataA[j]*dataB[j+1]; + dataC[j] = (float)re; dataC[j+1] = (float)im; + } + } + } + else + { + const double* dataA = (const double*)srcA.data; + const double* dataB = (const double*)srcB.data; + double* dataC = (double*)dst.data; + + size_t stepA = srcA.step/sizeof(dataA[0]); + size_t stepB = srcB.step/sizeof(dataB[0]); + size_t stepC = dst.step/sizeof(dataC[0]); + + if( !is_1d && cn == 1 ) + { + for( k = 0; k < (cols % 2 ? 
1 : 2); k++ ) + { + if( k == 1 ) + dataA += cols - 1, dataB += cols - 1, dataC += cols - 1; + dataC[0] = dataA[0]*dataB[0]; + if( rows % 2 == 0 ) + dataC[(rows-1)*stepC] = dataA[(rows-1)*stepA]*dataB[(rows-1)*stepB]; + if( !conjB ) + for( j = 1; j <= rows - 2; j += 2 ) + { + double re = dataA[j*stepA]*dataB[j*stepB] - + dataA[(j+1)*stepA]*dataB[(j+1)*stepB]; + double im = dataA[j*stepA]*dataB[(j+1)*stepB] + + dataA[(j+1)*stepA]*dataB[j*stepB]; + dataC[j*stepC] = re; dataC[(j+1)*stepC] = im; + } + else + for( j = 1; j <= rows - 2; j += 2 ) + { + double re = dataA[j*stepA]*dataB[j*stepB] + + dataA[(j+1)*stepA]*dataB[(j+1)*stepB]; + double im = dataA[(j+1)*stepA]*dataB[j*stepB] - + dataA[j*stepA]*dataB[(j+1)*stepB]; + dataC[j*stepC] = re; dataC[(j+1)*stepC] = im; + } + if( k == 1 ) + dataA -= cols - 1, dataB -= cols - 1, dataC -= cols - 1; + } + } + + for( ; rows--; dataA += stepA, dataB += stepB, dataC += stepC ) + { + if( is_1d && cn == 1 ) + { + dataC[0] = dataA[0]*dataB[0]; + if( cols % 2 == 0 ) + dataC[j1] = dataA[j1]*dataB[j1]; + } + + if( !conjB ) + for( j = j0; j < j1; j += 2 ) + { + double re = dataA[j]*dataB[j] - dataA[j+1]*dataB[j+1]; + double im = dataA[j+1]*dataB[j] + dataA[j]*dataB[j+1]; + dataC[j] = re; dataC[j+1] = im; + } + else + for( j = j0; j < j1; j += 2 ) + { + double re = dataA[j]*dataB[j] + dataA[j+1]*dataB[j+1]; + double im = dataA[j+1]*dataB[j] - dataA[j]*dataB[j+1]; + dataC[j] = re; dataC[j+1] = im; + } + } + } +} + + +/****************************************************************************************\ + Discrete Cosine Transform +\****************************************************************************************/ + +namespace cv +{ + +/* DCT is calculated using DFT, as described here: + http://www.ece.utexas.edu/~bevans/courses/ee381k/lectures/09_DCT/lecture9/: +*/ +template static void +DCT( const T* src, int src_step, T* dft_src, T* dft_dst, T* dst, int dst_step, + int n, int nf, int* factors, const int* itab, const Complex* 
dft_wave, + const Complex* dct_wave, const void* spec, Complex* buf ) +{ + static const T sin_45 = (T)0.70710678118654752440084436210485; + int j, n2 = n >> 1; + + src_step /= sizeof(src[0]); + dst_step /= sizeof(dst[0]); + T* dst1 = dst + (n-1)*dst_step; + + if( n == 1 ) + { + dst[0] = src[0]; + return; + } + + for( j = 0; j < n2; j++, src += src_step*2 ) + { + dft_src[j] = src[0]; + dft_src[n-j-1] = src[src_step]; + } + + RealDFT( dft_src, dft_dst, n, nf, factors, + itab, dft_wave, n, spec, buf, 0, 1.0 ); + src = dft_dst; + + dst[0] = (T)(src[0]*dct_wave->re*sin_45); + dst += dst_step; + for( j = 1, dct_wave++; j < n2; j++, dct_wave++, + dst += dst_step, dst1 -= dst_step ) + { + T t0 = dct_wave->re*src[j*2-1] - dct_wave->im*src[j*2]; + T t1 = -dct_wave->im*src[j*2-1] - dct_wave->re*src[j*2]; + dst[0] = t0; + dst1[0] = t1; + } + + dst[0] = src[n-1]*dct_wave->re; +} + + +template static void +IDCT( const T* src, int src_step, T* dft_src, T* dft_dst, T* dst, int dst_step, + int n, int nf, int* factors, const int* itab, const Complex* dft_wave, + const Complex* dct_wave, const void* spec, Complex* buf ) +{ + static const T sin_45 = (T)0.70710678118654752440084436210485; + int j, n2 = n >> 1; + + src_step /= sizeof(src[0]); + dst_step /= sizeof(dst[0]); + const T* src1 = src + (n-1)*src_step; + + if( n == 1 ) + { + dst[0] = src[0]; + return; + } + + dft_src[0] = (T)(src[0]*2*dct_wave->re*sin_45); + src += src_step; + for( j = 1, dct_wave++; j < n2; j++, dct_wave++, + src += src_step, src1 -= src_step ) + { + T t0 = dct_wave->re*src[0] - dct_wave->im*src1[0]; + T t1 = -dct_wave->im*src[0] - dct_wave->re*src1[0]; + dft_src[j*2-1] = t0; + dft_src[j*2] = t1; + } + + dft_src[n-1] = (T)(src[0]*2*dct_wave->re); + CCSIDFT( dft_src, dft_dst, n, nf, factors, itab, + dft_wave, n, spec, buf, 0, 1.0 ); + + for( j = 0; j < n2; j++, dst += dst_step*2 ) + { + dst[0] = dft_dst[j]; + dst[dst_step] = dft_dst[n-j-1]; + } +} + + +static void +DCTInit( int n, int elem_size, void* _wave, 
int inv ) +{ + static const double DctScale[] = + { + 0.707106781186547570, 0.500000000000000000, 0.353553390593273790, + 0.250000000000000000, 0.176776695296636890, 0.125000000000000000, + 0.088388347648318447, 0.062500000000000000, 0.044194173824159223, + 0.031250000000000000, 0.022097086912079612, 0.015625000000000000, + 0.011048543456039806, 0.007812500000000000, 0.005524271728019903, + 0.003906250000000000, 0.002762135864009952, 0.001953125000000000, + 0.001381067932004976, 0.000976562500000000, 0.000690533966002488, + 0.000488281250000000, 0.000345266983001244, 0.000244140625000000, + 0.000172633491500622, 0.000122070312500000, 0.000086316745750311, + 0.000061035156250000, 0.000043158372875155, 0.000030517578125000 + }; + + int i; + Complex w, w1; + double t, scale; + + if( n == 1 ) + return; + + assert( (n&1) == 0 ); + + if( (n & (n - 1)) == 0 ) + { + int m; + for( m = 0; (unsigned)(1 << m) < (unsigned)n; m++ ) + ; + scale = (!inv ? 2 : 1)*DctScale[m]; + w1.re = DFTTab[m+2][0]; + w1.im = -DFTTab[m+2][1]; + } + else + { + t = 1./(2*n); + scale = (!inv ? 2 : 1)*std::sqrt(t); + w1.im = sin(-CV_PI*t); + w1.re = std::sqrt(1. 
- w1.im*w1.im); + } + n >>= 1; + + if( elem_size == sizeof(Complex) ) + { + Complex* wave = (Complex*)_wave; + + w.re = scale; + w.im = 0.; + + for( i = 0; i <= n; i++ ) + { + wave[i] = w; + t = w.re*w1.re - w.im*w1.im; + w.im = w.re*w1.im + w.im*w1.re; + w.re = t; + } + } + else + { + Complex* wave = (Complex*)_wave; + assert( elem_size == sizeof(Complex) ); + + w.re = (float)scale; + w.im = 0.f; + + for( i = 0; i <= n; i++ ) + { + wave[i].re = (float)w.re; + wave[i].im = (float)w.im; + t = w.re*w1.re - w.im*w1.im; + w.im = w.re*w1.im + w.im*w1.re; + w.re = t; + } + } +} + + +typedef void (*DCTFunc)(const void* src, int src_step, void* dft_src, + void* dft_dst, void* dst, int dst_step, int n, + int nf, int* factors, const int* itab, const void* dft_wave, + const void* dct_wave, const void* spec, void* buf ); + +static void DCT_32f(const float* src, int src_step, float* dft_src, float* dft_dst, + float* dst, int dst_step, int n, int nf, int* factors, const int* itab, + const Complexf* dft_wave, const Complexf* dct_wave, const void* spec, Complexf* buf ) +{ + DCT(src, src_step, dft_src, dft_dst, dst, dst_step, + n, nf, factors, itab, dft_wave, dct_wave, spec, buf); +} + +static void IDCT_32f(const float* src, int src_step, float* dft_src, float* dft_dst, + float* dst, int dst_step, int n, int nf, int* factors, const int* itab, + const Complexf* dft_wave, const Complexf* dct_wave, const void* spec, Complexf* buf ) +{ + IDCT(src, src_step, dft_src, dft_dst, dst, dst_step, + n, nf, factors, itab, dft_wave, dct_wave, spec, buf); +} + +static void DCT_64f(const double* src, int src_step, double* dft_src, double* dft_dst, + double* dst, int dst_step, int n, int nf, int* factors, const int* itab, + const Complexd* dft_wave, const Complexd* dct_wave, const void* spec, Complexd* buf ) +{ + DCT(src, src_step, dft_src, dft_dst, dst, dst_step, + n, nf, factors, itab, dft_wave, dct_wave, spec, buf); +} + +static void IDCT_64f(const double* src, int src_step, double* dft_src, 
double* dft_dst, + double* dst, int dst_step, int n, int nf, int* factors, const int* itab, + const Complexd* dft_wave, const Complexd* dct_wave, const void* spec, Complexd* buf ) +{ + IDCT(src, src_step, dft_src, dft_dst, dst, dst_step, + n, nf, factors, itab, dft_wave, dct_wave, spec, buf); +} + +} + +void cv::dct( InputArray _src0, OutputArray _dst, int flags ) +{ + static DCTFunc dct_tbl[4] = + { + (DCTFunc)DCT_32f, + (DCTFunc)IDCT_32f, + (DCTFunc)DCT_64f, + (DCTFunc)IDCT_64f + }; + + bool inv = (flags & DCT_INVERSE) != 0; + Mat src0 = _src0.getMat(), src = src0; + int type = src.type(), depth = src.depth(); + void /* *spec_dft = 0, */ *spec = 0; + + double scale = 1.; + int prev_len = 0, nf = 0, stage, end_stage; + uchar *src_dft_buf = 0, *dst_dft_buf = 0; + uchar *dft_wave = 0, *dct_wave = 0; + int* itab = 0; + uchar* ptr = 0; + int elem_size = (int)src.elemSize(), complex_elem_size = elem_size*2; + int factors[34], inplace_transform; + int i, len, count; + AutoBuffer buf; + + CV_Assert( type == CV_32FC1 || type == CV_64FC1 ); + _dst.create( src.rows, src.cols, type ); + Mat dst = _dst.getMat(); + + DCTFunc dct_func = dct_tbl[inv + (depth == CV_64F)*2]; + + if( (flags & DFT_ROWS) || src.rows == 1 || + (src.cols == 1 && (src.isContinuous() && dst.isContinuous()))) + { + stage = end_stage = 0; + } + else + { + stage = src.cols == 1; + end_stage = 1; + } + + for( ; stage <= end_stage; stage++ ) + { + uchar *sptr = src.data, *dptr = dst.data; + size_t sstep0, sstep1, dstep0, dstep1; + + if( stage == 0 ) + { + len = src.cols; + count = src.rows; + if( len == 1 && !(flags & DFT_ROWS) ) + { + len = src.rows; + count = 1; + } + sstep0 = src.step; + dstep0 = dst.step; + sstep1 = dstep1 = elem_size; + } + else + { + len = dst.rows; + count = dst.cols; + sstep1 = src.step; + dstep1 = dst.step; + sstep0 = dstep0 = elem_size; + } + + if( len != prev_len ) + { + int sz; + + if( len > 1 && (len & 1) ) + CV_Error( CV_StsNotImplemented, "Odd-size DCT\'s are not implemented" 
); + + sz = len*elem_size; + sz += (len/2 + 1)*complex_elem_size; + + spec = 0; + inplace_transform = 1; + /*if( len*count >= 64 && DFTInitAlloc_R_32f_p ) + { + int ipp_sz = 0; + if( depth == CV_32F ) + { + if( spec_dft ) + IPPI_CALL( DFTFree_R_32f_p( spec_dft )); + IPPI_CALL( DFTInitAlloc_R_32f_p( &spec_dft, len, 8, cvAlgHintNone )); + IPPI_CALL( DFTGetBufSize_R_32f_p( spec_dft, &ipp_sz )); + } + else + { + if( spec_dft ) + IPPI_CALL( DFTFree_R_64f_p( spec_dft )); + IPPI_CALL( DFTInitAlloc_R_64f_p( &spec_dft, len, 8, cvAlgHintNone )); + IPPI_CALL( DFTGetBufSize_R_64f_p( spec_dft, &ipp_sz )); + } + spec = spec_dft; + sz += ipp_sz; + } + else*/ + { + sz += len*(complex_elem_size + sizeof(int)) + complex_elem_size; + + nf = DFTFactorize( len, factors ); + inplace_transform = factors[0] == factors[nf-1]; + + i = nf > 1 && (factors[0] & 1) == 0; + if( (factors[i] & 1) != 0 && factors[i] > 5 ) + sz += (factors[i]+1)*complex_elem_size; + + if( !inplace_transform ) + sz += len*elem_size; + } + + buf.allocate( sz + 32 ); + ptr = (uchar*)buf; + + if( !spec ) + { + dft_wave = ptr; + ptr += len*complex_elem_size; + itab = (int*)ptr; + ptr = (uchar*)cvAlignPtr( ptr + len*sizeof(int), 16 ); + DFTInit( len, nf, factors, itab, complex_elem_size, dft_wave, inv ); + } + + dct_wave = ptr; + ptr += (len/2 + 1)*complex_elem_size; + src_dft_buf = dst_dft_buf = ptr; + ptr += len*elem_size; + if( !inplace_transform ) + { + dst_dft_buf = ptr; + ptr += len*elem_size; + } + DCTInit( len, complex_elem_size, dct_wave, inv ); + if( !inv ) + scale += scale; + prev_len = len; + } + // otherwise reuse the tables calculated on the previous stage + for( i = 0; i < count; i++ ) + { + dct_func( sptr + i*sstep0, (int)sstep1, src_dft_buf, dst_dft_buf, + dptr + i*dstep0, (int)dstep1, len, nf, factors, + itab, dft_wave, dct_wave, spec, ptr ); + } + src = dst; + } +} + + +void cv::idct( InputArray src, OutputArray dst, int flags ) +{ + dct( src, dst, flags | DCT_INVERSE ); +} + +namespace cv +{ + +static 
const int optimalDFTSizeTab[] = { +1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24, 25, 27, 30, 32, 36, 40, 45, 48, +50, 54, 60, 64, 72, 75, 80, 81, 90, 96, 100, 108, 120, 125, 128, 135, 144, 150, 160, +162, 180, 192, 200, 216, 225, 240, 243, 250, 256, 270, 288, 300, 320, 324, 360, 375, +384, 400, 405, 432, 450, 480, 486, 500, 512, 540, 576, 600, 625, 640, 648, 675, 720, +729, 750, 768, 800, 810, 864, 900, 960, 972, 1000, 1024, 1080, 1125, 1152, 1200, +1215, 1250, 1280, 1296, 1350, 1440, 1458, 1500, 1536, 1600, 1620, 1728, 1800, 1875, +1920, 1944, 2000, 2025, 2048, 2160, 2187, 2250, 2304, 2400, 2430, 2500, 2560, 2592, +2700, 2880, 2916, 3000, 3072, 3125, 3200, 3240, 3375, 3456, 3600, 3645, 3750, 3840, +3888, 4000, 4050, 4096, 4320, 4374, 4500, 4608, 4800, 4860, 5000, 5120, 5184, 5400, +5625, 5760, 5832, 6000, 6075, 6144, 6250, 6400, 6480, 6561, 6750, 6912, 7200, 7290, +7500, 7680, 7776, 8000, 8100, 8192, 8640, 8748, 9000, 9216, 9375, 9600, 9720, 10000, +10125, 10240, 10368, 10800, 10935, 11250, 11520, 11664, 12000, 12150, 12288, 12500, +12800, 12960, 13122, 13500, 13824, 14400, 14580, 15000, 15360, 15552, 15625, 16000, +16200, 16384, 16875, 17280, 17496, 18000, 18225, 18432, 18750, 19200, 19440, 19683, +20000, 20250, 20480, 20736, 21600, 21870, 22500, 23040, 23328, 24000, 24300, 24576, +25000, 25600, 25920, 26244, 27000, 27648, 28125, 28800, 29160, 30000, 30375, 30720, +31104, 31250, 32000, 32400, 32768, 32805, 33750, 34560, 34992, 36000, 36450, 36864, +37500, 38400, 38880, 39366, 40000, 40500, 40960, 41472, 43200, 43740, 45000, 46080, +46656, 46875, 48000, 48600, 49152, 50000, 50625, 51200, 51840, 52488, 54000, 54675, +55296, 56250, 57600, 58320, 59049, 60000, 60750, 61440, 62208, 62500, 64000, 64800, +65536, 65610, 67500, 69120, 69984, 72000, 72900, 73728, 75000, 76800, 77760, 78125, +78732, 80000, 81000, 81920, 82944, 84375, 86400, 87480, 90000, 91125, 92160, 93312, +93750, 96000, 97200, 98304, 98415, 100000, 101250, 102400, 103680, 104976, 108000, +109350, 
110592, 112500, 115200, 116640, 118098, 120000, 121500, 122880, 124416, 125000, +128000, 129600, 131072, 131220, 135000, 138240, 139968, 140625, 144000, 145800, 147456, +150000, 151875, 153600, 155520, 156250, 157464, 160000, 162000, 163840, 164025, 165888, +168750, 172800, 174960, 177147, 180000, 182250, 184320, 186624, 187500, 192000, 194400, +196608, 196830, 200000, 202500, 204800, 207360, 209952, 216000, 218700, 221184, 225000, +230400, 233280, 234375, 236196, 240000, 243000, 245760, 248832, 250000, 253125, 256000, +259200, 262144, 262440, 270000, 273375, 276480, 279936, 281250, 288000, 291600, 294912, +295245, 300000, 303750, 307200, 311040, 312500, 314928, 320000, 324000, 327680, 328050, +331776, 337500, 345600, 349920, 354294, 360000, 364500, 368640, 373248, 375000, 384000, +388800, 390625, 393216, 393660, 400000, 405000, 409600, 414720, 419904, 421875, 432000, +437400, 442368, 450000, 455625, 460800, 466560, 468750, 472392, 480000, 486000, 491520, +492075, 497664, 500000, 506250, 512000, 518400, 524288, 524880, 531441, 540000, 546750, +552960, 559872, 562500, 576000, 583200, 589824, 590490, 600000, 607500, 614400, 622080, +625000, 629856, 640000, 648000, 655360, 656100, 663552, 675000, 691200, 699840, 703125, +708588, 720000, 729000, 737280, 746496, 750000, 759375, 768000, 777600, 781250, 786432, +787320, 800000, 810000, 819200, 820125, 829440, 839808, 843750, 864000, 874800, 884736, +885735, 900000, 911250, 921600, 933120, 937500, 944784, 960000, 972000, 983040, 984150, +995328, 1000000, 1012500, 1024000, 1036800, 1048576, 1049760, 1062882, 1080000, 1093500, +1105920, 1119744, 1125000, 1152000, 1166400, 1171875, 1179648, 1180980, 1200000, +1215000, 1228800, 1244160, 1250000, 1259712, 1265625, 1280000, 1296000, 1310720, +1312200, 1327104, 1350000, 1366875, 1382400, 1399680, 1406250, 1417176, 1440000, +1458000, 1474560, 1476225, 1492992, 1500000, 1518750, 1536000, 1555200, 1562500, +1572864, 1574640, 1594323, 1600000, 1620000, 1638400, 1640250, 1658880, 
1679616, +1687500, 1728000, 1749600, 1769472, 1771470, 1800000, 1822500, 1843200, 1866240, +1875000, 1889568, 1920000, 1944000, 1953125, 1966080, 1968300, 1990656, 2000000, +2025000, 2048000, 2073600, 2097152, 2099520, 2109375, 2125764, 2160000, 2187000, +2211840, 2239488, 2250000, 2278125, 2304000, 2332800, 2343750, 2359296, 2361960, +2400000, 2430000, 2457600, 2460375, 2488320, 2500000, 2519424, 2531250, 2560000, +2592000, 2621440, 2624400, 2654208, 2657205, 2700000, 2733750, 2764800, 2799360, +2812500, 2834352, 2880000, 2916000, 2949120, 2952450, 2985984, 3000000, 3037500, +3072000, 3110400, 3125000, 3145728, 3149280, 3188646, 3200000, 3240000, 3276800, +3280500, 3317760, 3359232, 3375000, 3456000, 3499200, 3515625, 3538944, 3542940, +3600000, 3645000, 3686400, 3732480, 3750000, 3779136, 3796875, 3840000, 3888000, +3906250, 3932160, 3936600, 3981312, 4000000, 4050000, 4096000, 4100625, 4147200, +4194304, 4199040, 4218750, 4251528, 4320000, 4374000, 4423680, 4428675, 4478976, +4500000, 4556250, 4608000, 4665600, 4687500, 4718592, 4723920, 4782969, 4800000, +4860000, 4915200, 4920750, 4976640, 5000000, 5038848, 5062500, 5120000, 5184000, +5242880, 5248800, 5308416, 5314410, 5400000, 5467500, 5529600, 5598720, 5625000, +5668704, 5760000, 5832000, 5859375, 5898240, 5904900, 5971968, 6000000, 6075000, +6144000, 6220800, 6250000, 6291456, 6298560, 6328125, 6377292, 6400000, 6480000, +6553600, 6561000, 6635520, 6718464, 6750000, 6834375, 6912000, 6998400, 7031250, +7077888, 7085880, 7200000, 7290000, 7372800, 7381125, 7464960, 7500000, 7558272, +7593750, 7680000, 7776000, 7812500, 7864320, 7873200, 7962624, 7971615, 8000000, +8100000, 8192000, 8201250, 8294400, 8388608, 8398080, 8437500, 8503056, 8640000, +8748000, 8847360, 8857350, 8957952, 9000000, 9112500, 9216000, 9331200, 9375000, +9437184, 9447840, 9565938, 9600000, 9720000, 9765625, 9830400, 9841500, 9953280, +10000000, 10077696, 10125000, 10240000, 10368000, 10485760, 10497600, 10546875, 10616832, +10628820, 
10800000, 10935000, 11059200, 11197440, 11250000, 11337408, 11390625, 11520000, +11664000, 11718750, 11796480, 11809800, 11943936, 12000000, 12150000, 12288000, 12301875, +12441600, 12500000, 12582912, 12597120, 12656250, 12754584, 12800000, 12960000, 13107200, +13122000, 13271040, 13286025, 13436928, 13500000, 13668750, 13824000, 13996800, 14062500, +14155776, 14171760, 14400000, 14580000, 14745600, 14762250, 14929920, 15000000, 15116544, +15187500, 15360000, 15552000, 15625000, 15728640, 15746400, 15925248, 15943230, 16000000, +16200000, 16384000, 16402500, 16588800, 16777216, 16796160, 16875000, 17006112, 17280000, +17496000, 17578125, 17694720, 17714700, 17915904, 18000000, 18225000, 18432000, 18662400, +18750000, 18874368, 18895680, 18984375, 19131876, 19200000, 19440000, 19531250, 19660800, +19683000, 19906560, 20000000, 20155392, 20250000, 20480000, 20503125, 20736000, 20971520, +20995200, 21093750, 21233664, 21257640, 21600000, 21870000, 22118400, 22143375, 22394880, +22500000, 22674816, 22781250, 23040000, 23328000, 23437500, 23592960, 23619600, 23887872, +23914845, 24000000, 24300000, 24576000, 24603750, 24883200, 25000000, 25165824, 25194240, +25312500, 25509168, 25600000, 25920000, 26214400, 26244000, 26542080, 26572050, 26873856, +27000000, 27337500, 27648000, 27993600, 28125000, 28311552, 28343520, 28800000, 29160000, +29296875, 29491200, 29524500, 29859840, 30000000, 30233088, 30375000, 30720000, 31104000, +31250000, 31457280, 31492800, 31640625, 31850496, 31886460, 32000000, 32400000, 32768000, +32805000, 33177600, 33554432, 33592320, 33750000, 34012224, 34171875, 34560000, 34992000, +35156250, 35389440, 35429400, 35831808, 36000000, 36450000, 36864000, 36905625, 37324800, +37500000, 37748736, 37791360, 37968750, 38263752, 38400000, 38880000, 39062500, 39321600, +39366000, 39813120, 39858075, 40000000, 40310784, 40500000, 40960000, 41006250, 41472000, +41943040, 41990400, 42187500, 42467328, 42515280, 43200000, 43740000, 44236800, 44286750, 
+44789760, 45000000, 45349632, 45562500, 46080000, 46656000, 46875000, 47185920, 47239200, +47775744, 47829690, 48000000, 48600000, 48828125, 49152000, 49207500, 49766400, 50000000, +50331648, 50388480, 50625000, 51018336, 51200000, 51840000, 52428800, 52488000, 52734375, +53084160, 53144100, 53747712, 54000000, 54675000, 55296000, 55987200, 56250000, 56623104, +56687040, 56953125, 57600000, 58320000, 58593750, 58982400, 59049000, 59719680, 60000000, +60466176, 60750000, 61440000, 61509375, 62208000, 62500000, 62914560, 62985600, 63281250, +63700992, 63772920, 64000000, 64800000, 65536000, 65610000, 66355200, 66430125, 67108864, +67184640, 67500000, 68024448, 68343750, 69120000, 69984000, 70312500, 70778880, 70858800, +71663616, 72000000, 72900000, 73728000, 73811250, 74649600, 75000000, 75497472, 75582720, +75937500, 76527504, 76800000, 77760000, 78125000, 78643200, 78732000, 79626240, 79716150, +80000000, 80621568, 81000000, 81920000, 82012500, 82944000, 83886080, 83980800, 84375000, +84934656, 85030560, 86400000, 87480000, 87890625, 88473600, 88573500, 89579520, 90000000, +90699264, 91125000, 92160000, 93312000, 93750000, 94371840, 94478400, 94921875, 95551488, +95659380, 96000000, 97200000, 97656250, 98304000, 98415000, 99532800, 100000000, +100663296, 100776960, 101250000, 102036672, 102400000, 102515625, 103680000, 104857600, +104976000, 105468750, 106168320, 106288200, 107495424, 108000000, 109350000, 110592000, +110716875, 111974400, 112500000, 113246208, 113374080, 113906250, 115200000, 116640000, +117187500, 117964800, 118098000, 119439360, 119574225, 120000000, 120932352, 121500000, +122880000, 123018750, 124416000, 125000000, 125829120, 125971200, 126562500, 127401984, +127545840, 128000000, 129600000, 131072000, 131220000, 132710400, 132860250, 134217728, +134369280, 135000000, 136048896, 136687500, 138240000, 139968000, 140625000, 141557760, +141717600, 143327232, 144000000, 145800000, 146484375, 147456000, 147622500, 149299200, +150000000, 150994944, 
151165440, 151875000, 153055008, 153600000, 155520000, 156250000, +157286400, 157464000, 158203125, 159252480, 159432300, 160000000, 161243136, 162000000, +163840000, 164025000, 165888000, 167772160, 167961600, 168750000, 169869312, 170061120, +170859375, 172800000, 174960000, 175781250, 176947200, 177147000, 179159040, 180000000, +181398528, 182250000, 184320000, 184528125, 186624000, 187500000, 188743680, 188956800, +189843750, 191102976, 191318760, 192000000, 194400000, 195312500, 196608000, 196830000, +199065600, 199290375, 200000000, 201326592, 201553920, 202500000, 204073344, 204800000, +205031250, 207360000, 209715200, 209952000, 210937500, 212336640, 212576400, 214990848, +216000000, 218700000, 221184000, 221433750, 223948800, 225000000, 226492416, 226748160, +227812500, 230400000, 233280000, 234375000, 235929600, 236196000, 238878720, 239148450, +240000000, 241864704, 243000000, 244140625, 245760000, 246037500, 248832000, 250000000, +251658240, 251942400, 253125000, 254803968, 255091680, 256000000, 259200000, 262144000, +262440000, 263671875, 265420800, 265720500, 268435456, 268738560, 270000000, 272097792, +273375000, 276480000, 279936000, 281250000, 283115520, 283435200, 284765625, 286654464, +288000000, 291600000, 292968750, 294912000, 295245000, 298598400, 300000000, 301989888, +302330880, 303750000, 306110016, 307200000, 307546875, 311040000, 312500000, 314572800, +314928000, 316406250, 318504960, 318864600, 320000000, 322486272, 324000000, 327680000, +328050000, 331776000, 332150625, 335544320, 335923200, 337500000, 339738624, 340122240, +341718750, 345600000, 349920000, 351562500, 353894400, 354294000, 358318080, 360000000, +362797056, 364500000, 368640000, 369056250, 373248000, 375000000, 377487360, 377913600, +379687500, 382205952, 382637520, 384000000, 388800000, 390625000, 393216000, 393660000, +398131200, 398580750, 400000000, 402653184, 403107840, 405000000, 408146688, 409600000, +410062500, 414720000, 419430400, 419904000, 421875000, 
424673280, 425152800, 429981696, +432000000, 437400000, 439453125, 442368000, 442867500, 447897600, 450000000, 452984832, +453496320, 455625000, 460800000, 466560000, 468750000, 471859200, 472392000, 474609375, +477757440, 478296900, 480000000, 483729408, 486000000, 488281250, 491520000, 492075000, +497664000, 500000000, 503316480, 503884800, 506250000, 509607936, 510183360, 512000000, +512578125, 518400000, 524288000, 524880000, 527343750, 530841600, 531441000, 536870912, +537477120, 540000000, 544195584, 546750000, 552960000, 553584375, 559872000, 562500000, +566231040, 566870400, 569531250, 573308928, 576000000, 583200000, 585937500, 589824000, +590490000, 597196800, 597871125, 600000000, 603979776, 604661760, 607500000, 612220032, +614400000, 615093750, 622080000, 625000000, 629145600, 629856000, 632812500, 637009920, +637729200, 640000000, 644972544, 648000000, 655360000, 656100000, 663552000, 664301250, +671088640, 671846400, 675000000, 679477248, 680244480, 683437500, 691200000, 699840000, +703125000, 707788800, 708588000, 716636160, 720000000, 725594112, 729000000, 732421875, +737280000, 738112500, 746496000, 750000000, 754974720, 755827200, 759375000, 764411904, +765275040, 768000000, 777600000, 781250000, 786432000, 787320000, 791015625, 796262400, +797161500, 800000000, 805306368, 806215680, 810000000, 816293376, 819200000, 820125000, +829440000, 838860800, 839808000, 843750000, 849346560, 850305600, 854296875, 859963392, +864000000, 874800000, 878906250, 884736000, 885735000, 895795200, 900000000, 905969664, +906992640, 911250000, 921600000, 922640625, 933120000, 937500000, 943718400, 944784000, +949218750, 955514880, 956593800, 960000000, 967458816, 972000000, 976562500, 983040000, +984150000, 995328000, 996451875, 1000000000, 1006632960, 1007769600, 1012500000, +1019215872, 1020366720, 1024000000, 1025156250, 1036800000, 1048576000, 1049760000, +1054687500, 1061683200, 1062882000, 1073741824, 1074954240, 1080000000, 1088391168, +1093500000, 
1105920000, 1107168750, 1119744000, 1125000000, 1132462080, 1133740800, +1139062500, 1146617856, 1152000000, 1166400000, 1171875000, 1179648000, 1180980000, +1194393600, 1195742250, 1200000000, 1207959552, 1209323520, 1215000000, 1220703125, +1224440064, 1228800000, 1230187500, 1244160000, 1250000000, 1258291200, 1259712000, +1265625000, 1274019840, 1275458400, 1280000000, 1289945088, 1296000000, 1310720000, +1312200000, 1318359375, 1327104000, 1328602500, 1342177280, 1343692800, 1350000000, +1358954496, 1360488960, 1366875000, 1382400000, 1399680000, 1406250000, 1415577600, +1417176000, 1423828125, 1433272320, 1440000000, 1451188224, 1458000000, 1464843750, +1474560000, 1476225000, 1492992000, 1500000000, 1509949440, 1511654400, 1518750000, +1528823808, 1530550080, 1536000000, 1537734375, 1555200000, 1562500000, 1572864000, +1574640000, 1582031250, 1592524800, 1594323000, 1600000000, 1610612736, 1612431360, +1620000000, 1632586752, 1638400000, 1640250000, 1658880000, 1660753125, 1677721600, +1679616000, 1687500000, 1698693120, 1700611200, 1708593750, 1719926784, 1728000000, +1749600000, 1757812500, 1769472000, 1771470000, 1791590400, 1800000000, 1811939328, +1813985280, 1822500000, 1843200000, 1845281250, 1866240000, 1875000000, 1887436800, +1889568000, 1898437500, 1911029760, 1913187600, 1920000000, 1934917632, 1944000000, +1953125000, 1966080000, 1968300000, 1990656000, 1992903750, 2000000000, 2013265920, +2015539200, 2025000000, 2038431744, 2040733440, 2048000000, 2050312500, 2073600000, +2097152000, 2099520000, 2109375000, 2123366400, 2125764000 +}; + +} + +int cv::getOptimalDFTSize( int size0 ) +{ + int a = 0, b = sizeof(optimalDFTSizeTab)/sizeof(optimalDFTSizeTab[0]) - 1; + if( (unsigned)size0 >= (unsigned)optimalDFTSizeTab[b] ) + return -1; + + while( a < b ) + { + int c = (a + b) >> 1; + if( size0 <= optimalDFTSizeTab[c] ) + b = c; + else + a = c+1; + } + + return optimalDFTSizeTab[b]; +} + +CV_IMPL void +cvDFT( const CvArr* srcarr, CvArr* dstarr, int 
flags, int nonzero_rows ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst0 = cv::cvarrToMat(dstarr), dst = dst0; + int _flags = ((flags & CV_DXT_INVERSE) ? cv::DFT_INVERSE : 0) | + ((flags & CV_DXT_SCALE) ? cv::DFT_SCALE : 0) | + ((flags & CV_DXT_ROWS) ? cv::DFT_ROWS : 0); + + CV_Assert( src.size == dst.size ); + + if( src.type() != dst.type() ) + { + if( dst.channels() == 2 ) + _flags |= cv::DFT_COMPLEX_OUTPUT; + else + _flags |= cv::DFT_REAL_OUTPUT; + } + + cv::dft( src, dst, _flags, nonzero_rows ); + CV_Assert( dst.data == dst0.data ); // otherwise it means that the destination size or type was incorrect +} + + +CV_IMPL void +cvMulSpectrums( const CvArr* srcAarr, const CvArr* srcBarr, + CvArr* dstarr, int flags ) +{ + cv::Mat srcA = cv::cvarrToMat(srcAarr), + srcB = cv::cvarrToMat(srcBarr), + dst = cv::cvarrToMat(dstarr); + CV_Assert( srcA.size == dst.size && srcA.type() == dst.type() ); + + cv::mulSpectrums(srcA, srcB, dst, + (flags & CV_DXT_ROWS) ? cv::DFT_ROWS : 0, + (flags & CV_DXT_MUL_CONJ) != 0 ); +} + + +CV_IMPL void +cvDCT( const CvArr* srcarr, CvArr* dstarr, int flags ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + CV_Assert( src.size == dst.size && src.type() == dst.type() ); + int _flags = ((flags & CV_DXT_INVERSE) ? cv::DCT_INVERSE : 0) | + ((flags & CV_DXT_ROWS) ? cv::DCT_ROWS : 0); + cv::dct( src, dst, _flags ); +} + + +CV_IMPL int +cvGetOptimalDFTSize( int size0 ) +{ + return cv::getOptimalDFTSize(size0); +} + +/* End of file. */ diff --git a/opencv/core/lapack.cpp b/opencv/core/lapack.cpp new file mode 100644 index 0000000..c88125e --- /dev/null +++ b/opencv/core/lapack.cpp @@ -0,0 +1,1765 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#include "precomp.hpp" + +namespace cv +{ + +/****************************************************************************************\ +* LU & Cholesky implementation for small matrices * +\****************************************************************************************/ + +template static inline int +LUImpl(_Tp* A, size_t astep, int m, _Tp* b, size_t bstep, int n) +{ + int i, j, k, p = 1; + astep /= sizeof(A[0]); + bstep /= sizeof(b[0]); + + for( i = 0; i < m; i++ ) + { + k = i; + + for( j = i+1; j < m; j++ ) + if( std::abs(A[j*astep + i]) > std::abs(A[k*astep + i]) ) + k = j; + + if( std::abs(A[k*astep + i]) < std::numeric_limits<_Tp>::epsilon() ) + return 0; + + if( k != i ) + { + for( j = i; j < m; j++ ) + std::swap(A[i*astep + j], A[k*astep + j]); + if( b ) + for( j = 0; j < n; j++ ) + std::swap(b[i*bstep + j], b[k*bstep + j]); + p = -p; + } + + _Tp d = -1/A[i*astep + i]; + + for( j = i+1; j < m; j++ ) + { + _Tp alpha = A[j*astep + i]*d; + + for( k = i+1; k < m; k++ ) + A[j*astep + k] += alpha*A[i*astep + k]; + + if( b ) + for( k = 0; k < n; k++ ) + b[j*bstep + k] += alpha*b[i*bstep + k]; + } + + A[i*astep + i] = -d; + } + + if( b ) + { + for( i = m-1; i >= 0; i-- ) + for( j = 0; j < n; j++ ) + { + _Tp s = b[i*bstep + j]; + for( k = i+1; k < m; k++ ) + s -= A[i*astep + k]*b[k*bstep + j]; + b[i*bstep + j] = s*A[i*astep + i]; + } + } + + return p; +} + + +int LU(float* A, size_t astep, int m, float* b, size_t bstep, int n) +{ + return LUImpl(A, astep, m, b, bstep, n); +} + + +int LU(double* A, size_t astep, int m, double* b, size_t bstep, int n) +{ + return LUImpl(A, astep, m, b, bstep, n); +} + + +template static inline bool +CholImpl(_Tp* A, size_t astep, int m, _Tp* b, size_t bstep, int n) +{ + _Tp* L = A; + int i, j, k; + double s; + astep /= sizeof(A[0]); + bstep /= sizeof(b[0]); + + for( i = 0; i < m; i++ ) + { + for( j = 0; j < i; j++ ) + { + s = A[i*astep + j]; + for( k = 0; k < j; k++ ) + s -= L[i*astep + k]*L[j*astep + k]; + 
L[i*astep + j] = (_Tp)(s*L[j*astep + j]); + } + s = A[i*astep + i]; + for( k = 0; k < j; k++ ) + { + double t = L[i*astep + k]; + s -= t*t; + } + if( s < std::numeric_limits<_Tp>::epsilon() ) + return false; + L[i*astep + i] = (_Tp)(1./std::sqrt(s)); + } + + if( !b ) + return true; + + // LLt x = b + // 1: L y = b + // 2. Lt x = y + + /* + [ L00 ] y0 b0 + [ L10 L11 ] y1 = b1 + [ L20 L21 L22 ] y2 b2 + [ L30 L31 L32 L33 ] y3 b3 + + [ L00 L10 L20 L30 ] x0 y0 + [ L11 L21 L31 ] x1 = y1 + [ L22 L32 ] x2 y2 + [ L33 ] x3 y3 + */ + + for( i = 0; i < m; i++ ) + { + for( j = 0; j < n; j++ ) + { + s = b[i*bstep + j]; + for( k = 0; k < i; k++ ) + s -= L[i*astep + k]*b[k*bstep + j]; + b[i*bstep + j] = (_Tp)(s*L[i*astep + i]); + } + } + + for( i = m-1; i >= 0; i-- ) + { + for( j = 0; j < n; j++ ) + { + s = b[i*bstep + j]; + for( k = m-1; k > i; k-- ) + s -= L[k*astep + i]*b[k*bstep + j]; + b[i*bstep + j] = (_Tp)(s*L[i*astep + i]); + } + } + + return true; +} + + +bool Cholesky(float* A, size_t astep, int m, float* b, size_t bstep, int n) +{ + return CholImpl(A, astep, m, b, bstep, n); +} + +bool Cholesky(double* A, size_t astep, int m, double* b, size_t bstep, int n) +{ + return CholImpl(A, astep, m, b, bstep, n); +} + + +template static inline _Tp hypot(_Tp a, _Tp b) +{ + a = std::abs(a); + b = std::abs(b); + if( a > b ) + { + b /= a; + return a*std::sqrt(1 + b*b); + } + if( b > 0 ) + { + a /= b; + return b*std::sqrt(1 + a*a); + } + return 0; +} + + +template bool +JacobiImpl_( _Tp* A, size_t astep, _Tp* W, _Tp* V, size_t vstep, int n, uchar* buf ) +{ + const _Tp eps = std::numeric_limits<_Tp>::epsilon(); + int i, j, k, m; + + astep /= sizeof(A[0]); + if( V ) + { + vstep /= sizeof(V[0]); + for( i = 0; i < n; i++ ) + { + for( j = 0; j < n; j++ ) + V[i*vstep + j] = (_Tp)0; + V[i*vstep + i] = (_Tp)1; + } + } + + int iters, maxIters = n*n*30; + + _Tp* maxSR = (_Tp*)alignPtr(buf, sizeof(_Tp)); + _Tp* maxSC = maxSR + n; + int* indR = (int*)(maxSC + n); + int* indC = indR + n; + _Tp mv 
= (_Tp)0; + + for( k = 0; k < n; k++ ) + { + W[k] = A[(astep + 1)*k]; + if( k < n - 1 ) + { + for( m = k+1, mv = std::abs(A[astep*k + m]), i = k+2; i < n; i++ ) + { + _Tp val = std::abs(A[astep*k+i]); + if( mv < val ) + mv = val, m = i; + } + maxSR[k] = mv; + indR[k] = m; + } + if( k > 0 ) + { + for( m = 0, mv = std::abs(A[k]), i = 1; i < k; i++ ) + { + _Tp val = std::abs(A[astep*i+k]); + if( mv < val ) + mv = val, m = i; + } + maxSC[k] = mv; + indC[k] = m; + } + } + + for( iters = 0; iters < maxIters; iters++ ) + { + // find index (k,l) of pivot p + for( k = 0, mv = maxSR[0], i = 1; i < n-1; i++ ) + { + _Tp val = maxSR[i]; + if( mv < val ) + mv = val, k = i; + } + int l = indR[k]; + for( i = 1; i < n; i++ ) + { + _Tp val = maxSC[i]; + if( mv < val ) + mv = val, k = indC[i], l = i; + } + + _Tp p = A[astep*k + l]; + if( std::abs(p) <= eps ) + break; + _Tp y = (_Tp)((W[l] - W[k])*0.5); + _Tp t = std::abs(y) + hypot(p, y); + _Tp s = hypot(p, t); + _Tp c = t/s; + s = p/s; t = (p/t)*p; + if( y < 0 ) + s = -s, t = -t; + A[astep*k + l] = 0; + + W[k] -= t; + W[l] += t; + + _Tp a0, b0; + +#undef rotate +#define rotate(v0, v1) a0 = v0, b0 = v1, v0 = a0*c - b0*s, v1 = a0*s + b0*c + + // rotate rows and columns k and l + for( i = 0; i < k; i++ ) + rotate(A[astep*i+k], A[astep*i+l]); + for( i = k+1; i < l; i++ ) + rotate(A[astep*k+i], A[astep*i+l]); + for( i = l+1; i < n; i++ ) + rotate(A[astep*k+i], A[astep*l+i]); + + // rotate eigenvectors + if( V ) + for( i = 0; i < n; i++ ) + rotate(V[vstep*k+i], V[vstep*l+i]); + +#undef rotate + + for( j = 0; j < 2; j++ ) + { + int idx = j == 0 ? 
k : l; + if( idx < n - 1 ) + { + for( m = idx+1, mv = std::abs(A[astep*idx + m]), i = idx+2; i < n; i++ ) + { + _Tp val = std::abs(A[astep*idx+i]); + if( mv < val ) + mv = val, m = i; + } + maxSR[idx] = mv; + indR[idx] = m; + } + if( idx > 0 ) + { + for( m = 0, mv = std::abs(A[idx]), i = 1; i < idx; i++ ) + { + _Tp val = std::abs(A[astep*i+idx]); + if( mv < val ) + mv = val, m = i; + } + maxSC[idx] = mv; + indC[idx] = m; + } + } + } + + // sort eigenvalues & eigenvectors + for( k = 0; k < n-1; k++ ) + { + m = k; + for( i = k+1; i < n; i++ ) + { + if( W[m] < W[i] ) + m = i; + } + if( k != m ) + { + std::swap(W[m], W[k]); + if( V ) + for( i = 0; i < n; i++ ) + std::swap(V[vstep*m + i], V[vstep*k + i]); + } + } + + return true; +} + +static bool Jacobi( float* S, size_t sstep, float* e, float* E, size_t estep, int n, uchar* buf ) +{ + return JacobiImpl_(S, sstep, e, E, estep, n, buf); +} + +static bool Jacobi( double* S, size_t sstep, double* e, double* E, size_t estep, int n, uchar* buf ) +{ + return JacobiImpl_(S, sstep, e, E, estep, n, buf); +} + + +template struct VBLAS +{ + int dot(const T*, const T*, int, T*) const { return 0; } + int givens(T*, T*, int, T, T) const { return 0; } + int givensx(T*, T*, int, T, T, T*, T*) const { return 0; } +}; + +#if CV_SSE2 +template<> inline int VBLAS::dot(const float* a, const float* b, int n, float* result) const +{ + if( n < 8 ) + return 0; + int k = 0; + __m128 s0 = _mm_setzero_ps(), s1 = _mm_setzero_ps(); + for( ; k <= n - 8; k += 8 ) + { + __m128 a0 = _mm_load_ps(a + k), a1 = _mm_load_ps(a + k + 4); + __m128 b0 = _mm_load_ps(b + k), b1 = _mm_load_ps(b + k + 4); + + s0 = _mm_add_ps(s0, _mm_mul_ps(a0, b0)); + s1 = _mm_add_ps(s1, _mm_mul_ps(a1, b1)); + } + s0 = _mm_add_ps(s0, s1); + float sbuf[4]; + _mm_storeu_ps(sbuf, s0); + *result = sbuf[0] + sbuf[1] + sbuf[2] + sbuf[3]; + return k; +} + + +template<> inline int VBLAS::givens(float* a, float* b, int n, float c, float s) const +{ + if( n < 4 ) + return 0; + int k = 0; + 
__m128 c4 = _mm_set1_ps(c), s4 = _mm_set1_ps(s); + for( ; k <= n - 4; k += 4 ) + { + __m128 a0 = _mm_load_ps(a + k); + __m128 b0 = _mm_load_ps(b + k); + __m128 t0 = _mm_add_ps(_mm_mul_ps(a0, c4), _mm_mul_ps(b0, s4)); + __m128 t1 = _mm_sub_ps(_mm_mul_ps(b0, c4), _mm_mul_ps(a0, s4)); + _mm_store_ps(a + k, t0); + _mm_store_ps(b + k, t1); + } + return k; +} + + +template<> inline int VBLAS::givensx(float* a, float* b, int n, float c, float s, + float* anorm, float* bnorm) const +{ + if( n < 4 ) + return 0; + int k = 0; + __m128 c4 = _mm_set1_ps(c), s4 = _mm_set1_ps(s); + __m128 sa = _mm_setzero_ps(), sb = _mm_setzero_ps(); + for( ; k <= n - 4; k += 4 ) + { + __m128 a0 = _mm_load_ps(a + k); + __m128 b0 = _mm_load_ps(b + k); + __m128 t0 = _mm_add_ps(_mm_mul_ps(a0, c4), _mm_mul_ps(b0, s4)); + __m128 t1 = _mm_sub_ps(_mm_mul_ps(b0, c4), _mm_mul_ps(a0, s4)); + _mm_store_ps(a + k, t0); + _mm_store_ps(b + k, t1); + sa = _mm_add_ps(sa, _mm_mul_ps(t0, t0)); + sb = _mm_add_ps(sb, _mm_mul_ps(t1, t1)); + } + float abuf[4], bbuf[4]; + _mm_storeu_ps(abuf, sa); + _mm_storeu_ps(bbuf, sb); + *anorm = abuf[0] + abuf[1] + abuf[2] + abuf[3]; + *bnorm = bbuf[0] + bbuf[1] + bbuf[2] + bbuf[3]; + return k; +} + + +template<> inline int VBLAS::dot(const double* a, const double* b, int n, double* result) const +{ + if( n < 4 ) + return 0; + int k = 0; + __m128d s0 = _mm_setzero_pd(), s1 = _mm_setzero_pd(); + for( ; k <= n - 4; k += 4 ) + { + __m128d a0 = _mm_load_pd(a + k), a1 = _mm_load_pd(a + k + 2); + __m128d b0 = _mm_load_pd(b + k), b1 = _mm_load_pd(b + k + 2); + + s0 = _mm_add_pd(s0, _mm_mul_pd(a0, b0)); + s1 = _mm_add_pd(s1, _mm_mul_pd(a1, b1)); + } + s0 = _mm_add_pd(s0, s1); + double sbuf[2]; + _mm_storeu_pd(sbuf, s0); + *result = sbuf[0] + sbuf[1]; + return k; +} + + +template<> inline int VBLAS::givens(double* a, double* b, int n, double c, double s) const +{ + int k = 0; + __m128d c2 = _mm_set1_pd(c), s2 = _mm_set1_pd(s); + for( ; k <= n - 2; k += 2 ) + { + __m128d a0 = _mm_load_pd(a + 
k); + __m128d b0 = _mm_load_pd(b + k); + __m128d t0 = _mm_add_pd(_mm_mul_pd(a0, c2), _mm_mul_pd(b0, s2)); + __m128d t1 = _mm_sub_pd(_mm_mul_pd(b0, c2), _mm_mul_pd(a0, s2)); + _mm_store_pd(a + k, t0); + _mm_store_pd(b + k, t1); + } + return k; +} + + +template<> inline int VBLAS::givensx(double* a, double* b, int n, double c, double s, + double* anorm, double* bnorm) const +{ + int k = 0; + __m128d c2 = _mm_set1_pd(c), s2 = _mm_set1_pd(s); + __m128d sa = _mm_setzero_pd(), sb = _mm_setzero_pd(); + for( ; k <= n - 2; k += 2 ) + { + __m128d a0 = _mm_load_pd(a + k); + __m128d b0 = _mm_load_pd(b + k); + __m128d t0 = _mm_add_pd(_mm_mul_pd(a0, c2), _mm_mul_pd(b0, s2)); + __m128d t1 = _mm_sub_pd(_mm_mul_pd(b0, c2), _mm_mul_pd(a0, s2)); + _mm_store_pd(a + k, t0); + _mm_store_pd(b + k, t1); + sa = _mm_add_pd(sa, _mm_mul_pd(t0, t0)); + sb = _mm_add_pd(sb, _mm_mul_pd(t1, t1)); + } + double abuf[2], bbuf[2]; + _mm_storeu_pd(abuf, sa); + _mm_storeu_pd(bbuf, sb); + *anorm = abuf[0] + abuf[1]; + *bnorm = bbuf[0] + bbuf[1]; + return k; +} +#endif + +template void +JacobiSVDImpl_(_Tp* At, size_t astep, _Tp* W, _Tp* Vt, size_t vstep, int m, int n, int n1) +{ + VBLAS<_Tp> vblas; + _Tp eps = std::numeric_limits<_Tp>::epsilon()*10; + int i, j, k, iter, max_iter = std::max(m, 30); + _Tp c, s; + double sd; + astep /= sizeof(At[0]); + vstep /= sizeof(Vt[0]); + + for( i = 0; i < n; i++ ) + { + for( k = 0, s = 0; k < m; k++ ) + { + _Tp t = At[i*astep + k]; + s += t*t; + } + W[i] = s; + + if( Vt ) + { + for( k = 0; k < n; k++ ) + Vt[i*vstep + k] = 0; + Vt[i*vstep + i] = 1; + } + } + + for( iter = 0; iter < max_iter; iter++ ) + { + bool changed = false; + + for( i = 0; i < n-1; i++ ) + for( j = i+1; j < n; j++ ) + { + _Tp *Ai = At + i*astep, *Aj = At + j*astep, a = W[i], p = 0, b = W[j]; + + k = vblas.dot(Ai, Aj, m, &p); + + for( ; k < m; k++ ) + p += Ai[k]*Aj[k]; + + if( std::abs(p) <= eps*std::sqrt((double)a*b) ) + continue; + + p *= 2; + double beta = a - b, gamma = hypot((double)p, beta), 
delta; + if( beta < 0 ) + { + delta = (_Tp)((gamma - beta)*0.5); + s = (_Tp)std::sqrt(delta/gamma); + c = (_Tp)(p/(gamma*s*2)); + } + else + { + c = (_Tp)std::sqrt((gamma + beta)/(gamma*2)); + s = (_Tp)(p/(gamma*c*2)); + delta = (_Tp)(p*p*0.5/(gamma + beta)); + } + + if( iter % 2 ) + { + W[i] = (_Tp)(W[i] + delta); + W[j] = (_Tp)(W[j] - delta); + + k = vblas.givens(Ai, Aj, m, c, s); + + for( ; k < m; k++ ) + { + _Tp t0 = c*Ai[k] + s*Aj[k]; + _Tp t1 = -s*Ai[k] + c*Aj[k]; + Ai[k] = t0; Aj[k] = t1; + } + } + else + { + a = b = 0; + k = vblas.givensx(Ai, Aj, m, c, s, &a, &b); + for( ; k < m; k++ ) + { + _Tp t0 = c*Ai[k] + s*Aj[k]; + _Tp t1 = -s*Ai[k] + c*Aj[k]; + Ai[k] = t0; Aj[k] = t1; + + a += t0*t0; b += t1*t1; + } + W[i] = a; W[j] = b; + } + + changed = true; + + if( Vt ) + { + _Tp *Vi = Vt + i*vstep, *Vj = Vt + j*vstep; + k = vblas.givens(Vi, Vj, n, c, s); + + for( ; k < n; k++ ) + { + _Tp t0 = c*Vi[k] + s*Vj[k]; + _Tp t1 = -s*Vi[k] + c*Vj[k]; + Vi[k] = t0; Vj[k] = t1; + } + } + } + if( !changed ) + break; + } + + for( i = 0; i < n; i++ ) + { + for( k = 0, sd = 0; k < m; k++ ) + { + _Tp t = At[i*astep + k]; + sd += (double)t*t; + } + W[i] = s = (_Tp)std::sqrt(sd); + } + + for( i = 0; i < n-1; i++ ) + { + j = i; + for( k = i+1; k < n; k++ ) + { + if( W[j] < W[k] ) + j = k; + } + if( i != j ) + { + std::swap(W[i], W[j]); + if( Vt ) + { + for( k = 0; k < m; k++ ) + std::swap(At[i*astep + k], At[j*astep + k]); + + for( k = 0; k < n; k++ ) + std::swap(Vt[i*vstep + k], Vt[j*vstep + k]); + } + } + } + + if( !Vt ) + return; + RNG rng; + for( i = 0; i < n1; i++ ) + { + s = i < n ? W[i] : 0; + + while( s == 0 ) + { + // if we got a zero singular value, then in order to get the corresponding left singular vector + // we generate a random vector, project it to the previously computed left singular vectors, + // subtract the projection and normalize the difference. + const _Tp val0 = (_Tp)(1./m); + for( k = 0; k < m; k++ ) + { + _Tp val = (rng.next() & 256) ? 
val0 : -val0; + At[i*astep + k] = val; + } + for( iter = 0; iter < 2; iter++ ) + { + for( j = 0; j < i; j++ ) + { + sd = 0; + for( k = 0; k < m; k++ ) + sd += At[i*astep + k]*At[j*astep + k]; + _Tp asum = 0; + for( k = 0; k < m; k++ ) + { + _Tp t = (_Tp)(At[i*astep + k] - sd*At[j*astep + k]); + At[i*astep + k] = t; + asum += std::abs(t); + } + asum = asum ? 1/asum : 0; + for( k = 0; k < m; k++ ) + At[i*astep + k] *= asum; + } + } + sd = 0; + for( k = 0; k < m; k++ ) + { + _Tp t = At[i*astep + k]; + sd += (double)t*t; + } + s = (_Tp)std::sqrt(sd); + } + + s = 1/s; + for( k = 0; k < m; k++ ) + At[i*astep + k] *= s; + } +} + + +static void JacobiSVD(float* At, size_t astep, float* W, float* Vt, size_t vstep, int m, int n, int n1=-1) +{ + JacobiSVDImpl_(At, astep, W, Vt, vstep, m, n, !Vt ? 0 : n1 < 0 ? n : n1); +} + +static void JacobiSVD(double* At, size_t astep, double* W, double* Vt, size_t vstep, int m, int n, int n1=-1) +{ + JacobiSVDImpl_(At, astep, W, Vt, vstep, m, n, !Vt ? 0 : n1 < 0 ? n : n1); +} + +/* y[0:m,0:n] += diag(a[0:1,0:m]) * x[0:m,0:n] */ +template static void +MatrAXPY( int m, int n, const T1* x, int dx, + const T2* a, int inca, T3* y, int dy ) +{ + int i, j; + for( i = 0; i < m; i++, x += dx, y += dy ) + { + T2 s = a[i*inca]; + for( j = 0; j <= n - 4; j += 4 ) + { + T3 t0 = (T3)(y[j] + s*x[j]); + T3 t1 = (T3)(y[j+1] + s*x[j+1]); + y[j] = t0; + y[j+1] = t1; + t0 = (T3)(y[j+2] + s*x[j+2]); + t1 = (T3)(y[j+3] + s*x[j+3]); + y[j+2] = t0; + y[j+3] = t1; + } + + for( ; j < n; j++ ) + y[j] = (T3)(y[j] + s*x[j]); + } +} + +template static void +SVBkSbImpl_( int m, int n, const T* w, int incw, + const T* u, int ldu, bool uT, + const T* v, int ldv, bool vT, + const T* b, int ldb, int nb, + T* x, int ldx, double* buffer, T eps ) +{ + double threshold = 0; + int udelta0 = uT ? ldu : 1, udelta1 = uT ? 1 : ldu; + int vdelta0 = vT ? ldv : 1, vdelta1 = vT ? 
1 : ldv; + int i, j, nm = std::min(m, n); + + if( !b ) + nb = m; + + for( i = 0; i < n; i++ ) + for( j = 0; j < nb; j++ ) + x[i*ldx + j] = 0; + + for( i = 0; i < nm; i++ ) + threshold += w[i*incw]; + threshold *= eps; + + // v * inv(w) * uT * b + for( i = 0; i < nm; i++, u += udelta0, v += vdelta0 ) + { + double wi = w[i*incw]; + if( (double)std::abs(wi) <= threshold ) + continue; + wi = 1/wi; + + if( nb == 1 ) + { + double s = 0; + if( b ) + for( j = 0; j < m; j++ ) + s += u[j*udelta1]*b[j*ldb]; + else + s = u[0]; + s *= wi; + + for( j = 0; j < n; j++ ) + x[j*ldx] = (T)(x[j*ldx] + s*v[j*vdelta1]); + } + else + { + if( b ) + { + for( j = 0; j < nb; j++ ) + buffer[j] = 0; + MatrAXPY( m, nb, b, ldb, u, udelta1, buffer, 0 ); + for( j = 0; j < nb; j++ ) + buffer[j] *= wi; + } + else + { + for( j = 0; j < nb; j++ ) + buffer[j] = u[j*udelta1]*wi; + } + MatrAXPY( n, nb, buffer, 0, v, vdelta1, x, ldx ); + } + } +} + +static void +SVBkSb( int m, int n, const float* w, size_t wstep, + const float* u, size_t ustep, bool uT, + const float* v, size_t vstep, bool vT, + const float* b, size_t bstep, int nb, + float* x, size_t xstep, uchar* buffer ) +{ + SVBkSbImpl_(m, n, w, wstep ? (int)(wstep/sizeof(w[0])) : 1, + u, (int)(ustep/sizeof(u[0])), uT, + v, (int)(vstep/sizeof(v[0])), vT, + b, (int)(bstep/sizeof(b[0])), nb, + x, (int)(xstep/sizeof(x[0])), + (double*)alignPtr(buffer, sizeof(double)), FLT_EPSILON*10 ); +} + +static void +SVBkSb( int m, int n, const double* w, size_t wstep, + const double* u, size_t ustep, bool uT, + const double* v, size_t vstep, bool vT, + const double* b, size_t bstep, int nb, + double* x, size_t xstep, uchar* buffer ) +{ + SVBkSbImpl_(m, n, w, wstep ? 
(int)(wstep/sizeof(w[0])) : 1, + u, (int)(ustep/sizeof(u[0])), uT, + v, (int)(vstep/sizeof(v[0])), vT, + b, (int)(bstep/sizeof(b[0])), nb, + x, (int)(xstep/sizeof(x[0])), + (double*)alignPtr(buffer, sizeof(double)), DBL_EPSILON*2 ); +} + +} + +/****************************************************************************************\ +* Determinant of the matrix * +\****************************************************************************************/ + +#define det2(m) ((double)m(0,0)*m(1,1) - (double)m(0,1)*m(1,0)) +#define det3(m) (m(0,0)*((double)m(1,1)*m(2,2) - (double)m(1,2)*m(2,1)) - \ + m(0,1)*((double)m(1,0)*m(2,2) - (double)m(1,2)*m(2,0)) + \ + m(0,2)*((double)m(1,0)*m(2,1) - (double)m(1,1)*m(2,0))) + +double cv::determinant( InputArray _mat ) +{ + Mat mat = _mat.getMat(); + double result = 0; + int type = mat.type(), rows = mat.rows; + size_t step = mat.step; + const uchar* m = mat.data; + + CV_Assert( mat.rows == mat.cols && (type == CV_32F || type == CV_64F)); + + #define Mf(y, x) ((float*)(m + y*step))[x] + #define Md(y, x) ((double*)(m + y*step))[x] + + if( type == CV_32F ) + { + if( rows == 2 ) + result = det2(Mf); + else if( rows == 3 ) + result = det3(Mf); + else if( rows == 1 ) + result = Mf(0,0); + else + { + size_t bufSize = rows*rows*sizeof(float); + AutoBuffer buffer(bufSize); + Mat a(rows, rows, CV_32F, (uchar*)buffer); + mat.copyTo(a); + + result = LU((float*)a.data, a.step, rows, 0, 0, 0); + if( result ) + { + for( int i = 0; i < rows; i++ ) + result *= ((const float*)(a.data + a.step*i))[i]; + result = 1./result; + } + } + } + else + { + if( rows == 2 ) + result = det2(Md); + else if( rows == 3 ) + result = det3(Md); + else if( rows == 1 ) + result = Md(0,0); + else + { + size_t bufSize = rows*rows*sizeof(double); + AutoBuffer buffer(bufSize); + Mat a(rows, rows, CV_64F, (uchar*)buffer); + mat.copyTo(a); + + result = LU((double*)a.data, a.step, rows, 0, 0, 0); + if( result ) + { + for( int i = 0; i < rows; i++ ) + result *= ((const 
double*)(a.data + a.step*i))[i]; + result = 1./result; + } + } + } + + #undef Mf + #undef Md + + return result; +} + +/****************************************************************************************\ +* Inverse (or pseudo-inverse) of a matrix * +\****************************************************************************************/ + +#define Sf( y, x ) ((float*)(srcdata + y*srcstep))[x] +#define Sd( y, x ) ((double*)(srcdata + y*srcstep))[x] +#define Df( y, x ) ((float*)(dstdata + y*dststep))[x] +#define Dd( y, x ) ((double*)(dstdata + y*dststep))[x] + +double cv::invert( InputArray _src, OutputArray _dst, int method ) +{ + bool result = false; + Mat src = _src.getMat(); + int type = src.type(); + + CV_Assert( method == DECOMP_LU || method == DECOMP_CHOLESKY || method == DECOMP_SVD ); + _dst.create( src.cols, src.rows, type ); + Mat dst = _dst.getMat(); + + if( method == DECOMP_SVD ) + { + int n = std::min(src.rows, src.cols); + SVD svd(src); + svd.backSubst(Mat(), dst); + + return type == CV_32F ? + (((float*)svd.w.data)[0] >= FLT_EPSILON ? + ((float*)svd.w.data)[n-1]/((float*)svd.w.data)[0] : 0) : + (((double*)svd.w.data)[0] >= DBL_EPSILON ? + ((double*)svd.w.data)[n-1]/((double*)svd.w.data)[0] : 0); + } + + CV_Assert( src.rows == src.cols && (type == CV_32F || type == CV_64F)); + + if( src.rows <= 3 ) + { + uchar* srcdata = src.data; + uchar* dstdata = dst.data; + size_t srcstep = src.step; + size_t dststep = dst.step; + + if( src.rows == 2 ) + { + if( type == CV_32FC1 ) + { + double d = det2(Sf); + if( d != 0. ) + { + double t0, t1; + result = true; + d = 1./d; + t0 = Sf(0,0)*d; + t1 = Sf(1,1)*d; + Df(1,1) = (float)t0; + Df(0,0) = (float)t1; + t0 = -Sf(0,1)*d; + t1 = -Sf(1,0)*d; + Df(0,1) = (float)t0; + Df(1,0) = (float)t1; + } + } + else + { + double d = det2(Sd); + if( d != 0. 
) + { + double t0, t1; + result = true; + d = 1./d; + t0 = Sd(0,0)*d; + t1 = Sd(1,1)*d; + Dd(1,1) = t0; + Dd(0,0) = t1; + t0 = -Sd(0,1)*d; + t1 = -Sd(1,0)*d; + Dd(0,1) = t0; + Dd(1,0) = t1; + } + } + } + else if( src.rows == 3 ) + { + if( type == CV_32FC1 ) + { + double d = det3(Sf); + if( d != 0. ) + { + float t[9]; + result = true; + d = 1./d; + + t[0] = (float)(((double)Sf(1,1) * Sf(2,2) - (double)Sf(1,2) * Sf(2,1)) * d); + t[1] = (float)(((double)Sf(0,2) * Sf(2,1) - (double)Sf(0,1) * Sf(2,2)) * d); + t[2] = (float)(((double)Sf(0,1) * Sf(1,2) - (double)Sf(0,2) * Sf(1,1)) * d); + + t[3] = (float)(((double)Sf(1,2) * Sf(2,0) - (double)Sf(1,0) * Sf(2,2)) * d); + t[4] = (float)(((double)Sf(0,0) * Sf(2,2) - (double)Sf(0,2) * Sf(2,0)) * d); + t[5] = (float)(((double)Sf(0,2) * Sf(1,0) - (double)Sf(0,0) * Sf(1,2)) * d); + + t[6] = (float)(((double)Sf(1,0) * Sf(2,1) - (double)Sf(1,1) * Sf(2,0)) * d); + t[7] = (float)(((double)Sf(0,1) * Sf(2,0) - (double)Sf(0,0) * Sf(2,1)) * d); + t[8] = (float)(((double)Sf(0,0) * Sf(1,1) - (double)Sf(0,1) * Sf(1,0)) * d); + + Df(0,0) = t[0]; Df(0,1) = t[1]; Df(0,2) = t[2]; + Df(1,0) = t[3]; Df(1,1) = t[4]; Df(1,2) = t[5]; + Df(2,0) = t[6]; Df(2,1) = t[7]; Df(2,2) = t[8]; + } + } + else + { + double d = det3(Sd); + if( d != 0. 
) + { + double t[9]; + result = true; + d = 1./d; + + t[0] = (Sd(1,1) * Sd(2,2) - Sd(1,2) * Sd(2,1)) * d; + t[1] = (Sd(0,2) * Sd(2,1) - Sd(0,1) * Sd(2,2)) * d; + t[2] = (Sd(0,1) * Sd(1,2) - Sd(0,2) * Sd(1,1)) * d; + + t[3] = (Sd(1,2) * Sd(2,0) - Sd(1,0) * Sd(2,2)) * d; + t[4] = (Sd(0,0) * Sd(2,2) - Sd(0,2) * Sd(2,0)) * d; + t[5] = (Sd(0,2) * Sd(1,0) - Sd(0,0) * Sd(1,2)) * d; + + t[6] = (Sd(1,0) * Sd(2,1) - Sd(1,1) * Sd(2,0)) * d; + t[7] = (Sd(0,1) * Sd(2,0) - Sd(0,0) * Sd(2,1)) * d; + t[8] = (Sd(0,0) * Sd(1,1) - Sd(0,1) * Sd(1,0)) * d; + + Dd(0,0) = t[0]; Dd(0,1) = t[1]; Dd(0,2) = t[2]; + Dd(1,0) = t[3]; Dd(1,1) = t[4]; Dd(1,2) = t[5]; + Dd(2,0) = t[6]; Dd(2,1) = t[7]; Dd(2,2) = t[8]; + } + } + } + else + { + assert( src.rows == 1 ); + + if( type == CV_32FC1 ) + { + double d = Sf(0,0); + if( d != 0. ) + { + result = true; + Df(0,0) = (float)(1./d); + } + } + else + { + double d = Sd(0,0); + if( d != 0. ) + { + result = true; + Dd(0,0) = 1./d; + } + } + } + if( !result ) + dst = Scalar(0); + return result; + } + + int n = dst.cols, elem_size = CV_ELEM_SIZE(type); + AutoBuffer buf(n*n*elem_size); + Mat src1(n, n, type, (uchar*)buf); + src.copyTo(src1); + setIdentity(dst); + + if( method == DECOMP_LU && type == CV_32F ) + result = LU((float*)src1.data, src1.step, n, (float*)dst.data, dst.step, n) != 0; + else if( method == DECOMP_LU && type == CV_64F ) + result = LU((double*)src1.data, src1.step, n, (double*)dst.data, dst.step, n) != 0; + else if( method == DECOMP_CHOLESKY && type == CV_32F ) + result = Cholesky((float*)src1.data, src1.step, n, (float*)dst.data, dst.step, n); + else + result = Cholesky((double*)src1.data, src1.step, n, (double*)dst.data, dst.step, n); + + if( !result ) + dst = Scalar(0); + + return result; +} + +/****************************************************************************************\ +* Solving a linear system * +\****************************************************************************************/ + +bool cv::solve( InputArray 
_src, InputArray _src2arg, OutputArray _dst, int method ) +{ + bool result = true; + Mat src = _src.getMat(), _src2 = _src2arg.getMat(); + int type = src.type(); + bool is_normal = (method & DECOMP_NORMAL) != 0; + + CV_Assert( type == _src2.type() && (type == CV_32F || type == CV_64F) ); + + method &= ~DECOMP_NORMAL; + CV_Assert( (method != DECOMP_LU && method != DECOMP_CHOLESKY) || + is_normal || src.rows == src.cols ); + + // check case of a single equation and small matrix + if( (method == DECOMP_LU || method == DECOMP_CHOLESKY) && !is_normal && + src.rows <= 3 && src.rows == src.cols && _src2.cols == 1 ) + { + _dst.create( src.cols, _src2.cols, src.type() ); + Mat dst = _dst.getMat(); + + #define bf(y) ((float*)(bdata + y*src2step))[0] + #define bd(y) ((double*)(bdata + y*src2step))[0] + + uchar* srcdata = src.data; + uchar* bdata = _src2.data; + uchar* dstdata = dst.data; + size_t srcstep = src.step; + size_t src2step = _src2.step; + size_t dststep = dst.step; + + if( src.rows == 2 ) + { + if( type == CV_32FC1 ) + { + double d = det2(Sf); + if( d != 0. ) + { + double t; + d = 1./d; + t = (float)(((double)bf(0)*Sf(1,1) - (double)bf(1)*Sf(0,1))*d); + Df(1,0) = (float)(((double)bf(1)*Sf(0,0) - (double)bf(0)*Sf(1,0))*d); + Df(0,0) = (float)t; + } + else + result = false; + } + else + { + double d = det2(Sd); + if( d != 0. ) + { + double t; + d = 1./d; + t = (bd(0)*Sd(1,1) - bd(1)*Sd(0,1))*d; + Dd(1,0) = (bd(1)*Sd(0,0) - bd(0)*Sd(1,0))*d; + Dd(0,0) = t; + } + else + result = false; + } + } + else if( src.rows == 3 ) + { + if( type == CV_32FC1 ) + { + double d = det3(Sf); + if( d != 0. 
) + { + float t[3]; + d = 1./d; + + t[0] = (float)(d* + (bf(0)*((double)Sf(1,1)*Sf(2,2) - (double)Sf(1,2)*Sf(2,1)) - + Sf(0,1)*((double)bf(1)*Sf(2,2) - (double)Sf(1,2)*bf(2)) + + Sf(0,2)*((double)bf(1)*Sf(2,1) - (double)Sf(1,1)*bf(2)))); + + t[1] = (float)(d* + (Sf(0,0)*(double)(bf(1)*Sf(2,2) - (double)Sf(1,2)*bf(2)) - + bf(0)*((double)Sf(1,0)*Sf(2,2) - (double)Sf(1,2)*Sf(2,0)) + + Sf(0,2)*((double)Sf(1,0)*bf(2) - (double)bf(1)*Sf(2,0)))); + + t[2] = (float)(d* + (Sf(0,0)*((double)Sf(1,1)*bf(2) - (double)bf(1)*Sf(2,1)) - + Sf(0,1)*((double)Sf(1,0)*bf(2) - (double)bf(1)*Sf(2,0)) + + bf(0)*((double)Sf(1,0)*Sf(2,1) - (double)Sf(1,1)*Sf(2,0)))); + + Df(0,0) = t[0]; + Df(1,0) = t[1]; + Df(2,0) = t[2]; + } + else + result = false; + } + else + { + double d = det3(Sd); + if( d != 0. ) + { + double t[9]; + + d = 1./d; + + t[0] = ((Sd(1,1) * Sd(2,2) - Sd(1,2) * Sd(2,1))*bd(0) + + (Sd(0,2) * Sd(2,1) - Sd(0,1) * Sd(2,2))*bd(1) + + (Sd(0,1) * Sd(1,2) - Sd(0,2) * Sd(1,1))*bd(2))*d; + + t[1] = ((Sd(1,2) * Sd(2,0) - Sd(1,0) * Sd(2,2))*bd(0) + + (Sd(0,0) * Sd(2,2) - Sd(0,2) * Sd(2,0))*bd(1) + + (Sd(0,2) * Sd(1,0) - Sd(0,0) * Sd(1,2))*bd(2))*d; + + t[2] = ((Sd(1,0) * Sd(2,1) - Sd(1,1) * Sd(2,0))*bd(0) + + (Sd(0,1) * Sd(2,0) - Sd(0,0) * Sd(2,1))*bd(1) + + (Sd(0,0) * Sd(1,1) - Sd(0,1) * Sd(1,0))*bd(2))*d; + + Dd(0,0) = t[0]; + Dd(1,0) = t[1]; + Dd(2,0) = t[2]; + } + else + result = false; + } + } + else + { + assert( src.rows == 1 ); + + if( type == CV_32FC1 ) + { + double d = Sf(0,0); + if( d != 0. ) + Df(0,0) = (float)(bf(0)/d); + else + result = false; + } + else + { + double d = Sd(0,0); + if( d != 0. ) + Dd(0,0) = (bd(0)/d); + else + result = false; + } + } + return result; + } + + if( method == DECOMP_QR ) + method = DECOMP_SVD; + + int m = src.rows, m_ = m, n = src.cols, nb = _src2.cols; + size_t esz = CV_ELEM_SIZE(type), bufsize = 0; + size_t vstep = alignSize(n*esz, 16); + size_t astep = method == DECOMP_SVD && !is_normal ? 
alignSize(m*esz, 16) : vstep; + AutoBuffer buffer; + + Mat src2 = _src2; + _dst.create( src.cols, src2.cols, src.type() ); + Mat dst = _dst.getMat(); + + if( m < n ) + CV_Error(CV_StsBadArg, "The function can not solve under-determined linear systems" ); + + if( m == n ) + is_normal = false; + else if( is_normal ) + { + m_ = n; + if( method == DECOMP_SVD ) + method = DECOMP_EIG; + } + + size_t asize = astep*(method == DECOMP_SVD || is_normal ? n : m); + bufsize += asize + 32; + + if( is_normal ) + bufsize += n*nb*esz; + + if( method == DECOMP_SVD || method == DECOMP_EIG ) + bufsize += n*5*esz + n*vstep + nb*sizeof(double) + 32; + + buffer.allocate(bufsize); + uchar* ptr = alignPtr((uchar*)buffer, 16); + + Mat a(m_, n, type, ptr, astep); + + if( is_normal ) + mulTransposed(src, a, true); + else if( method != DECOMP_SVD ) + src.copyTo(a); + else + { + a = Mat(n, m_, type, ptr, astep); + transpose(src, a); + } + ptr += asize; + + if( !is_normal ) + { + if( method == DECOMP_LU || method == DECOMP_CHOLESKY ) + src2.copyTo(dst); + } + else + { + // a'*b + if( method == DECOMP_LU || method == DECOMP_CHOLESKY ) + gemm( src, src2, 1, Mat(), 0, dst, GEMM_1_T ); + else + { + Mat tmp(n, nb, type, ptr); + ptr += n*nb*esz; + gemm( src, src2, 1, Mat(), 0, tmp, GEMM_1_T ); + src2 = tmp; + } + } + + if( method == DECOMP_LU ) + { + if( type == CV_32F ) + result = LU(a.ptr(), a.step, n, dst.ptr(), dst.step, nb) != 0; + else + result = LU(a.ptr(), a.step, n, dst.ptr(), dst.step, nb) != 0; + } + else if( method == DECOMP_CHOLESKY ) + { + if( type == CV_32F ) + result = Cholesky(a.ptr(), a.step, n, dst.ptr(), dst.step, nb); + else + result = Cholesky(a.ptr(), a.step, n, dst.ptr(), dst.step, nb); + } + else + { + ptr = alignPtr(ptr, 16); + Mat v(n, n, type, ptr, vstep), w(n, 1, type, ptr + vstep*n), u; + ptr += n*(vstep + esz); + + if( method == DECOMP_EIG ) + { + if( type == CV_32F ) + Jacobi(a.ptr(), a.step, w.ptr(), v.ptr(), v.step, n, ptr); + else + Jacobi(a.ptr(), a.step, w.ptr(), 
v.ptr(), v.step, n, ptr); + u = v; + } + else + { + if( type == CV_32F ) + JacobiSVD(a.ptr(), a.step, w.ptr(), v.ptr(), v.step, m_, n); + else + JacobiSVD(a.ptr(), a.step, w.ptr(), v.ptr(), v.step, m_, n); + u = a; + } + + if( type == CV_32F ) + { + SVBkSb(m_, n, w.ptr(), 0, u.ptr(), u.step, true, + v.ptr(), v.step, true, src2.ptr(), + src2.step, nb, dst.ptr(), dst.step, ptr); + } + else + { + SVBkSb(m_, n, w.ptr(), 0, u.ptr(), u.step, true, + v.ptr(), v.step, true, src2.ptr(), + src2.step, nb, dst.ptr(), dst.step, ptr); + } + result = true; + } + + if( !result ) + dst = Scalar(0); + + return result; +} + + +/////////////////// finding eigenvalues and eigenvectors of a symmetric matrix /////////////// + +bool cv::eigen( InputArray _src, bool computeEvects, OutputArray _evals, OutputArray _evects ) +{ + Mat src = _src.getMat(); + int type = src.type(); + int n = src.rows; + + CV_Assert( src.rows == src.cols ); + CV_Assert (type == CV_32F || type == CV_64F); + + Mat v; + if( computeEvects ) + { + _evects.create(n, n, type); + v = _evects.getMat(); + } + + size_t elemSize = src.elemSize(), astep = alignSize(n*elemSize, 16); + AutoBuffer buf(n*astep + n*5*elemSize + 32); + uchar* ptr = alignPtr((uchar*)buf, 16); + Mat a(n, n, type, ptr, astep), w(n, 1, type, ptr + astep*n); + ptr += astep*n + elemSize*n; + src.copyTo(a); + bool ok = type == CV_32F ? 
+ Jacobi(a.ptr(), a.step, w.ptr(), v.ptr(), v.step, n, ptr) : + Jacobi(a.ptr(), a.step, w.ptr(), v.ptr(), v.step, n, ptr); + + w.copyTo(_evals); + return ok; +} + +bool cv::eigen( InputArray src, OutputArray evals, int, int ) +{ + return eigen(src, false, evals, noArray()); +} + +bool cv::eigen( InputArray src, OutputArray evals, OutputArray evects, int, int) +{ + return eigen(src, true, evals, evects); +} + +namespace cv +{ + +static void _SVDcompute( InputArray _aarr, OutputArray _w, + OutputArray _u, OutputArray _vt, int flags ) +{ + Mat src = _aarr.getMat(); + int m = src.rows, n = src.cols; + int type = src.type(); + bool compute_uv = _u.needed() || _vt.needed(); + bool full_uv = (flags & SVD::FULL_UV) != 0; + + CV_Assert( type == CV_32F || type == CV_64F ); + + if( flags & SVD::NO_UV ) + { + _u.release(); + _vt.release(); + compute_uv = full_uv = false; + } + + bool at = false; + if( m < n ) + { + std::swap(m, n); + at = true; + } + + int urows = full_uv ? m : n; + size_t esz = src.elemSize(), astep = alignSize(m*esz, 16), vstep = alignSize(n*esz, 16); + AutoBuffer _buf(urows*astep + n*vstep + n*esz + 32); + uchar* buf = alignPtr((uchar*)_buf, 16); + Mat temp_a(n, m, type, buf, astep); + Mat temp_w(n, 1, type, buf + urows*astep); + Mat temp_u(urows, m, type, buf, astep), temp_v; + + if( compute_uv ) + temp_v = Mat(n, n, type, alignPtr(buf + urows*astep + n*esz, 16), vstep); + + if( !at ) + transpose(src, temp_a); + else + src.copyTo(temp_a); + + if( type == CV_32F ) + { + JacobiSVD(temp_a.ptr(), temp_a.step, temp_w.ptr(), + temp_v.ptr(), temp_v.step, m, n, compute_uv ? urows : 0); + } + else + { + JacobiSVD(temp_a.ptr(), temp_a.step, temp_w.ptr(), + temp_v.ptr(), temp_v.step, m, n, compute_uv ? 
urows : 0); + } + temp_w.copyTo(_w); + if( compute_uv ) + { + if( !at ) + { + transpose(temp_u, _u); + temp_v.copyTo(_vt); + } + else + { + transpose(temp_v, _u); + temp_u.copyTo(_vt); + } + } +} + + +void SVD::compute( InputArray a, OutputArray w, OutputArray u, OutputArray vt, int flags ) +{ + _SVDcompute(a, w, u, vt, flags); +} + +void SVD::compute( InputArray a, OutputArray w, int flags ) +{ + _SVDcompute(a, w, noArray(), noArray(), flags); +} + +void SVD::backSubst( InputArray _w, InputArray _u, InputArray _vt, + InputArray _rhs, OutputArray _dst ) +{ + Mat w = _w.getMat(), u = _u.getMat(), vt = _vt.getMat(), rhs = _rhs.getMat(); + int type = w.type(), esz = (int)w.elemSize(); + int m = u.rows, n = vt.cols, nb = rhs.data ? rhs.cols : m, nm = std::min(m, n); + size_t wstep = w.rows == 1 ? esz : w.cols == 1 ? (size_t)w.step : (size_t)w.step + esz; + AutoBuffer buffer(nb*sizeof(double) + 16); + CV_Assert( w.type() == u.type() && u.type() == vt.type() && u.data && vt.data && w.data ); + CV_Assert( u.cols >= nm && vt.rows >= nm && + (w.size() == Size(nm, 1) || w.size() == Size(1, nm) || w.size() == Size(vt.rows, u.cols)) ); + CV_Assert( rhs.data == 0 || (rhs.type() == type && rhs.rows == m) ); + + _dst.create( n, nb, type ); + Mat dst = _dst.getMat(); + if( type == CV_32F ) + SVBkSb(m, n, w.ptr(), wstep, u.ptr(), u.step, false, + vt.ptr(), vt.step, true, rhs.ptr(), rhs.step, nb, + dst.ptr(), dst.step, buffer); + else if( type == CV_64F ) + SVBkSb(m, n, w.ptr(), wstep, u.ptr(), u.step, false, + vt.ptr(), vt.step, true, rhs.ptr(), rhs.step, nb, + dst.ptr(), dst.step, buffer); + else + CV_Error( CV_StsUnsupportedFormat, "" ); +} + + +SVD& SVD::operator ()(InputArray a, int flags) +{ + _SVDcompute(a, w, u, vt, flags); + return *this; +} + + +void SVD::backSubst( InputArray rhs, OutputArray dst ) const +{ + backSubst( w, u, vt, rhs, dst ); +} + +} + + +void cv::SVDecomp(InputArray src, OutputArray w, OutputArray u, OutputArray vt, int flags) +{ + SVD::compute(src, w, u, 
vt, flags); +} + +void cv::SVBackSubst(InputArray w, InputArray u, InputArray vt, InputArray rhs, OutputArray dst) +{ + SVD::backSubst(w, u, vt, rhs, dst); +} + + +CV_IMPL double +cvDet( const CvArr* arr ) +{ + if( CV_IS_MAT(arr) && ((CvMat*)arr)->rows <= 3 ) + { + CvMat* mat = (CvMat*)arr; + int type = CV_MAT_TYPE(mat->type); + int rows = mat->rows; + uchar* m = mat->data.ptr; + int step = mat->step; + CV_Assert( rows == mat->cols ); + + #define Mf(y, x) ((float*)(m + y*step))[x] + #define Md(y, x) ((double*)(m + y*step))[x] + + if( type == CV_32F ) + { + if( rows == 2 ) + return det2(Mf); + if( rows == 3 ) + return det3(Mf); + } + else if( type == CV_64F ) + { + if( rows == 2 ) + return det2(Md); + if( rows == 3 ) + return det3(Md); + } + return cv::determinant(cv::Mat(mat)); + } + return cv::determinant(cv::cvarrToMat(arr)); +} + + +CV_IMPL double +cvInvert( const CvArr* srcarr, CvArr* dstarr, int method ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + + CV_Assert( src.type() == dst.type() && src.rows == dst.cols && src.cols == dst.rows ); + return cv::invert( src, dst, method == CV_CHOLESKY ? cv::DECOMP_CHOLESKY : + method == CV_SVD || method == CV_SVD_SYM ? cv::DECOMP_SVD : cv::DECOMP_LU ); +} + + +CV_IMPL int +cvSolve( const CvArr* Aarr, const CvArr* barr, CvArr* xarr, int method ) +{ + cv::Mat A = cv::cvarrToMat(Aarr), b = cv::cvarrToMat(barr), x = cv::cvarrToMat(xarr); + + CV_Assert( A.type() == x.type() && A.cols == x.rows && x.cols == b.cols ); + bool is_normal = (method & CV_NORMAL) != 0; + method &= ~CV_NORMAL; + return cv::solve( A, b, x, (method == CV_CHOLESKY ? cv::DECOMP_CHOLESKY : + method == CV_SVD || method == CV_SVD_SYM ? cv::DECOMP_SVD : + A.rows > A.cols ? cv::DECOMP_QR : cv::DECOMP_LU) + (is_normal ? 
cv::DECOMP_NORMAL : 0) ); +} + + +CV_IMPL void +cvEigenVV( CvArr* srcarr, CvArr* evectsarr, CvArr* evalsarr, double, + int lowindex, int highindex) +{ + cv::Mat src = cv::cvarrToMat(srcarr), evals0 = cv::cvarrToMat(evalsarr), evals = evals0; + if( evectsarr ) + { + cv::Mat evects0 = cv::cvarrToMat(evectsarr), evects = evects0; + eigen(src, evals, evects, lowindex, highindex); + if( evects0.data != evects.data ) + { + uchar* p = evects0.data; + evects.convertTo(evects0, evects0.type()); + CV_Assert( p == evects0.data ); + } + } + else + eigen(src, evals, lowindex, highindex); + if( evals0.data != evals.data ) + { + uchar* p = evals0.data; + if( evals0.size() == evals.size() ) + evals.convertTo(evals0, evals0.type()); + else if( evals0.type() == evals.type() ) + cv::transpose(evals, evals0); + else + cv::Mat(evals.t()).convertTo(evals0, evals0.type()); + CV_Assert( p == evals0.data ); + } +} + + +CV_IMPL void +cvSVD( CvArr* aarr, CvArr* warr, CvArr* uarr, CvArr* varr, int flags ) +{ + cv::Mat a = cv::cvarrToMat(aarr), w = cv::cvarrToMat(warr), u, v; + int m = a.rows, n = a.cols, type = a.type(), mn = std::max(m, n), nm = std::min(m, n); + + CV_Assert( w.type() == type && + (w.size() == cv::Size(nm,1) || w.size() == cv::Size(1, nm) || + w.size() == cv::Size(nm, nm) || w.size() == cv::Size(n, m)) ); + + cv::SVD svd; + + if( w.size() == cv::Size(nm, 1) ) + svd.w = cv::Mat(nm, 1, type, w.data ); + else if( w.isContinuous() ) + svd.w = w; + + if( uarr ) + { + u = cv::cvarrToMat(uarr); + CV_Assert( u.type() == type ); + svd.u = u; + } + + if( varr ) + { + v = cv::cvarrToMat(varr); + CV_Assert( v.type() == type ); + svd.vt = v; + } + + svd(a, ((flags & CV_SVD_MODIFY_A) ? cv::SVD::MODIFY_A : 0) | + ((!svd.u.data && !svd.vt.data) ? cv::SVD::NO_UV : 0) | + ((m != n && (svd.u.size() == cv::Size(mn, mn) || + svd.vt.size() == cv::Size(mn, mn))) ? 
cv::SVD::FULL_UV : 0)); + + if( u.data ) + { + if( flags & CV_SVD_U_T ) + cv::transpose( svd.u, u ); + else if( u.data != svd.u.data ) + { + CV_Assert( u.size() == svd.u.size() ); + svd.u.copyTo(u); + } + } + + if( v.data ) + { + if( !(flags & CV_SVD_V_T) ) + cv::transpose( svd.vt, v ); + else if( v.data != svd.vt.data ) + { + CV_Assert( v.size() == svd.vt.size() ); + svd.vt.copyTo(v); + } + } + + if( w.data != svd.w.data ) + { + if( w.size() == svd.w.size() ) + svd.w.copyTo(w); + else + { + w = cv::Scalar(0); + cv::Mat wd = w.diag(); + svd.w.copyTo(wd); + } + } +} + + +CV_IMPL void +cvSVBkSb( const CvArr* warr, const CvArr* uarr, + const CvArr* varr, const CvArr* rhsarr, + CvArr* dstarr, int flags ) +{ + cv::Mat w = cv::cvarrToMat(warr), u = cv::cvarrToMat(uarr), + v = cv::cvarrToMat(varr), rhs, + dst = cv::cvarrToMat(dstarr), dst0 = dst; + if( flags & CV_SVD_U_T ) + { + cv::Mat tmp; + transpose(u, tmp); + u = tmp; + } + if( !(flags & CV_SVD_V_T) ) + { + cv::Mat tmp; + transpose(v, tmp); + v = tmp; + } + if( rhsarr ) + rhs = cv::cvarrToMat(rhsarr); + + cv::SVD::backSubst(w, u, v, rhs, dst); + CV_Assert( dst.data == dst0.data ); +} diff --git a/opencv/core/mathfuncs.cpp b/opencv/core/mathfuncs.cpp new file mode 100644 index 0000000..88056e0 --- /dev/null +++ b/opencv/core/mathfuncs.cpp @@ -0,0 +1,2411 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" + + +namespace cv +{ + +static const int MAX_BLOCK_SIZE = 1024; +typedef void (*MathFunc)(const void* src, void* dst, int len); + +float fastAtan2( float y, float x ) +{ + double a, x2 = (double)x*x, y2 = (double)y*y; + if( y2 <= x2 ) + { + a = (180./CV_PI)*x*y*(x2 + 0.43157974*y2)/(x2*x2 + y2*(0.76443945*x2 + 0.05831938*y2) + DBL_EPSILON); + return (float)(x < 0 ? a + 180 : y >= 0 ? 
a : 360+a); + } + a = (180./CV_PI)*x*y*(y2 + 0.43157974*x2)/(y2*y2 + x2*(0.76443945*y2 + 0.05831938*x2) + DBL_EPSILON); + return (float)(y >= 0 ? 90 - a : 270 - a); +} + +static void FastAtan2_32f(const float *Y, const float *X, float *angle, int len, bool angleInDegrees=true ) +{ + int i = 0; + float scale = angleInDegrees ? (float)(180/CV_PI) : 1.f; + +#if CV_SSE2 + if( USE_SSE2 ) + { + Cv32suf iabsmask; iabsmask.i = 0x7fffffff; + __m128 eps = _mm_set1_ps((float)DBL_EPSILON), absmask = _mm_set1_ps(iabsmask.f); + __m128 _90 = _mm_set1_ps((float)(CV_PI*0.5)), _180 = _mm_set1_ps((float)CV_PI), _360 = _mm_set1_ps((float)(CV_PI*2)); + __m128 zero = _mm_setzero_ps(), scale4 = _mm_set1_ps(scale); + __m128 p0 = _mm_set1_ps(0.43157974f), q0 = _mm_set1_ps(0.76443945f), q1 = _mm_set1_ps(0.05831938f); + + for( ; i <= len - 4; i += 4 ) + { + __m128 x4 = _mm_loadu_ps(X + i), y4 = _mm_loadu_ps(Y + i); + __m128 xq4 = _mm_mul_ps(x4, x4), yq4 = _mm_mul_ps(y4, y4); + __m128 xly = _mm_cmplt_ps(xq4, yq4); + __m128 t = _mm_min_ps(xq4, yq4); + xq4 = _mm_max_ps(xq4, yq4); yq4 = t; + __m128 z4 = _mm_div_ps(_mm_mul_ps(_mm_mul_ps(x4, y4), _mm_add_ps(xq4, _mm_mul_ps(yq4, p0))), + _mm_add_ps(eps, _mm_add_ps(_mm_mul_ps(xq4, xq4), + _mm_mul_ps(yq4, _mm_add_ps(_mm_mul_ps(xq4, q0), + _mm_mul_ps(yq4, q1)))))); + + // a4 <- x < y ? 90 : 0; + __m128 a4 = _mm_and_ps(xly, _90); + // a4 <- (y < 0 ? 360 - a4 : a4) == ((x < y ? y < 0 ? 270 : 90) : (y < 0 ? 360 : 0)) + __m128 mask = _mm_cmplt_ps(y4, zero); + a4 = _mm_or_ps(_mm_and_ps(_mm_sub_ps(_360, a4), mask), _mm_andnot_ps(mask, a4)); + // a4 <- (x < 0 && !(x < y) ? 180 : a4) + mask = _mm_andnot_ps(xly, _mm_cmplt_ps(x4, zero)); + a4 = _mm_or_ps(_mm_and_ps(_180, mask), _mm_andnot_ps(mask, a4)); + + // a4 <- (x < y ? 
a4 - z4 : a4 + z4) + a4 = _mm_mul_ps(_mm_add_ps(_mm_xor_ps(z4, _mm_andnot_ps(absmask, xly)), a4), scale4); + _mm_storeu_ps(angle + i, a4); + } + } +#endif + + for( ; i < len; i++ ) + { + double x = X[i], y = Y[i], x2 = x*x, y2 = y*y, a; + + if( y2 <= x2 ) + a = (x < 0 ? CV_PI : y >= 0 ? 0 : CV_PI*2) + + x*y*(x2 + 0.43157974*y2)/(x2*x2 + y2*(0.76443945*x2 + 0.05831938*y2) + (float)DBL_EPSILON); + else + { + a = (y >= 0 ? CV_PI*0.5 : CV_PI*1.5) - + x*y*(y2 + 0.43157974*x2)/(y2*y2 + x2*(0.76443945*y2 + 0.05831938*x2) + (float)DBL_EPSILON); + } + angle[i] = (float)(a*scale); + } +} + + +/* ************************************************************************** *\ + Fast cube root by Ken Turkowski + (http://www.worldserver.com/turk/computergraphics/papers.html) +\* ************************************************************************** */ +float cubeRoot( float value ) +{ + float fr; + Cv32suf v, m; + int ix, s; + int ex, shx; + + v.f = value; + ix = v.i & 0x7fffffff; + s = v.i & 0x80000000; + ex = (ix >> 23) - 127; + shx = ex % 3; + shx -= shx >= 0 ? 3 : 0; + ex = (ex - shx) / 3; /* exponent of cube root */ + v.i = (ix & ((1<<23)-1)) | ((shx + 127)<<23); + fr = v.f; + + /* 0.125 <= fr < 1.0 */ + /* Use quartic rational polynomial with error < 2^(-24) */ + fr = (float)(((((45.2548339756803022511987494 * fr + + 192.2798368355061050458134625) * fr + + 119.1654824285581628956914143) * fr + + 13.43250139086239872172837314) * fr + + 0.1636161226585754240958355063)/ + ((((14.80884093219134573786480845 * fr + + 151.9714051044435648658557668) * fr + + 168.5254414101568283957668343) * fr + + 33.9905941350215598754191872) * fr + + 1.0)); + + /* fr *= 2^ex * sign */ + m.f = value; + v.f = fr; + v.i = (v.i + (ex << 23) + s) & (m.i*2 != 0 ? 
-1 : 0); + return v.f; +} + +static void Magnitude_32f(const float* x, const float* y, float* mag, int len) +{ + int i = 0; + +#if CV_SSE + if( USE_SSE2 ) + { + for( ; i <= len - 8; i += 8 ) + { + __m128 x0 = _mm_loadu_ps(x + i), x1 = _mm_loadu_ps(x + i + 4); + __m128 y0 = _mm_loadu_ps(y + i), y1 = _mm_loadu_ps(y + i + 4); + x0 = _mm_add_ps(_mm_mul_ps(x0, x0), _mm_mul_ps(y0, y0)); + x1 = _mm_add_ps(_mm_mul_ps(x1, x1), _mm_mul_ps(y1, y1)); + x0 = _mm_sqrt_ps(x0); x1 = _mm_sqrt_ps(x1); + _mm_storeu_ps(mag + i, x0); _mm_storeu_ps(mag + i + 4, x1); + } + } +#endif + + for( ; i < len; i++ ) + { + float x0 = x[i], y0 = y[i]; + mag[i] = std::sqrt(x0*x0 + y0*y0); + } +} + +static void Magnitude_64f(const double* x, const double* y, double* mag, int len) +{ + int i = 0; + +#if CV_SSE2 + if( USE_SSE2 ) + { + for( ; i <= len - 4; i += 4 ) + { + __m128d x0 = _mm_loadu_pd(x + i), x1 = _mm_loadu_pd(x + i + 2); + __m128d y0 = _mm_loadu_pd(y + i), y1 = _mm_loadu_pd(y + i + 2); + x0 = _mm_add_pd(_mm_mul_pd(x0, x0), _mm_mul_pd(y0, y0)); + x1 = _mm_add_pd(_mm_mul_pd(x1, x1), _mm_mul_pd(y1, y1)); + x0 = _mm_sqrt_pd(x0); x1 = _mm_sqrt_pd(x1); + _mm_storeu_pd(mag + i, x0); _mm_storeu_pd(mag + i + 2, x1); + } + } +#endif + + for( ; i < len; i++ ) + { + double x0 = x[i], y0 = y[i]; + mag[i] = std::sqrt(x0*x0 + y0*y0); + } +} + + +static void InvSqrt_32f(const float* src, float* dst, int len) +{ + int i = 0; + +#if CV_SSE + if( USE_SSE2 ) + { + __m128 _0_5 = _mm_set1_ps(0.5f), _1_5 = _mm_set1_ps(1.5f); + if( (((size_t)src|(size_t)dst) & 15) == 0 ) + for( ; i <= len - 8; i += 8 ) + { + __m128 t0 = _mm_load_ps(src + i), t1 = _mm_load_ps(src + i + 4); + __m128 h0 = _mm_mul_ps(t0, _0_5), h1 = _mm_mul_ps(t1, _0_5); + t0 = _mm_rsqrt_ps(t0); t1 = _mm_rsqrt_ps(t1); + t0 = _mm_mul_ps(t0, _mm_sub_ps(_1_5, _mm_mul_ps(_mm_mul_ps(t0,t0),h0))); + t1 = _mm_mul_ps(t1, _mm_sub_ps(_1_5, _mm_mul_ps(_mm_mul_ps(t1,t1),h1))); + _mm_store_ps(dst + i, t0); _mm_store_ps(dst + i + 4, t1); + } + else + for( ; i <= 
len - 8; i += 8 ) + { + __m128 t0 = _mm_loadu_ps(src + i), t1 = _mm_loadu_ps(src + i + 4); + __m128 h0 = _mm_mul_ps(t0, _0_5), h1 = _mm_mul_ps(t1, _0_5); + t0 = _mm_rsqrt_ps(t0); t1 = _mm_rsqrt_ps(t1); + t0 = _mm_mul_ps(t0, _mm_sub_ps(_1_5, _mm_mul_ps(_mm_mul_ps(t0,t0),h0))); + t1 = _mm_mul_ps(t1, _mm_sub_ps(_1_5, _mm_mul_ps(_mm_mul_ps(t1,t1),h1))); + _mm_storeu_ps(dst + i, t0); _mm_storeu_ps(dst + i + 4, t1); + } + } +#endif + + for( ; i < len; i++ ) + dst[i] = 1/std::sqrt(src[i]); +} + + +static void InvSqrt_64f(const double* src, double* dst, int len) +{ + for( int i = 0; i < len; i++ ) + dst[i] = 1/std::sqrt(src[i]); +} + + +static void Sqrt_32f(const float* src, float* dst, int len) +{ + int i = 0; + +#if CV_SSE + if( USE_SSE2 ) + { + if( (((size_t)src|(size_t)dst) & 15) == 0 ) + for( ; i <= len - 8; i += 8 ) + { + __m128 t0 = _mm_load_ps(src + i), t1 = _mm_load_ps(src + i + 4); + t0 = _mm_sqrt_ps(t0); t1 = _mm_sqrt_ps(t1); + _mm_store_ps(dst + i, t0); _mm_store_ps(dst + i + 4, t1); + } + else + for( ; i <= len - 8; i += 8 ) + { + __m128 t0 = _mm_loadu_ps(src + i), t1 = _mm_loadu_ps(src + i + 4); + t0 = _mm_sqrt_ps(t0); t1 = _mm_sqrt_ps(t1); + _mm_storeu_ps(dst + i, t0); _mm_storeu_ps(dst + i + 4, t1); + } + } +#endif + + for( ; i < len; i++ ) + dst[i] = std::sqrt(src[i]); +} + + +static void Sqrt_64f(const double* src, double* dst, int len) +{ + int i = 0; + +#if CV_SSE2 + if( USE_SSE2 ) + { + if( (((size_t)src|(size_t)dst) & 15) == 0 ) + for( ; i <= len - 4; i += 4 ) + { + __m128d t0 = _mm_load_pd(src + i), t1 = _mm_load_pd(src + i + 2); + t0 = _mm_sqrt_pd(t0); t1 = _mm_sqrt_pd(t1); + _mm_store_pd(dst + i, t0); _mm_store_pd(dst + i + 2, t1); + } + else + for( ; i <= len - 4; i += 4 ) + { + __m128d t0 = _mm_loadu_pd(src + i), t1 = _mm_loadu_pd(src + i + 2); + t0 = _mm_sqrt_pd(t0); t1 = _mm_sqrt_pd(t1); + _mm_storeu_pd(dst + i, t0); _mm_storeu_pd(dst + i + 2, t1); + } + } +#endif + + for( ; i < len; i++ ) + dst[i] = std::sqrt(src[i]); +} + + 
+/****************************************************************************************\ +* Cartezian -> Polar * +\****************************************************************************************/ + +void magnitude( InputArray src1, InputArray src2, OutputArray dst ) +{ + Mat X = src1.getMat(), Y = src2.getMat(); + int type = X.type(), depth = X.depth(), cn = X.channels(); + CV_Assert( X.size == Y.size && type == Y.type() && (depth == CV_32F || depth == CV_64F)); + dst.create(X.dims, X.size, X.type()); + Mat Mag = dst.getMat(); + + const Mat* arrays[] = {&X, &Y, &Mag, 0}; + uchar* ptrs[3]; + NAryMatIterator it(arrays, ptrs); + int len = (int)it.size*cn; + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + { + if( depth == CV_32F ) + { + const float *x = (const float*)ptrs[0], *y = (const float*)ptrs[1]; + float *mag = (float*)ptrs[2]; + Magnitude_32f( x, y, mag, len ); + } + else + { + const double *x = (const double*)ptrs[0], *y = (const double*)ptrs[1]; + double *mag = (double*)ptrs[2]; + Magnitude_64f( x, y, mag, len ); + } + } +} + + +void phase( InputArray src1, InputArray src2, OutputArray dst, bool angleInDegrees ) +{ + Mat X = src1.getMat(), Y = src2.getMat(); + int type = X.type(), depth = X.depth(), cn = X.channels(); + CV_Assert( X.size == Y.size && type == Y.type() && (depth == CV_32F || depth == CV_64F)); + dst.create( X.dims, X.size, type ); + Mat Angle = dst.getMat(); + + const Mat* arrays[] = {&X, &Y, &Angle, 0}; + uchar* ptrs[3]; + NAryMatIterator it(arrays, ptrs); + cv::AutoBuffer _buf; + float* buf[2] = {0, 0}; + int j, k, total = (int)(it.size*cn), blockSize = total; + size_t esz1 = X.elemSize1(); + + if( depth == CV_64F ) + { + blockSize = std::min(blockSize, ((BLOCK_SIZE+cn-1)/cn)*cn); + _buf.allocate(blockSize*2); + buf[0] = _buf; + buf[1] = buf[0] + blockSize; + } + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + { + for( j = 0; j < total; j += blockSize ) + { + int len = std::min(total - j, blockSize); + if( depth == CV_32F ) 
+ { + const float *x = (const float*)ptrs[0], *y = (const float*)ptrs[1]; + float *angle = (float*)ptrs[2]; + FastAtan2_32f( y, x, angle, len, angleInDegrees ); + } + else + { + const double *x = (const double*)ptrs[0], *y = (const double*)ptrs[1]; + double *angle = (double*)ptrs[2]; + for( k = 0; k < len; k++ ) + { + buf[0][k] = (float)x[k]; + buf[1][k] = (float)y[k]; + } + + FastAtan2_32f( buf[1], buf[0], buf[0], len, angleInDegrees ); + for( k = 0; k < len; k++ ) + angle[k] = buf[0][k]; + } + ptrs[0] += len*esz1; + ptrs[1] += len*esz1; + ptrs[2] += len*esz1; + } + } +} + + +void cartToPolar( InputArray src1, InputArray src2, + OutputArray dst1, OutputArray dst2, bool angleInDegrees ) +{ + Mat X = src1.getMat(), Y = src2.getMat(); + int type = X.type(), depth = X.depth(), cn = X.channels(); + CV_Assert( X.size == Y.size && type == Y.type() && (depth == CV_32F || depth == CV_64F)); + dst1.create( X.dims, X.size, type ); + dst2.create( X.dims, X.size, type ); + Mat Mag = dst1.getMat(), Angle = dst2.getMat(); + + const Mat* arrays[] = {&X, &Y, &Mag, &Angle, 0}; + uchar* ptrs[4]; + NAryMatIterator it(arrays, ptrs); + cv::AutoBuffer _buf; + float* buf[2] = {0, 0}; + int j, k, total = (int)(it.size*cn), blockSize = std::min(total, ((BLOCK_SIZE+cn-1)/cn)*cn); + size_t esz1 = X.elemSize1(); + + if( depth == CV_64F ) + { + _buf.allocate(blockSize*2); + buf[0] = _buf; + buf[1] = buf[0] + blockSize; + } + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + { + for( j = 0; j < total; j += blockSize ) + { + int len = std::min(total - j, blockSize); + if( depth == CV_32F ) + { + const float *x = (const float*)ptrs[0], *y = (const float*)ptrs[1]; + float *mag = (float*)ptrs[2], *angle = (float*)ptrs[3]; + Magnitude_32f( x, y, mag, len ); + FastAtan2_32f( y, x, angle, len, angleInDegrees ); + } + else + { + const double *x = (const double*)ptrs[0], *y = (const double*)ptrs[1]; + double *angle = (double*)ptrs[3]; + + Magnitude_64f(x, y, (double*)ptrs[2], len); + for( k = 0; k < 
len; k++ ) + { + buf[0][k] = (float)x[k]; + buf[1][k] = (float)y[k]; + } + + FastAtan2_32f( buf[1], buf[0], buf[0], len, angleInDegrees ); + for( k = 0; k < len; k++ ) + angle[k] = buf[0][k]; + } + ptrs[0] += len*esz1; + ptrs[1] += len*esz1; + ptrs[2] += len*esz1; + ptrs[3] += len*esz1; + } + } +} + + +/****************************************************************************************\ +* Polar -> Cartezian * +\****************************************************************************************/ + +static void SinCos_32f( const float *angle, float *sinval, float* cosval, + int len, int angle_in_degrees ) +{ + const int N = 64; + + static const double sin_table[] = + { + 0.00000000000000000000, 0.09801714032956060400, + 0.19509032201612825000, 0.29028467725446233000, + 0.38268343236508978000, 0.47139673682599764000, + 0.55557023301960218000, 0.63439328416364549000, + 0.70710678118654746000, 0.77301045336273699000, + 0.83146961230254524000, 0.88192126434835494000, + 0.92387953251128674000, 0.95694033573220894000, + 0.98078528040323043000, 0.99518472667219682000, + 1.00000000000000000000, 0.99518472667219693000, + 0.98078528040323043000, 0.95694033573220894000, + 0.92387953251128674000, 0.88192126434835505000, + 0.83146961230254546000, 0.77301045336273710000, + 0.70710678118654757000, 0.63439328416364549000, + 0.55557023301960218000, 0.47139673682599786000, + 0.38268343236508989000, 0.29028467725446239000, + 0.19509032201612861000, 0.09801714032956082600, + 0.00000000000000012246, -0.09801714032956059000, + -0.19509032201612836000, -0.29028467725446211000, + -0.38268343236508967000, -0.47139673682599764000, + -0.55557023301960196000, -0.63439328416364527000, + -0.70710678118654746000, -0.77301045336273666000, + -0.83146961230254524000, -0.88192126434835494000, + -0.92387953251128652000, -0.95694033573220882000, + -0.98078528040323032000, -0.99518472667219693000, + -1.00000000000000000000, -0.99518472667219693000, + -0.98078528040323043000, 
-0.95694033573220894000, + -0.92387953251128663000, -0.88192126434835505000, + -0.83146961230254546000, -0.77301045336273688000, + -0.70710678118654768000, -0.63439328416364593000, + -0.55557023301960218000, -0.47139673682599792000, + -0.38268343236509039000, -0.29028467725446250000, + -0.19509032201612872000, -0.09801714032956050600, + }; + + static const double k2 = (2*CV_PI)/N; + + static const double sin_a0 = -0.166630293345647*k2*k2*k2; + static const double sin_a2 = k2; + + static const double cos_a0 = -0.499818138450326*k2*k2; + /*static const double cos_a2 = 1;*/ + + double k1; + int i; + + if( !angle_in_degrees ) + k1 = N/(2*CV_PI); + else + k1 = N/360.; + + for( i = 0; i < len; i++ ) + { + double t = angle[i]*k1; + int it = cvRound(t); + t -= it; + int sin_idx = it & (N - 1); + int cos_idx = (N/4 - sin_idx) & (N - 1); + + double sin_b = (sin_a0*t*t + sin_a2)*t; + double cos_b = cos_a0*t*t + 1; + + double sin_a = sin_table[sin_idx]; + double cos_a = sin_table[cos_idx]; + + double sin_val = sin_a*cos_b + cos_a*sin_b; + double cos_val = cos_a*cos_b - sin_a*sin_b; + + sinval[i] = (float)sin_val; + cosval[i] = (float)cos_val; + } +} + + +void polarToCart( InputArray src1, InputArray src2, + OutputArray dst1, OutputArray dst2, bool angleInDegrees ) +{ + Mat Mag = src1.getMat(), Angle = src2.getMat(); + int type = Angle.type(), depth = Angle.depth(), cn = Angle.channels(); + CV_Assert( Mag.empty() || (Angle.size == Mag.size && type == Mag.type() && (depth == CV_32F || depth == CV_64F))); + dst1.create( Angle.dims, Angle.size, type ); + dst2.create( Angle.dims, Angle.size, type ); + Mat X = dst1.getMat(), Y = dst2.getMat(); + + const Mat* arrays[] = {&Mag, &Angle, &X, &Y, 0}; + uchar* ptrs[4]; + NAryMatIterator it(arrays, ptrs); + cv::AutoBuffer _buf; + float* buf[2] = {0, 0}; + int j, k, total = (int)(it.size*cn), blockSize = std::min(total, ((BLOCK_SIZE+cn-1)/cn)*cn); + size_t esz1 = Angle.elemSize1(); + + if( depth == CV_64F ) + { + _buf.allocate(blockSize*2); 
+ buf[0] = _buf; + buf[1] = buf[0] + blockSize; + } + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + { + for( j = 0; j < total; j += blockSize ) + { + int len = std::min(total - j, blockSize); + if( depth == CV_32F ) + { + const float *mag = (const float*)ptrs[0], *angle = (const float*)ptrs[1]; + float *x = (float*)ptrs[2], *y = (float*)ptrs[3]; + + SinCos_32f( angle, y, x, len, angleInDegrees ); + if( mag ) + for( k = 0; k < len; k++ ) + { + float m = mag[k]; + x[k] *= m; y[k] *= m; + } + } + else + { + const double *mag = (const double*)ptrs[0], *angle = (const double*)ptrs[1]; + double *x = (double*)ptrs[2], *y = (double*)ptrs[3]; + + for( k = 0; k < len; k++ ) + buf[0][k] = (float)angle[k]; + + SinCos_32f( buf[0], buf[1], buf[0], len, angleInDegrees ); + if( mag ) + for( k = 0; k < len; k++ ) + { + double m = mag[k]; + x[k] = buf[0][k]*m; y[k] = buf[1][k]*m; + } + else + for( k = 0; k < len; k++ ) + { + x[k] = buf[0][k]; y[k] = buf[1][k]; + } + } + + if( ptrs[0] ) + ptrs[0] += len*esz1; + ptrs[1] += len*esz1; + ptrs[2] += len*esz1; + ptrs[3] += len*esz1; + } + } +} + +/****************************************************************************************\ +* E X P * +\****************************************************************************************/ + +typedef union +{ + struct { +#if ( defined( WORDS_BIGENDIAN ) && !defined( OPENCV_UNIVERSAL_BUILD ) ) || defined( __BIG_ENDIAN__ ) + int hi; + int lo; +#else + int lo; + int hi; +#endif + } i; + double d; +} +DBLINT; + +#ifndef HAVE_IPP + +#define EXPTAB_SCALE 6 +#define EXPTAB_MASK ((1 << EXPTAB_SCALE) - 1) + +#define EXPPOLY_32F_A0 .9670371139572337719125840413672004409288e-2 + +static const double expTab[] = { + 1.0 * EXPPOLY_32F_A0, + 1.0108892860517004600204097905619 * EXPPOLY_32F_A0, + 1.0218971486541166782344801347833 * EXPPOLY_32F_A0, + 1.0330248790212284225001082839705 * EXPPOLY_32F_A0, + 1.0442737824274138403219664787399 * EXPPOLY_32F_A0, + 1.0556451783605571588083413251529 * 
EXPPOLY_32F_A0, + 1.0671404006768236181695211209928 * EXPPOLY_32F_A0, + 1.0787607977571197937406800374385 * EXPPOLY_32F_A0, + 1.0905077326652576592070106557607 * EXPPOLY_32F_A0, + 1.1023825833078409435564142094256 * EXPPOLY_32F_A0, + 1.1143867425958925363088129569196 * EXPPOLY_32F_A0, + 1.126521618608241899794798643787 * EXPPOLY_32F_A0, + 1.1387886347566916537038302838415 * EXPPOLY_32F_A0, + 1.151189229952982705817759635202 * EXPPOLY_32F_A0, + 1.1637248587775775138135735990922 * EXPPOLY_32F_A0, + 1.1763969916502812762846457284838 * EXPPOLY_32F_A0, + 1.1892071150027210667174999705605 * EXPPOLY_32F_A0, + 1.2021567314527031420963969574978 * EXPPOLY_32F_A0, + 1.2152473599804688781165202513388 * EXPPOLY_32F_A0, + 1.2284805361068700056940089577928 * EXPPOLY_32F_A0, + 1.2418578120734840485936774687266 * EXPPOLY_32F_A0, + 1.2553807570246910895793906574423 * EXPPOLY_32F_A0, + 1.2690509571917332225544190810323 * EXPPOLY_32F_A0, + 1.2828700160787782807266697810215 * EXPPOLY_32F_A0, + 1.2968395546510096659337541177925 * EXPPOLY_32F_A0, + 1.3109612115247643419229917863308 * EXPPOLY_32F_A0, + 1.3252366431597412946295370954987 * EXPPOLY_32F_A0, + 1.3396675240533030053600306697244 * EXPPOLY_32F_A0, + 1.3542555469368927282980147401407 * EXPPOLY_32F_A0, + 1.3690024229745906119296011329822 * EXPPOLY_32F_A0, + 1.3839098819638319548726595272652 * EXPPOLY_32F_A0, + 1.3989796725383111402095281367152 * EXPPOLY_32F_A0, + 1.4142135623730950488016887242097 * EXPPOLY_32F_A0, + 1.4296133383919700112350657782751 * EXPPOLY_32F_A0, + 1.4451808069770466200370062414717 * EXPPOLY_32F_A0, + 1.4609177941806469886513028903106 * EXPPOLY_32F_A0, + 1.476826145939499311386907480374 * EXPPOLY_32F_A0, + 1.4929077282912648492006435314867 * EXPPOLY_32F_A0, + 1.5091644275934227397660195510332 * EXPPOLY_32F_A0, + 1.5255981507445383068512536895169 * EXPPOLY_32F_A0, + 1.5422108254079408236122918620907 * EXPPOLY_32F_A0, + 1.5590044002378369670337280894749 * EXPPOLY_32F_A0, + 1.5759808451078864864552701601819 * 
EXPPOLY_32F_A0, + 1.5931421513422668979372486431191 * EXPPOLY_32F_A0, + 1.6104903319492543081795206673574 * EXPPOLY_32F_A0, + 1.628027421857347766848218522014 * EXPPOLY_32F_A0, + 1.6457554781539648445187567247258 * EXPPOLY_32F_A0, + 1.6636765803267364350463364569764 * EXPPOLY_32F_A0, + 1.6817928305074290860622509524664 * EXPPOLY_32F_A0, + 1.7001063537185234695013625734975 * EXPPOLY_32F_A0, + 1.7186192981224779156293443764563 * EXPPOLY_32F_A0, + 1.7373338352737062489942020818722 * EXPPOLY_32F_A0, + 1.7562521603732994831121606193753 * EXPPOLY_32F_A0, + 1.7753764925265212525505592001993 * EXPPOLY_32F_A0, + 1.7947090750031071864277032421278 * EXPPOLY_32F_A0, + 1.8142521755003987562498346003623 * EXPPOLY_32F_A0, + 1.8340080864093424634870831895883 * EXPPOLY_32F_A0, + 1.8539791250833855683924530703377 * EXPPOLY_32F_A0, + 1.8741676341102999013299989499544 * EXPPOLY_32F_A0, + 1.8945759815869656413402186534269 * EXPPOLY_32F_A0, + 1.9152065613971472938726112702958 * EXPPOLY_32F_A0, + 1.9360617934922944505980559045667 * EXPPOLY_32F_A0, + 1.9571441241754002690183222516269 * EXPPOLY_32F_A0, + 1.9784560263879509682582499181312 * EXPPOLY_32F_A0, +}; + + +// the code below uses _mm_cast* intrinsics, which are not avialable on VS2005 +#if (defined _MSC_VER && _MSC_VER < 1500) || \ + (!defined __APPLE__ && defined __GNUC__ && __GNUC__*100 + __GNUC_MINOR__ < 402) +#undef CV_SSE2 +#define CV_SSE2 0 +#endif + +static const double exp_prescale = 1.4426950408889634073599246810019 * (1 << EXPTAB_SCALE); +static const double exp_postscale = 1./(1 << EXPTAB_SCALE); +static const double exp_max_val = 3000.*(1 << EXPTAB_SCALE); // log10(DBL_MAX) < 3000 + +static void Exp_32f( const float *_x, float *y, int n ) +{ + static const float + A4 = (float)(1.000000000000002438532970795181890933776 / EXPPOLY_32F_A0), + A3 = (float)(.6931471805521448196800669615864773144641 / EXPPOLY_32F_A0), + A2 = (float)(.2402265109513301490103372422686535526573 / EXPPOLY_32F_A0), + A1 = 
(float)(.5550339366753125211915322047004666939128e-1 / EXPPOLY_32F_A0); + +#undef EXPPOLY +#define EXPPOLY(x) \ + (((((x) + A1)*(x) + A2)*(x) + A3)*(x) + A4) + + int i = 0; + const Cv32suf* x = (const Cv32suf*)_x; + Cv32suf buf[4]; + +#if CV_SSE2 + if( n >= 8 && USE_SSE2 ) + { + static const __m128d prescale2 = _mm_set1_pd(exp_prescale); + static const __m128 postscale4 = _mm_set1_ps((float)exp_postscale); + static const __m128 maxval4 = _mm_set1_ps((float)(exp_max_val/exp_prescale)); + static const __m128 minval4 = _mm_set1_ps((float)(-exp_max_val/exp_prescale)); + + static const __m128 mA1 = _mm_set1_ps(A1); + static const __m128 mA2 = _mm_set1_ps(A2); + static const __m128 mA3 = _mm_set1_ps(A3); + static const __m128 mA4 = _mm_set1_ps(A4); + bool y_aligned = (size_t)(void*)y % 16 == 0; + + ushort CV_DECL_ALIGNED(16) tab_idx[8]; + + for( ; i <= n - 8; i += 8 ) + { + __m128 xf0, xf1; + xf0 = _mm_loadu_ps(&x[i].f); + xf1 = _mm_loadu_ps(&x[i+4].f); + __m128i xi0, xi1, xi2, xi3; + + xf0 = _mm_min_ps(_mm_max_ps(xf0, minval4), maxval4); + xf1 = _mm_min_ps(_mm_max_ps(xf1, minval4), maxval4); + + __m128d xd0 = _mm_cvtps_pd(xf0); + __m128d xd2 = _mm_cvtps_pd(_mm_movehl_ps(xf0, xf0)); + __m128d xd1 = _mm_cvtps_pd(xf1); + __m128d xd3 = _mm_cvtps_pd(_mm_movehl_ps(xf1, xf1)); + + xd0 = _mm_mul_pd(xd0, prescale2); + xd2 = _mm_mul_pd(xd2, prescale2); + xd1 = _mm_mul_pd(xd1, prescale2); + xd3 = _mm_mul_pd(xd3, prescale2); + + xi0 = _mm_cvtpd_epi32(xd0); + xi2 = _mm_cvtpd_epi32(xd2); + + xi1 = _mm_cvtpd_epi32(xd1); + xi3 = _mm_cvtpd_epi32(xd3); + + xd0 = _mm_sub_pd(xd0, _mm_cvtepi32_pd(xi0)); + xd2 = _mm_sub_pd(xd2, _mm_cvtepi32_pd(xi2)); + xd1 = _mm_sub_pd(xd1, _mm_cvtepi32_pd(xi1)); + xd3 = _mm_sub_pd(xd3, _mm_cvtepi32_pd(xi3)); + + xf0 = _mm_movelh_ps(_mm_cvtpd_ps(xd0), _mm_cvtpd_ps(xd2)); + xf1 = _mm_movelh_ps(_mm_cvtpd_ps(xd1), _mm_cvtpd_ps(xd3)); + + xf0 = _mm_mul_ps(xf0, postscale4); + xf1 = _mm_mul_ps(xf1, postscale4); + + xi0 = _mm_unpacklo_epi64(xi0, xi2); + xi1 = 
_mm_unpacklo_epi64(xi1, xi3); + xi0 = _mm_packs_epi32(xi0, xi1); + + _mm_store_si128((__m128i*)tab_idx, _mm_and_si128(xi0, _mm_set1_epi16(EXPTAB_MASK))); + + xi0 = _mm_add_epi16(_mm_srai_epi16(xi0, EXPTAB_SCALE), _mm_set1_epi16(127)); + xi0 = _mm_max_epi16(xi0, _mm_setzero_si128()); + xi0 = _mm_min_epi16(xi0, _mm_set1_epi16(255)); + xi1 = _mm_unpackhi_epi16(xi0, _mm_setzero_si128()); + xi0 = _mm_unpacklo_epi16(xi0, _mm_setzero_si128()); + + __m128d yd0 = _mm_unpacklo_pd(_mm_load_sd(expTab + tab_idx[0]), _mm_load_sd(expTab + tab_idx[1])); + __m128d yd1 = _mm_unpacklo_pd(_mm_load_sd(expTab + tab_idx[2]), _mm_load_sd(expTab + tab_idx[3])); + __m128d yd2 = _mm_unpacklo_pd(_mm_load_sd(expTab + tab_idx[4]), _mm_load_sd(expTab + tab_idx[5])); + __m128d yd3 = _mm_unpacklo_pd(_mm_load_sd(expTab + tab_idx[6]), _mm_load_sd(expTab + tab_idx[7])); + + __m128 yf0 = _mm_movelh_ps(_mm_cvtpd_ps(yd0), _mm_cvtpd_ps(yd1)); + __m128 yf1 = _mm_movelh_ps(_mm_cvtpd_ps(yd2), _mm_cvtpd_ps(yd3)); + + yf0 = _mm_mul_ps(yf0, _mm_castsi128_ps(_mm_slli_epi32(xi0, 23))); + yf1 = _mm_mul_ps(yf1, _mm_castsi128_ps(_mm_slli_epi32(xi1, 23))); + + __m128 zf0 = _mm_add_ps(xf0, mA1); + __m128 zf1 = _mm_add_ps(xf1, mA1); + + zf0 = _mm_add_ps(_mm_mul_ps(zf0, xf0), mA2); + zf1 = _mm_add_ps(_mm_mul_ps(zf1, xf1), mA2); + + zf0 = _mm_add_ps(_mm_mul_ps(zf0, xf0), mA3); + zf1 = _mm_add_ps(_mm_mul_ps(zf1, xf1), mA3); + + zf0 = _mm_add_ps(_mm_mul_ps(zf0, xf0), mA4); + zf1 = _mm_add_ps(_mm_mul_ps(zf1, xf1), mA4); + + zf0 = _mm_mul_ps(zf0, yf0); + zf1 = _mm_mul_ps(zf1, yf1); + + if( y_aligned ) + { + _mm_store_ps(y + i, zf0); + _mm_store_ps(y + i + 4, zf1); + } + else + { + _mm_storeu_ps(y + i, zf0); + _mm_storeu_ps(y + i + 4, zf1); + } + } + } + else +#endif + for( ; i <= n - 4; i += 4 ) + { + double x0 = x[i].f * exp_prescale; + double x1 = x[i + 1].f * exp_prescale; + double x2 = x[i + 2].f * exp_prescale; + double x3 = x[i + 3].f * exp_prescale; + int val0, val1, val2, val3, t; + + if( ((x[i].i >> 23) & 255) > 
127 + 10 ) + x0 = x[i].i < 0 ? -exp_max_val : exp_max_val; + + if( ((x[i+1].i >> 23) & 255) > 127 + 10 ) + x1 = x[i+1].i < 0 ? -exp_max_val : exp_max_val; + + if( ((x[i+2].i >> 23) & 255) > 127 + 10 ) + x2 = x[i+2].i < 0 ? -exp_max_val : exp_max_val; + + if( ((x[i+3].i >> 23) & 255) > 127 + 10 ) + x3 = x[i+3].i < 0 ? -exp_max_val : exp_max_val; + + val0 = cvRound(x0); + val1 = cvRound(x1); + val2 = cvRound(x2); + val3 = cvRound(x3); + + x0 = (x0 - val0)*exp_postscale; + x1 = (x1 - val1)*exp_postscale; + x2 = (x2 - val2)*exp_postscale; + x3 = (x3 - val3)*exp_postscale; + + t = (val0 >> EXPTAB_SCALE) + 127; + t = !(t & ~255) ? t : t < 0 ? 0 : 255; + buf[0].i = t << 23; + + t = (val1 >> EXPTAB_SCALE) + 127; + t = !(t & ~255) ? t : t < 0 ? 0 : 255; + buf[1].i = t << 23; + + t = (val2 >> EXPTAB_SCALE) + 127; + t = !(t & ~255) ? t : t < 0 ? 0 : 255; + buf[2].i = t << 23; + + t = (val3 >> EXPTAB_SCALE) + 127; + t = !(t & ~255) ? t : t < 0 ? 0 : 255; + buf[3].i = t << 23; + + x0 = buf[0].f * expTab[val0 & EXPTAB_MASK] * EXPPOLY( x0 ); + x1 = buf[1].f * expTab[val1 & EXPTAB_MASK] * EXPPOLY( x1 ); + + y[i] = (float)x0; + y[i + 1] = (float)x1; + + x2 = buf[2].f * expTab[val2 & EXPTAB_MASK] * EXPPOLY( x2 ); + x3 = buf[3].f * expTab[val3 & EXPTAB_MASK] * EXPPOLY( x3 ); + + y[i + 2] = (float)x2; + y[i + 3] = (float)x3; + } + + for( ; i < n; i++ ) + { + double x0 = x[i].f * exp_prescale; + int val0, t; + + if( ((x[i].i >> 23) & 255) > 127 + 10 ) + x0 = x[i].i < 0 ? -exp_max_val : exp_max_val; + + val0 = cvRound(x0); + t = (val0 >> EXPTAB_SCALE) + 127; + t = !(t & ~255) ? t : t < 0 ? 
0 : 255; + + buf[0].i = t << 23; + x0 = (x0 - val0)*exp_postscale; + + y[i] = (float)(buf[0].f * expTab[val0 & EXPTAB_MASK] * EXPPOLY(x0)); + } +} + + +static void Exp_64f( const double *_x, double *y, int n ) +{ + static const double + A5 = .99999999999999999998285227504999 / EXPPOLY_32F_A0, + A4 = .69314718055994546743029643825322 / EXPPOLY_32F_A0, + A3 = .24022650695886477918181338054308 / EXPPOLY_32F_A0, + A2 = .55504108793649567998466049042729e-1 / EXPPOLY_32F_A0, + A1 = .96180973140732918010002372686186e-2 / EXPPOLY_32F_A0, + A0 = .13369713757180123244806654839424e-2 / EXPPOLY_32F_A0; + +#undef EXPPOLY +#define EXPPOLY(x) (((((A0*(x) + A1)*(x) + A2)*(x) + A3)*(x) + A4)*(x) + A5) + + int i = 0; + Cv64suf buf[4]; + const Cv64suf* x = (const Cv64suf*)_x; + +#if CV_SSE2 + if( USE_SSE2 ) + { + static const __m128d prescale2 = _mm_set1_pd(exp_prescale); + static const __m128d postscale2 = _mm_set1_pd(exp_postscale); + static const __m128d maxval2 = _mm_set1_pd(exp_max_val); + static const __m128d minval2 = _mm_set1_pd(-exp_max_val); + + static const __m128d mA0 = _mm_set1_pd(A0); + static const __m128d mA1 = _mm_set1_pd(A1); + static const __m128d mA2 = _mm_set1_pd(A2); + static const __m128d mA3 = _mm_set1_pd(A3); + static const __m128d mA4 = _mm_set1_pd(A4); + static const __m128d mA5 = _mm_set1_pd(A5); + + int CV_DECL_ALIGNED(16) tab_idx[4]; + + for( ; i <= n - 4; i += 4 ) + { + __m128d xf0 = _mm_loadu_pd(&x[i].f), xf1 = _mm_loadu_pd(&x[i+2].f); + __m128i xi0, xi1; + xf0 = _mm_min_pd(_mm_max_pd(xf0, minval2), maxval2); + xf1 = _mm_min_pd(_mm_max_pd(xf1, minval2), maxval2); + xf0 = _mm_mul_pd(xf0, prescale2); + xf1 = _mm_mul_pd(xf1, prescale2); + + xi0 = _mm_cvtpd_epi32(xf0); + xi1 = _mm_cvtpd_epi32(xf1); + xf0 = _mm_mul_pd(_mm_sub_pd(xf0, _mm_cvtepi32_pd(xi0)), postscale2); + xf1 = _mm_mul_pd(_mm_sub_pd(xf1, _mm_cvtepi32_pd(xi1)), postscale2); + + xi0 = _mm_unpacklo_epi64(xi0, xi1); + _mm_store_si128((__m128i*)tab_idx, _mm_and_si128(xi0, 
_mm_set1_epi32(EXPTAB_MASK))); + + xi0 = _mm_add_epi32(_mm_srai_epi32(xi0, EXPTAB_SCALE), _mm_set1_epi32(1023)); + xi0 = _mm_packs_epi32(xi0, xi0); + xi0 = _mm_max_epi16(xi0, _mm_setzero_si128()); + xi0 = _mm_min_epi16(xi0, _mm_set1_epi16(2047)); + xi0 = _mm_unpacklo_epi16(xi0, _mm_setzero_si128()); + xi1 = _mm_unpackhi_epi32(xi0, _mm_setzero_si128()); + xi0 = _mm_unpacklo_epi32(xi0, _mm_setzero_si128()); + + __m128d yf0 = _mm_unpacklo_pd(_mm_load_sd(expTab + tab_idx[0]), _mm_load_sd(expTab + tab_idx[1])); + __m128d yf1 = _mm_unpacklo_pd(_mm_load_sd(expTab + tab_idx[2]), _mm_load_sd(expTab + tab_idx[3])); + yf0 = _mm_mul_pd(yf0, _mm_castsi128_pd(_mm_slli_epi64(xi0, 52))); + yf1 = _mm_mul_pd(yf1, _mm_castsi128_pd(_mm_slli_epi64(xi1, 52))); + + __m128d zf0 = _mm_add_pd(_mm_mul_pd(mA0, xf0), mA1); + __m128d zf1 = _mm_add_pd(_mm_mul_pd(mA0, xf1), mA1); + + zf0 = _mm_add_pd(_mm_mul_pd(zf0, xf0), mA2); + zf1 = _mm_add_pd(_mm_mul_pd(zf1, xf1), mA2); + + zf0 = _mm_add_pd(_mm_mul_pd(zf0, xf0), mA3); + zf1 = _mm_add_pd(_mm_mul_pd(zf1, xf1), mA3); + + zf0 = _mm_add_pd(_mm_mul_pd(zf0, xf0), mA4); + zf1 = _mm_add_pd(_mm_mul_pd(zf1, xf1), mA4); + + zf0 = _mm_add_pd(_mm_mul_pd(zf0, xf0), mA5); + zf1 = _mm_add_pd(_mm_mul_pd(zf1, xf1), mA5); + + zf0 = _mm_mul_pd(zf0, yf0); + zf1 = _mm_mul_pd(zf1, yf1); + + _mm_storeu_pd(y + i, zf0); + _mm_storeu_pd(y + i + 2, zf1); + } + } + else +#endif + for( ; i <= n - 4; i += 4 ) + { + double x0 = x[i].f * exp_prescale; + double x1 = x[i + 1].f * exp_prescale; + double x2 = x[i + 2].f * exp_prescale; + double x3 = x[i + 3].f * exp_prescale; + + double y0, y1, y2, y3; + int val0, val1, val2, val3, t; + + t = (int)(x[i].i >> 52); + if( (t & 2047) > 1023 + 10 ) + x0 = t < 0 ? -exp_max_val : exp_max_val; + + t = (int)(x[i+1].i >> 52); + if( (t & 2047) > 1023 + 10 ) + x1 = t < 0 ? -exp_max_val : exp_max_val; + + t = (int)(x[i+2].i >> 52); + if( (t & 2047) > 1023 + 10 ) + x2 = t < 0 ? 
-exp_max_val : exp_max_val; + + t = (int)(x[i+3].i >> 52); + if( (t & 2047) > 1023 + 10 ) + x3 = t < 0 ? -exp_max_val : exp_max_val; + + val0 = cvRound(x0); + val1 = cvRound(x1); + val2 = cvRound(x2); + val3 = cvRound(x3); + + x0 = (x0 - val0)*exp_postscale; + x1 = (x1 - val1)*exp_postscale; + x2 = (x2 - val2)*exp_postscale; + x3 = (x3 - val3)*exp_postscale; + + t = (val0 >> EXPTAB_SCALE) + 1023; + t = !(t & ~2047) ? t : t < 0 ? 0 : 2047; + buf[0].i = (int64)t << 52; + + t = (val1 >> EXPTAB_SCALE) + 1023; + t = !(t & ~2047) ? t : t < 0 ? 0 : 2047; + buf[1].i = (int64)t << 52; + + t = (val2 >> EXPTAB_SCALE) + 1023; + t = !(t & ~2047) ? t : t < 0 ? 0 : 2047; + buf[2].i = (int64)t << 52; + + t = (val3 >> EXPTAB_SCALE) + 1023; + t = !(t & ~2047) ? t : t < 0 ? 0 : 2047; + buf[3].i = (int64)t << 52; + + y0 = buf[0].f * expTab[val0 & EXPTAB_MASK] * EXPPOLY( x0 ); + y1 = buf[1].f * expTab[val1 & EXPTAB_MASK] * EXPPOLY( x1 ); + + y[i] = y0; + y[i + 1] = y1; + + y2 = buf[2].f * expTab[val2 & EXPTAB_MASK] * EXPPOLY( x2 ); + y3 = buf[3].f * expTab[val3 & EXPTAB_MASK] * EXPPOLY( x3 ); + + y[i + 2] = y2; + y[i + 3] = y3; + } + + for( ; i < n; i++ ) + { + double x0 = x[i].f * exp_prescale; + int val0, t; + + t = (int)(x[i].i >> 52); + if( (t & 2047) > 1023 + 10 ) + x0 = t < 0 ? -exp_max_val : exp_max_val; + + val0 = cvRound(x0); + t = (val0 >> EXPTAB_SCALE) + 1023; + t = !(t & ~2047) ? t : t < 0 ? 
0 : 2047; + + buf[0].i = (int64)t << 52; + x0 = (x0 - val0)*exp_postscale; + + y[i] = buf[0].f * expTab[val0 & EXPTAB_MASK] * EXPPOLY( x0 ); + } +} + +#undef EXPTAB_SCALE +#undef EXPTAB_MASK +#undef EXPPOLY_32F_A0 + +#else + +#define Exp_32f ippsExp_32f_A21 +#define Exp_64f ippsExp_64f_A50 + +#endif + +void exp( InputArray _src, OutputArray _dst ) +{ + Mat src = _src.getMat(); + int type = src.type(), depth = src.depth(), cn = src.channels(); + + _dst.create( src.dims, src.size, type ); + Mat dst = _dst.getMat(); + + CV_Assert( depth == CV_32F || depth == CV_64F ); + + const Mat* arrays[] = {&src, &dst, 0}; + uchar* ptrs[2]; + NAryMatIterator it(arrays, ptrs); + int len = (int)(it.size*cn); + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + { + if( depth == CV_32F ) + Exp_32f( (const float*)ptrs[0], (float*)ptrs[1], len ); + else + Exp_64f( (const double*)ptrs[0], (double*)ptrs[1], len ); + } +} + + +/****************************************************************************************\ +* L O G * +\****************************************************************************************/ + +#ifndef HAVE_IPP + +#define LOGTAB_SCALE 8 +#define LOGTAB_MASK ((1 << LOGTAB_SCALE) - 1) +#define LOGTAB_MASK2 ((1 << (20 - LOGTAB_SCALE)) - 1) +#define LOGTAB_MASK2_32F ((1 << (23 - LOGTAB_SCALE)) - 1) + +static const double CV_DECL_ALIGNED(16) icvLogTab[] = { +0.0000000000000000000000000000000000000000, 1.000000000000000000000000000000000000000, +.00389864041565732288852075271279318258166, .9961089494163424124513618677042801556420, +.00778214044205494809292034119607706088573, .9922480620155038759689922480620155038760, +.01165061721997527263705585198749759001657, .9884169884169884169884169884169884169884, +.01550418653596525274396267235488267033361, .9846153846153846153846153846153846153846, +.01934296284313093139406447562578250654042, .9808429118773946360153256704980842911877, +.02316705928153437593630670221500622574241, .9770992366412213740458015267175572519084, 
+.02697658769820207233514075539915211265906, .9733840304182509505703422053231939163498, +.03077165866675368732785500469617545604706, .9696969696969696969696969696969696969697, +.03455238150665972812758397481047722976656, .9660377358490566037735849056603773584906, +.03831886430213659461285757856785494368522, .9624060150375939849624060150375939849624, +.04207121392068705056921373852674150839447, .9588014981273408239700374531835205992509, +.04580953603129420126371940114040626212953, .9552238805970149253731343283582089552239, +.04953393512227662748292900118940451648088, .9516728624535315985130111524163568773234, +.05324451451881227759255210685296333394944, .9481481481481481481481481481481481481481, +.05694137640013842427411105973078520037234, .9446494464944649446494464944649446494465, +.06062462181643483993820353816772694699466, .9411764705882352941176470588235294117647, +.06429435070539725460836422143984236754475, .9377289377289377289377289377289377289377, +.06795066190850773679699159401934593915938, .9343065693430656934306569343065693430657, +.07159365318700880442825962290953611955044, .9309090909090909090909090909090909090909, +.07522342123758751775142172846244648098944, .9275362318840579710144927536231884057971, +.07884006170777602129362549021607264876369, .9241877256317689530685920577617328519856, +.08244366921107458556772229485432035289706, .9208633093525179856115107913669064748201, +.08603433734180314373940490213499288074675, .9175627240143369175627240143369175627240, +.08961215868968712416897659522874164395031, .9142857142857142857142857142857142857143, +.09317722485418328259854092721070628613231, .9110320284697508896797153024911032028470, +.09672962645855109897752299730200320482256, .9078014184397163120567375886524822695035, +.10026945316367513738597949668474029749630, .9045936395759717314487632508833922261484, +.10379679368164355934833764649738441221420, .9014084507042253521126760563380281690141, +.10731173578908805021914218968959175981580, 
.8982456140350877192982456140350877192982, +.11081436634029011301105782649756292812530, .8951048951048951048951048951048951048951, +.11430477128005862852422325204315711744130, .8919860627177700348432055749128919860627, +.11778303565638344185817487641543266363440, .8888888888888888888888888888888888888889, +.12124924363286967987640707633545389398930, .8858131487889273356401384083044982698962, +.12470347850095722663787967121606925502420, .8827586206896551724137931034482758620690, +.12814582269193003360996385708858724683530, .8797250859106529209621993127147766323024, +.13157635778871926146571524895989568904040, .8767123287671232876712328767123287671233, +.13499516453750481925766280255629681050780, .8737201365187713310580204778156996587031, +.13840232285911913123754857224412262439730, .8707482993197278911564625850340136054422, +.14179791186025733629172407290752744302150, .8677966101694915254237288135593220338983, +.14518200984449788903951628071808954700830, .8648648648648648648648648648648648648649, +.14855469432313711530824207329715136438610, .8619528619528619528619528619528619528620, +.15191604202584196858794030049466527998450, .8590604026845637583892617449664429530201, +.15526612891112392955683674244937719777230, .8561872909698996655518394648829431438127, +.15860503017663857283636730244325008243330, .8533333333333333333333333333333333333333, +.16193282026931324346641360989451641216880, .8504983388704318936877076411960132890365, +.16524957289530714521497145597095368430010, .8476821192052980132450331125827814569536, +.16855536102980664403538924034364754334090, .8448844884488448844884488448844884488449, +.17185025692665920060697715143760433420540, .8421052631578947368421052631578947368421, +.17513433212784912385018287750426679849630, .8393442622950819672131147540983606557377, +.17840765747281828179637841458315961062910, .8366013071895424836601307189542483660131, +.18167030310763465639212199675966985523700, .8338762214983713355048859934853420195440, 
+.18492233849401198964024217730184318497780, .8311688311688311688311688311688311688312, +.18816383241818296356839823602058459073300, .8284789644012944983818770226537216828479, +.19139485299962943898322009772527962923050, .8258064516129032258064516129032258064516, +.19461546769967164038916962454095482826240, .8231511254019292604501607717041800643087, +.19782574332991986754137769821682013571260, .8205128205128205128205128205128205128205, +.20102574606059073203390141770796617493040, .8178913738019169329073482428115015974441, +.20421554142869088876999228432396193966280, .8152866242038216560509554140127388535032, +.20739519434607056602715147164417430758480, .8126984126984126984126984126984126984127, +.21056476910734961416338251183333341032260, .8101265822784810126582278481012658227848, +.21372432939771812687723695489694364368910, .8075709779179810725552050473186119873817, +.21687393830061435506806333251006435602900, .8050314465408805031446540880503144654088, +.22001365830528207823135744547471404075630, .8025078369905956112852664576802507836991, +.22314355131420973710199007200571941211830, .8000000000000000000000000000000000000000, +.22626367865045338145790765338460914790630, .7975077881619937694704049844236760124611, +.22937410106484582006380890106811420992010, .7950310559006211180124223602484472049689, +.23247487874309405442296849741978803649550, .7925696594427244582043343653250773993808, +.23556607131276688371634975283086532726890, .7901234567901234567901234567901234567901, +.23864773785017498464178231643018079921600, .7876923076923076923076923076923076923077, +.24171993688714515924331749374687206000090, .7852760736196319018404907975460122699387, +.24478272641769091566565919038112042471760, .7828746177370030581039755351681957186544, +.24783616390458124145723672882013488560910, .7804878048780487804878048780487804878049, +.25088030628580937353433455427875742316250, .7781155015197568389057750759878419452888, +.25391520998096339667426946107298135757450, 
.7757575757575757575757575757575757575758, +.25694093089750041913887912414793390780680, .7734138972809667673716012084592145015106, +.25995752443692604627401010475296061486000, .7710843373493975903614457831325301204819, +.26296504550088134477547896494797896593800, .7687687687687687687687687687687687687688, +.26596354849713793599974565040611196309330, .7664670658682634730538922155688622754491, +.26895308734550393836570947314612567424780, .7641791044776119402985074626865671641791, +.27193371548364175804834985683555714786050, .7619047619047619047619047619047619047619, +.27490548587279922676529508862586226314300, .7596439169139465875370919881305637982196, +.27786845100345625159121709657483734190480, .7573964497041420118343195266272189349112, +.28082266290088775395616949026589281857030, .7551622418879056047197640117994100294985, +.28376817313064456316240580235898960381750, .7529411764705882352941176470588235294118, +.28670503280395426282112225635501090437180, .7507331378299120234604105571847507331378, +.28963329258304265634293983566749375313530, .7485380116959064327485380116959064327485, +.29255300268637740579436012922087684273730, .7463556851311953352769679300291545189504, +.29546421289383584252163927885703742504130, .7441860465116279069767441860465116279070, +.29836697255179722709783618483925238251680, .7420289855072463768115942028985507246377, +.30126133057816173455023545102449133992200, .7398843930635838150289017341040462427746, +.30414733546729666446850615102448500692850, .7377521613832853025936599423631123919308, +.30702503529491181888388950937951449304830, .7356321839080459770114942528735632183908, +.30989447772286465854207904158101882785550, .7335243553008595988538681948424068767908, +.31275571000389684739317885942000430077330, .7314285714285714285714285714285714285714, +.31560877898630329552176476681779604405180, .7293447293447293447293447293447293447293, +.31845373111853458869546784626436419785030, .7272727272727272727272727272727272727273, 
+.32129061245373424782201254856772720813750, .7252124645892351274787535410764872521246, +.32411946865421192853773391107097268104550, .7231638418079096045197740112994350282486, +.32694034499585328257253991068864706903700, .7211267605633802816901408450704225352113, +.32975328637246797969240219572384376078850, .7191011235955056179775280898876404494382, +.33255833730007655635318997155991382896900, .7170868347338935574229691876750700280112, +.33535554192113781191153520921943709254280, .7150837988826815642458100558659217877095, +.33814494400871636381467055798566434532400, .7130919220055710306406685236768802228412, +.34092658697059319283795275623560883104800, .7111111111111111111111111111111111111111, +.34370051385331840121395430287520866841080, .7091412742382271468144044321329639889197, +.34646676734620857063262633346312213689100, .7071823204419889502762430939226519337017, +.34922538978528827602332285096053965389730, .7052341597796143250688705234159779614325, +.35197642315717814209818925519357435405250, .7032967032967032967032967032967032967033, +.35471990910292899856770532096561510115850, .7013698630136986301369863013698630136986, +.35745588892180374385176833129662554711100, .6994535519125683060109289617486338797814, +.36018440357500774995358483465679455548530, .6975476839237057220708446866485013623978, +.36290549368936841911903457003063522279280, .6956521739130434782608695652173913043478, +.36561919956096466943762379742111079394830, .6937669376693766937669376693766937669377, +.36832556115870762614150635272380895912650, .6918918918918918918918918918918918918919, +.37102461812787262962487488948681857436900, .6900269541778975741239892183288409703504, +.37371640979358405898480555151763837784530, .6881720430107526881720430107526881720430, +.37640097516425302659470730759494472295050, .6863270777479892761394101876675603217158, +.37907835293496944251145919224654790014030, .6844919786096256684491978609625668449198, +.38174858149084833769393299007788300514230, 
.6826666666666666666666666666666666666667, +.38441169891033200034513583887019194662580, .6808510638297872340425531914893617021277, +.38706774296844825844488013899535872042180, .6790450928381962864721485411140583554377, +.38971675114002518602873692543653305619950, .6772486772486772486772486772486772486772, +.39235876060286384303665840889152605086580, .6754617414248021108179419525065963060686, +.39499380824086893770896722344332374632350, .6736842105263157894736842105263157894737, +.39762193064713846624158577469643205404280, .6719160104986876640419947506561679790026, +.40024316412701266276741307592601515352730, .6701570680628272251308900523560209424084, +.40285754470108348090917615991202183067800, .6684073107049608355091383812010443864230, +.40546510810816432934799991016916465014230, .6666666666666666666666666666666666666667, +.40806588980822172674223224930756259709600, .6649350649350649350649350649350649350649, +.41065992498526837639616360320360399782650, .6632124352331606217616580310880829015544, +.41324724855021932601317757871584035456180, .6614987080103359173126614987080103359173, +.41582789514371093497757669865677598863850, .6597938144329896907216494845360824742268, +.41840189913888381489925905043492093682300, .6580976863753213367609254498714652956298, +.42096929464412963239894338585145305842150, .6564102564102564102564102564102564102564, +.42353011550580327293502591601281892508280, .6547314578005115089514066496163682864450, +.42608439531090003260516141381231136620050, .6530612244897959183673469387755102040816, +.42863216738969872610098832410585600882780, .6513994910941475826972010178117048346056, +.43117346481837132143866142541810404509300, .6497461928934010152284263959390862944162, +.43370832042155937902094819946796633303180, .6481012658227848101265822784810126582278, +.43623676677491801667585491486534010618930, .6464646464646464646464646464646464646465, +.43875883620762790027214350629947148263450, .6448362720403022670025188916876574307305, 
+.44127456080487520440058801796112675219780, .6432160804020100502512562814070351758794, +.44378397241030093089975139264424797147500, .6416040100250626566416040100250626566416, +.44628710262841947420398014401143882423650, .6400000000000000000000000000000000000000, +.44878398282700665555822183705458883196130, .6384039900249376558603491271820448877805, +.45127464413945855836729492693848442286250, .6368159203980099502487562189054726368159, +.45375911746712049854579618113348260521900, .6352357320099255583126550868486352357320, +.45623743348158757315857769754074979573500, .6336633663366336633663366336633663366337, +.45870962262697662081833982483658473938700, .6320987654320987654320987654320987654321, +.46117571512217014895185229761409573256980, .6305418719211822660098522167487684729064, +.46363574096303250549055974261136725544930, .6289926289926289926289926289926289926290, +.46608972992459918316399125615134835243230, .6274509803921568627450980392156862745098, +.46853771156323925639597405279346276074650, .6259168704156479217603911980440097799511, +.47097971521879100631480241645476780831830, .6243902439024390243902439024390243902439, +.47341577001667212165614273544633761048330, .6228710462287104622871046228710462287105, +.47584590486996386493601107758877333253630, .6213592233009708737864077669902912621359, +.47827014848147025860569669930555392056700, .6198547215496368038740920096852300242131, +.48068852934575190261057286988943815231330, .6183574879227053140096618357487922705314, +.48310107575113581113157579238759353756900, .6168674698795180722891566265060240963855, +.48550781578170076890899053978500887751580, .6153846153846153846153846153846153846154, +.48790877731923892879351001283794175833480, .6139088729016786570743405275779376498801, +.49030398804519381705802061333088204264650, .6124401913875598086124401913875598086124, +.49269347544257524607047571407747454941280, .6109785202863961813842482100238663484487, +.49507726679785146739476431321236304938800, 
.6095238095238095238095238095238095238095, +.49745538920281889838648226032091770321130, .6080760095011876484560570071258907363420, +.49982786955644931126130359189119189977650, .6066350710900473933649289099526066350711, +.50219473456671548383667413872899487614650, .6052009456264775413711583924349881796690, +.50455601075239520092452494282042607665050, .6037735849056603773584905660377358490566, +.50691172444485432801997148999362252652650, .6023529411764705882352941176470588235294, +.50926190178980790257412536448100581765150, .6009389671361502347417840375586854460094, +.51160656874906207391973111953120678663250, .5995316159250585480093676814988290398126, +.51394575110223428282552049495279788970950, .5981308411214953271028037383177570093458, +.51627947444845445623684554448118433356300, .5967365967365967365967365967365967365967, +.51860776420804555186805373523384332656850, .5953488372093023255813953488372093023256, +.52093064562418522900344441950437612831600, .5939675174013921113689095127610208816705, +.52324814376454775732838697877014055848100, .5925925925925925925925925925925925925926, +.52556028352292727401362526507000438869000, .5912240184757505773672055427251732101617, +.52786708962084227803046587723656557500350, .5898617511520737327188940092165898617512, +.53016858660912158374145519701414741575700, .5885057471264367816091954022988505747126, +.53246479886947173376654518506256863474850, .5871559633027522935779816513761467889908, +.53475575061602764748158733709715306758900, .5858123569794050343249427917620137299771, +.53704146589688361856929077475797384977350, .5844748858447488584474885844748858447489, +.53932196859560876944783558428753167390800, .5831435079726651480637813211845102505695, +.54159728243274429804188230264117009937750, .5818181818181818181818181818181818181818, +.54386743096728351609669971367111429572100, .5804988662131519274376417233560090702948, +.54613243759813556721383065450936555862450, .5791855203619909502262443438914027149321, 
+.54839232556557315767520321969641372561450, .5778781038374717832957110609480812641084, +.55064711795266219063194057525834068655950, .5765765765765765765765765765765765765766, +.55289683768667763352766542084282264113450, .5752808988764044943820224719101123595506, +.55514150754050151093110798683483153581600, .5739910313901345291479820627802690582960, +.55738115013400635344709144192165695130850, .5727069351230425055928411633109619686801, +.55961578793542265941596269840374588966350, .5714285714285714285714285714285714285714, +.56184544326269181269140062795486301183700, .5701559020044543429844097995545657015590, +.56407013828480290218436721261241473257550, .5688888888888888888888888888888888888889, +.56628989502311577464155334382667206227800, .5676274944567627494456762749445676274945, +.56850473535266865532378233183408156037350, .5663716814159292035398230088495575221239, +.57071468100347144680739575051120482385150, .5651214128035320088300220750551876379691, +.57291975356178548306473885531886480748650, .5638766519823788546255506607929515418502, +.57511997447138785144460371157038025558000, .5626373626373626373626373626373626373626, +.57731536503482350219940144597785547375700, .5614035087719298245614035087719298245614, +.57950594641464214795689713355386629700650, .5601750547045951859956236323851203501094, +.58169173963462239562716149521293118596100, .5589519650655021834061135371179039301310, +.58387276558098266665552955601015128195300, .5577342047930283224400871459694989106754, +.58604904500357812846544902640744112432000, .5565217391304347826086956521739130434783, +.58822059851708596855957011939608491957200, .5553145336225596529284164859002169197397, +.59038744660217634674381770309992134571100, .5541125541125541125541125541125541125541, +.59254960960667157898740242671919986605650, .5529157667386609071274298056155507559395, +.59470710774669277576265358220553025603300, .5517241379310344827586206896551724137931, +.59685996110779382384237123915227130055450, 
.5505376344086021505376344086021505376344, +.59900818964608337768851242799428291618800, .5493562231759656652360515021459227467811, +.60115181318933474940990890900138765573500, .5481798715203426124197002141327623126338, +.60329085143808425240052883964381180703650, .5470085470085470085470085470085470085470, +.60542532396671688843525771517306566238400, .5458422174840085287846481876332622601279, +.60755525022454170969155029524699784815300, .5446808510638297872340425531914893617021, +.60968064953685519036241657886421307921400, .5435244161358811040339702760084925690021, +.61180154110599282990534675263916142284850, .5423728813559322033898305084745762711864, +.61391794401237043121710712512140162289150, .5412262156448202959830866807610993657505, +.61602987721551394351138242200249806046500, .5400843881856540084388185654008438818565, +.61813735955507864705538167982012964785100, .5389473684210526315789473684210526315789, +.62024040975185745772080281312810257077200, .5378151260504201680672268907563025210084, +.62233904640877868441606324267922900617100, .5366876310272536687631027253668763102725, +.62443328801189346144440150965237990021700, .5355648535564853556485355648535564853556, +.62652315293135274476554741340805776417250, .5344467640918580375782881002087682672234, +.62860865942237409420556559780379757285100, .5333333333333333333333333333333333333333, +.63068982562619868570408243613201193511500, .5322245322245322245322245322245322245322, +.63276666957103777644277897707070223987100, .5311203319502074688796680497925311203320, +.63483920917301017716738442686619237065300, .5300207039337474120082815734989648033126, +.63690746223706917739093569252872839570050, .5289256198347107438016528925619834710744, +.63897144645792069983514238629140891134750, .5278350515463917525773195876288659793814, +.64103117942093124081992527862894348800200, .5267489711934156378600823045267489711934, +.64308667860302726193566513757104985415950, .5256673511293634496919917864476386036961, 
+.64513796137358470073053240412264131009600, .5245901639344262295081967213114754098361, +.64718504499530948859131740391603671014300, .5235173824130879345603271983640081799591, +.64922794662510974195157587018911726772800, .5224489795918367346938775510204081632653, +.65126668331495807251485530287027359008800, .5213849287169042769857433808553971486762, +.65330127201274557080523663898929953575150, .5203252032520325203252032520325203252033, +.65533172956312757406749369692988693714150, .5192697768762677484787018255578093306288, +.65735807270835999727154330685152672231200, .5182186234817813765182186234817813765182, +.65938031808912778153342060249997302889800, .5171717171717171717171717171717171717172, +.66139848224536490484126716182800009846700, .5161290322580645161290322580645161290323, +.66341258161706617713093692145776003599150, .5150905432595573440643863179074446680080, +.66542263254509037562201001492212526500250, .5140562248995983935742971887550200803213, +.66742865127195616370414654738851822912700, .5130260521042084168336673346693386773547, +.66943065394262923906154583164607174694550, .5120000000000000000000000000000000000000, +.67142865660530226534774556057527661323550, .5109780439121756487025948103792415169661, +.67342267521216669923234121597488410770900, .5099601593625498007968127490039840637450, +.67541272562017662384192817626171745359900, .5089463220675944333996023856858846918489, +.67739882359180603188519853574689477682100, .5079365079365079365079365079365079365079, +.67938098479579733801614338517538271844400, .5069306930693069306930693069306930693069, +.68135922480790300781450241629499942064300, .5059288537549407114624505928853754940711, +.68333355911162063645036823800182901322850, .5049309664694280078895463510848126232742, +.68530400309891936760919861626462079584600, .5039370078740157480314960629921259842520, +.68727057207096020619019327568821609020250, .5029469548133595284872298624754420432220, +.68923328123880889251040571252815425395950, 
.5019607843137254901960784313725490196078, +.69314718055994530941723212145818, 5.0e-01, +}; + + + +#define LOGTAB_TRANSLATE(x,h) (((x) - 1.)*icvLogTab[(h)+1]) +static const double ln_2 = 0.69314718055994530941723212145818; + +static void Log_32f( const float *_x, float *y, int n ) +{ + static const float shift[] = { 0, -1.f/512 }; + static const float + A0 = 0.3333333333333333333333333f, + A1 = -0.5f, + A2 = 1.f; + + #undef LOGPOLY + #define LOGPOLY(x) (((A0*(x) + A1)*(x) + A2)*(x)) + + int i = 0; + Cv32suf buf[4]; + const int* x = (const int*)_x; + +#if CV_SSE2 + if( USE_SSE2 ) + { + static const __m128d ln2_2 = _mm_set1_pd(ln_2); + static const __m128 _1_4 = _mm_set1_ps(1.f); + static const __m128 shift4 = _mm_set1_ps(-1.f/512); + + static const __m128 mA0 = _mm_set1_ps(A0); + static const __m128 mA1 = _mm_set1_ps(A1); + static const __m128 mA2 = _mm_set1_ps(A2); + + int CV_DECL_ALIGNED(16) idx[4]; + + for( ; i <= n - 4; i += 4 ) + { + __m128i h0 = _mm_loadu_si128((const __m128i*)(x + i)); + __m128i yi0 = _mm_sub_epi32(_mm_and_si128(_mm_srli_epi32(h0, 23), _mm_set1_epi32(255)), _mm_set1_epi32(127)); + __m128d yd0 = _mm_mul_pd(_mm_cvtepi32_pd(yi0), ln2_2); + __m128d yd1 = _mm_mul_pd(_mm_cvtepi32_pd(_mm_unpackhi_epi64(yi0,yi0)), ln2_2); + + __m128i xi0 = _mm_or_si128(_mm_and_si128(h0, _mm_set1_epi32(LOGTAB_MASK2_32F)), _mm_set1_epi32(127 << 23)); + + h0 = _mm_and_si128(_mm_srli_epi32(h0, 23 - LOGTAB_SCALE - 1), _mm_set1_epi32(LOGTAB_MASK*2)); + _mm_store_si128((__m128i*)idx, h0); + h0 = _mm_cmpeq_epi32(h0, _mm_set1_epi32(510)); + + __m128d t0, t1, t2, t3, t4; + t0 = _mm_load_pd(icvLogTab + idx[0]); + t2 = _mm_load_pd(icvLogTab + idx[1]); + t1 = _mm_unpackhi_pd(t0, t2); + t0 = _mm_unpacklo_pd(t0, t2); + t2 = _mm_load_pd(icvLogTab + idx[2]); + t4 = _mm_load_pd(icvLogTab + idx[3]); + t3 = _mm_unpackhi_pd(t2, t4); + t2 = _mm_unpacklo_pd(t2, t4); + + yd0 = _mm_add_pd(yd0, t0); + yd1 = _mm_add_pd(yd1, t2); + + __m128 yf0 = _mm_movelh_ps(_mm_cvtpd_ps(yd0), 
_mm_cvtpd_ps(yd1)); + + __m128 xf0 = _mm_sub_ps(_mm_castsi128_ps(xi0), _1_4); + xf0 = _mm_mul_ps(xf0, _mm_movelh_ps(_mm_cvtpd_ps(t1), _mm_cvtpd_ps(t3))); + xf0 = _mm_add_ps(xf0, _mm_and_ps(_mm_castsi128_ps(h0), shift4)); + + __m128 zf0 = _mm_mul_ps(xf0, mA0); + zf0 = _mm_mul_ps(_mm_add_ps(zf0, mA1), xf0); + zf0 = _mm_mul_ps(_mm_add_ps(zf0, mA2), xf0); + yf0 = _mm_add_ps(yf0, zf0); + + _mm_storeu_ps(y + i, yf0); + } + } + else +#endif + for( ; i <= n - 4; i += 4 ) + { + double x0, x1, x2, x3; + double y0, y1, y2, y3; + int h0, h1, h2, h3; + + h0 = x[i]; + h1 = x[i+1]; + buf[0].i = (h0 & LOGTAB_MASK2_32F) | (127 << 23); + buf[1].i = (h1 & LOGTAB_MASK2_32F) | (127 << 23); + + y0 = (((h0 >> 23) & 0xff) - 127) * ln_2; + y1 = (((h1 >> 23) & 0xff) - 127) * ln_2; + + h0 = (h0 >> (23 - LOGTAB_SCALE - 1)) & LOGTAB_MASK * 2; + h1 = (h1 >> (23 - LOGTAB_SCALE - 1)) & LOGTAB_MASK * 2; + + y0 += icvLogTab[h0]; + y1 += icvLogTab[h1]; + + h2 = x[i+2]; + h3 = x[i+3]; + + x0 = LOGTAB_TRANSLATE( buf[0].f, h0 ); + x1 = LOGTAB_TRANSLATE( buf[1].f, h1 ); + + buf[2].i = (h2 & LOGTAB_MASK2_32F) | (127 << 23); + buf[3].i = (h3 & LOGTAB_MASK2_32F) | (127 << 23); + + y2 = (((h2 >> 23) & 0xff) - 127) * ln_2; + y3 = (((h3 >> 23) & 0xff) - 127) * ln_2; + + h2 = (h2 >> (23 - LOGTAB_SCALE - 1)) & LOGTAB_MASK * 2; + h3 = (h3 >> (23 - LOGTAB_SCALE - 1)) & LOGTAB_MASK * 2; + + y2 += icvLogTab[h2]; + y3 += icvLogTab[h3]; + + x2 = LOGTAB_TRANSLATE( buf[2].f, h2 ); + x3 = LOGTAB_TRANSLATE( buf[3].f, h3 ); + + x0 += shift[h0 == 510]; + x1 += shift[h1 == 510]; + y0 += LOGPOLY( x0 ); + y1 += LOGPOLY( x1 ); + + y[i] = (float) y0; + y[i + 1] = (float) y1; + + x2 += shift[h2 == 510]; + x3 += shift[h3 == 510]; + y2 += LOGPOLY( x2 ); + y3 += LOGPOLY( x3 ); + + y[i + 2] = (float) y2; + y[i + 3] = (float) y3; + } + + for( ; i < n; i++ ) + { + int h0 = x[i]; + double y0; + float x0; + + y0 = (((h0 >> 23) & 0xff) - 127) * ln_2; + + buf[0].i = (h0 & LOGTAB_MASK2_32F) | (127 << 23); + h0 = (h0 >> (23 - LOGTAB_SCALE - 
1)) & LOGTAB_MASK * 2; + + y0 += icvLogTab[h0]; + x0 = (float)LOGTAB_TRANSLATE( buf[0].f, h0 ); + x0 += shift[h0 == 510]; + y0 += LOGPOLY( x0 ); + + y[i] = (float)y0; + } +} + + +static void Log_64f( const double *x, double *y, int n ) +{ + static const double shift[] = { 0, -1./512 }; + static const double + A7 = 1.0, + A6 = -0.5, + A5 = 0.333333333333333314829616256247390992939472198486328125, + A4 = -0.25, + A3 = 0.2, + A2 = -0.1666666666666666574148081281236954964697360992431640625, + A1 = 0.1428571428571428769682682968777953647077083587646484375, + A0 = -0.125; + + #undef LOGPOLY + #define LOGPOLY(x,k) ((x)+=shift[k], xq = (x)*(x),\ + (((A0*xq + A2)*xq + A4)*xq + A6)*xq + \ + (((A1*xq + A3)*xq + A5)*xq + A7)*(x)) + + int i = 0; + DBLINT buf[4]; + DBLINT *X = (DBLINT *) x; + +#if CV_SSE2 + if( USE_SSE2 ) + { + static const __m128d ln2_2 = _mm_set1_pd(ln_2); + static const __m128d _1_2 = _mm_set1_pd(1.); + static const __m128d shift2 = _mm_set1_pd(-1./512); + + static const __m128i log_and_mask2 = _mm_set_epi32(LOGTAB_MASK2, 0xffffffff, LOGTAB_MASK2, 0xffffffff); + static const __m128i log_or_mask2 = _mm_set_epi32(1023 << 20, 0, 1023 << 20, 0); + + static const __m128d mA0 = _mm_set1_pd(A0); + static const __m128d mA1 = _mm_set1_pd(A1); + static const __m128d mA2 = _mm_set1_pd(A2); + static const __m128d mA3 = _mm_set1_pd(A3); + static const __m128d mA4 = _mm_set1_pd(A4); + static const __m128d mA5 = _mm_set1_pd(A5); + static const __m128d mA6 = _mm_set1_pd(A6); + static const __m128d mA7 = _mm_set1_pd(A7); + + int CV_DECL_ALIGNED(16) idx[4]; + + for( ; i <= n - 4; i += 4 ) + { + __m128i h0 = _mm_loadu_si128((const __m128i*)(x + i)); + __m128i h1 = _mm_loadu_si128((const __m128i*)(x + i + 2)); + + __m128d xd0 = _mm_castsi128_pd(_mm_or_si128(_mm_and_si128(h0, log_and_mask2), log_or_mask2)); + __m128d xd1 = _mm_castsi128_pd(_mm_or_si128(_mm_and_si128(h1, log_and_mask2), log_or_mask2)); + + h0 = _mm_unpackhi_epi32(_mm_unpacklo_epi32(h0, h1), _mm_unpackhi_epi32(h0, 
h1)); + + __m128i yi0 = _mm_sub_epi32(_mm_and_si128(_mm_srli_epi32(h0, 20), + _mm_set1_epi32(2047)), _mm_set1_epi32(1023)); + __m128d yd0 = _mm_mul_pd(_mm_cvtepi32_pd(yi0), ln2_2); + __m128d yd1 = _mm_mul_pd(_mm_cvtepi32_pd(_mm_unpackhi_epi64(yi0, yi0)), ln2_2); + + h0 = _mm_and_si128(_mm_srli_epi32(h0, 20 - LOGTAB_SCALE - 1), _mm_set1_epi32(LOGTAB_MASK * 2)); + _mm_store_si128((__m128i*)idx, h0); + h0 = _mm_cmpeq_epi32(h0, _mm_set1_epi32(510)); + + __m128d t0, t1, t2, t3, t4; + t0 = _mm_load_pd(icvLogTab + idx[0]); + t2 = _mm_load_pd(icvLogTab + idx[1]); + t1 = _mm_unpackhi_pd(t0, t2); + t0 = _mm_unpacklo_pd(t0, t2); + t2 = _mm_load_pd(icvLogTab + idx[2]); + t4 = _mm_load_pd(icvLogTab + idx[3]); + t3 = _mm_unpackhi_pd(t2, t4); + t2 = _mm_unpacklo_pd(t2, t4); + + yd0 = _mm_add_pd(yd0, t0); + yd1 = _mm_add_pd(yd1, t2); + + xd0 = _mm_mul_pd(_mm_sub_pd(xd0, _1_2), t1); + xd1 = _mm_mul_pd(_mm_sub_pd(xd1, _1_2), t3); + + xd0 = _mm_add_pd(xd0, _mm_and_pd(_mm_castsi128_pd(_mm_unpacklo_epi32(h0, h0)), shift2)); + xd1 = _mm_add_pd(xd1, _mm_and_pd(_mm_castsi128_pd(_mm_unpackhi_epi32(h0, h0)), shift2)); + + __m128d zd0 = _mm_mul_pd(xd0, mA0); + __m128d zd1 = _mm_mul_pd(xd1, mA0); + zd0 = _mm_mul_pd(_mm_add_pd(zd0, mA1), xd0); + zd1 = _mm_mul_pd(_mm_add_pd(zd1, mA1), xd1); + zd0 = _mm_mul_pd(_mm_add_pd(zd0, mA2), xd0); + zd1 = _mm_mul_pd(_mm_add_pd(zd1, mA2), xd1); + zd0 = _mm_mul_pd(_mm_add_pd(zd0, mA3), xd0); + zd1 = _mm_mul_pd(_mm_add_pd(zd1, mA3), xd1); + zd0 = _mm_mul_pd(_mm_add_pd(zd0, mA4), xd0); + zd1 = _mm_mul_pd(_mm_add_pd(zd1, mA4), xd1); + zd0 = _mm_mul_pd(_mm_add_pd(zd0, mA5), xd0); + zd1 = _mm_mul_pd(_mm_add_pd(zd1, mA5), xd1); + zd0 = _mm_mul_pd(_mm_add_pd(zd0, mA6), xd0); + zd1 = _mm_mul_pd(_mm_add_pd(zd1, mA6), xd1); + zd0 = _mm_mul_pd(_mm_add_pd(zd0, mA7), xd0); + zd1 = _mm_mul_pd(_mm_add_pd(zd1, mA7), xd1); + + yd0 = _mm_add_pd(yd0, zd0); + yd1 = _mm_add_pd(yd1, zd1); + + _mm_storeu_pd(y + i, yd0); + _mm_storeu_pd(y + i + 2, yd1); + } + } + else +#endif + 
for( ; i <= n - 4; i += 4 ) + { + double xq; + double x0, x1, x2, x3; + double y0, y1, y2, y3; + int h0, h1, h2, h3; + + h0 = X[i].i.lo; + h1 = X[i + 1].i.lo; + buf[0].i.lo = h0; + buf[1].i.lo = h1; + + h0 = X[i].i.hi; + h1 = X[i + 1].i.hi; + buf[0].i.hi = (h0 & LOGTAB_MASK2) | (1023 << 20); + buf[1].i.hi = (h1 & LOGTAB_MASK2) | (1023 << 20); + + y0 = (((h0 >> 20) & 0x7ff) - 1023) * ln_2; + y1 = (((h1 >> 20) & 0x7ff) - 1023) * ln_2; + + h2 = X[i + 2].i.lo; + h3 = X[i + 3].i.lo; + buf[2].i.lo = h2; + buf[3].i.lo = h3; + + h0 = (h0 >> (20 - LOGTAB_SCALE - 1)) & LOGTAB_MASK * 2; + h1 = (h1 >> (20 - LOGTAB_SCALE - 1)) & LOGTAB_MASK * 2; + + y0 += icvLogTab[h0]; + y1 += icvLogTab[h1]; + + h2 = X[i + 2].i.hi; + h3 = X[i + 3].i.hi; + + x0 = LOGTAB_TRANSLATE( buf[0].d, h0 ); + x1 = LOGTAB_TRANSLATE( buf[1].d, h1 ); + + buf[2].i.hi = (h2 & LOGTAB_MASK2) | (1023 << 20); + buf[3].i.hi = (h3 & LOGTAB_MASK2) | (1023 << 20); + + y2 = (((h2 >> 20) & 0x7ff) - 1023) * ln_2; + y3 = (((h3 >> 20) & 0x7ff) - 1023) * ln_2; + + h2 = (h2 >> (20 - LOGTAB_SCALE - 1)) & LOGTAB_MASK * 2; + h3 = (h3 >> (20 - LOGTAB_SCALE - 1)) & LOGTAB_MASK * 2; + + y2 += icvLogTab[h2]; + y3 += icvLogTab[h3]; + + x2 = LOGTAB_TRANSLATE( buf[2].d, h2 ); + x3 = LOGTAB_TRANSLATE( buf[3].d, h3 ); + + y0 += LOGPOLY( x0, h0 == 510 ); + y1 += LOGPOLY( x1, h1 == 510 ); + + y[i] = y0; + y[i + 1] = y1; + + y2 += LOGPOLY( x2, h2 == 510 ); + y3 += LOGPOLY( x3, h3 == 510 ); + + y[i + 2] = y2; + y[i + 3] = y3; + } + + for( ; i < n; i++ ) + { + int h0 = X[i].i.hi; + double xq; + double x0, y0 = (((h0 >> 20) & 0x7ff) - 1023) * ln_2; + + buf[0].i.hi = (h0 & LOGTAB_MASK2) | (1023 << 20); + buf[0].i.lo = X[i].i.lo; + h0 = (h0 >> (20 - LOGTAB_SCALE - 1)) & LOGTAB_MASK * 2; + + y0 += icvLogTab[h0]; + x0 = LOGTAB_TRANSLATE( buf[0].d, h0 ); + y0 += LOGPOLY( x0, h0 == 510 ); + y[i] = y0; + } +} + +#else + +#define Log_32f ippsLn_32f_A21 +#define Log_64f ippsLn_64f_A50 + +#endif + +void log( InputArray _src, OutputArray _dst ) +{ + Mat 
src = _src.getMat(); + int type = src.type(), depth = src.depth(), cn = src.channels(); + + _dst.create( src.dims, src.size, type ); + Mat dst = _dst.getMat(); + + CV_Assert( depth == CV_32F || depth == CV_64F ); + + const Mat* arrays[] = {&src, &dst, 0}; + uchar* ptrs[2]; + NAryMatIterator it(arrays, ptrs); + int len = (int)(it.size*cn); + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + { + if( depth == CV_32F ) + Log_32f( (const float*)ptrs[0], (float*)ptrs[1], len ); + else + Log_64f( (const double*)ptrs[0], (double*)ptrs[1], len ); + } +} + +/****************************************************************************************\ +* P O W E R * +\****************************************************************************************/ + +template +static void +iPow_( const T* src, T* dst, int len, int power ) +{ + int i; + for( i = 0; i < len; i++ ) + { + WT a = 1, b = src[i]; + int p = power; + while( p > 1 ) + { + if( p & 1 ) + a *= b; + b *= b; + p >>= 1; + } + + a *= b; + dst[i] = saturate_cast(a); + } +} + + +void iPow8u(const uchar* src, uchar* dst, int len, int power) +{ + iPow_(src, dst, len, power); +} + +void iPow8s(const schar* src, schar* dst, int len, int power) +{ + iPow_(src, dst, len, power); +} + +void iPow16u(const ushort* src, ushort* dst, int len, int power) +{ + iPow_(src, dst, len, power); +} + +void iPow16s(const short* src, short* dst, int len, int power) +{ + iPow_(src, dst, len, power); +} + +void iPow32s(const int* src, int* dst, int len, int power) +{ + iPow_(src, dst, len, power); +} + +void iPow32f(const float* src, float* dst, int len, int power) +{ + iPow_(src, dst, len, power); +} + +void iPow64f(const double* src, double* dst, int len, int power) +{ + iPow_(src, dst, len, power); +} + + +typedef void (*IPowFunc)( const uchar* src, uchar* dst, int len, int power ); + +static IPowFunc ipowTab[] = +{ + (IPowFunc)iPow8u, (IPowFunc)iPow8s, (IPowFunc)iPow16u, (IPowFunc)iPow16s, + (IPowFunc)iPow32s, (IPowFunc)iPow32f, 
(IPowFunc)iPow64f, 0 +}; + + +void pow( InputArray _src, double power, OutputArray _dst ) +{ + Mat src = _src.getMat(); + int type = src.type(), depth = src.depth(), cn = src.channels(); + + _dst.create( src.dims, src.size, type ); + Mat dst = _dst.getMat(); + + int ipower = cvRound(power); + bool is_ipower = false; + + if( fabs(ipower - power) < DBL_EPSILON ) + { + if( ipower < 0 ) + { + divide( 1., src, dst ); + if( ipower == -1 ) + return; + ipower = -ipower; + src = dst; + } + + switch( ipower ) + { + case 0: + dst = Scalar::all(1); + return; + case 1: + src.copyTo(dst); + return; + case 2: + multiply(src, src, dst); + return; + default: + is_ipower = true; + } + } + else + CV_Assert( depth == CV_32F || depth == CV_64F ); + + const Mat* arrays[] = {&src, &dst, 0}; + uchar* ptrs[2]; + NAryMatIterator it(arrays, ptrs); + int len = (int)(it.size*cn); + + if( is_ipower ) + { + IPowFunc func = ipowTab[depth]; + CV_Assert( func != 0 ); + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + func( ptrs[0], ptrs[1], len, ipower ); + } + else if( fabs(fabs(power) - 0.5) < DBL_EPSILON ) + { + MathFunc func = power < 0 ? + (depth == CV_32F ? (MathFunc)InvSqrt_32f : (MathFunc)InvSqrt_64f) : + (depth == CV_32F ? 
(MathFunc)Sqrt_32f : (MathFunc)Sqrt_64f); + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + func( ptrs[0], ptrs[1], len ); + } + else + { + int j, k, blockSize = std::min(len, ((BLOCK_SIZE + cn-1)/cn)*cn); + size_t esz1 = src.elemSize1(); + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + { + for( j = 0; j < len; j += blockSize ) + { + int bsz = std::min(len - j, blockSize); + if( depth == CV_32F ) + { + const float* x = (const float*)ptrs[0]; + float* y = (float*)ptrs[1]; + + Log_32f(x, y, bsz); + for( k = 0; k < bsz; k++ ) + y[k] = (float)(y[k]*power); + Exp_32f(y, y, bsz); + } + else + { + const double* x = (const double*)ptrs[0]; + double* y = (double*)ptrs[1]; + + Log_64f(x, y, bsz); + for( k = 0; k < bsz; k++ ) + y[k] *= power; + Exp_64f(y, y, bsz); + } + ptrs[0] += bsz*esz1; + ptrs[1] += bsz*esz1; + } + } + } +} + +void sqrt(InputArray a, OutputArray b) +{ + pow(a, 0.5, b); +} + +/************************** CheckArray for NaN's, Inf's *********************************/ + +bool checkRange(InputArray _src, bool quiet, Point* pt, + double minVal, double maxVal) +{ + Mat src = _src.getMat(); + if( src.dims > 2 ) + { + const Mat* arrays[] = {&src, 0}; + Mat planes[1]; + NAryMatIterator it(arrays, planes); + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + { + if( !checkRange( it.planes[0], quiet, pt, minVal, maxVal )) + { + // todo: set index properly + return false; + } + } + return true; + } + + int depth = src.depth(); + Point badPt(-1, -1); + double badValue = 0; + + if( depth < CV_32F ) + { + double m = 0, M = 0; + Point mp, MP; + minMaxLoc(src.reshape(1,0), &m, &M, &mp, &MP); + if( M >= maxVal ) + { + badPt = MP; + badValue = M; + } + else if( m < minVal ) + { + badPt = mp; + badValue = m; + } + } + else + { + int i, loc = 0; + Size size = getContinuousSize( src, src.channels() ); + + if( depth == CV_32F ) + { + Cv32suf a, b; + int ia, ib; + const int* isrc = (const int*)src.data; + size_t step = src.step/sizeof(isrc[0]); + + a.f = 
(float)std::max(minVal, (double)-FLT_MAX); + b.f = (float)std::min(maxVal, (double)FLT_MAX); + + ia = CV_TOGGLE_FLT(a.i); + ib = CV_TOGGLE_FLT(b.i); + + for( ; badPt.x < 0 && size.height--; loc += size.width, isrc += step ) + { + for( i = 0; i < size.width; i++ ) + { + int val = isrc[i]; + val = CV_TOGGLE_FLT(val); + + if( val < ia || val >= ib ) + { + badPt = Point((loc + i) % src.cols, (loc + i) / src.cols); + badValue = ((const float*)isrc)[i]; + break; + } + } + } + } + else + { + Cv64suf a, b; + int64 ia, ib; + const int64* isrc = (const int64*)src.data; + size_t step = src.step/sizeof(isrc[0]); + + a.f = minVal; + b.f = maxVal; + + ia = CV_TOGGLE_DBL(a.i); + ib = CV_TOGGLE_DBL(b.i); + + for( ; badPt.x < 0 && size.height--; loc += size.width, isrc += step ) + { + for( i = 0; i < size.width; i++ ) + { + int64 val = isrc[i]; + val = CV_TOGGLE_DBL(val); + + if( val < ia || val >= ib ) + { + badPt = Point((loc + i) % src.cols, (loc + i) / src.cols); + badValue = ((const double*)isrc)[i]; + break; + } + } + } + } + } + + if( badPt.x >= 0 ) + { + if( pt ) + *pt = badPt; + if( !quiet ) + CV_Error_( CV_StsOutOfRange, + ("the value at (%d, %d)=%g is out of range", badPt.x, badPt.y, badValue)); + } + return badPt.x < 0; +} + +} + +CV_IMPL float cvCbrt(float value) { return cv::cubeRoot(value); } +CV_IMPL float cvFastArctan(float y, float x) { return cv::fastAtan2(y, x); } + +CV_IMPL void +cvCartToPolar( const CvArr* xarr, const CvArr* yarr, + CvArr* magarr, CvArr* anglearr, + int angle_in_degrees ) +{ + cv::Mat X = cv::cvarrToMat(xarr), Y = cv::cvarrToMat(yarr), Mag, Angle; + if( magarr ) + { + Mag = cv::cvarrToMat(magarr); + CV_Assert( Mag.size() == X.size() && Mag.type() == X.type() ); + } + if( anglearr ) + { + Angle = cv::cvarrToMat(anglearr); + CV_Assert( Angle.size() == X.size() && Angle.type() == X.type() ); + } + if( magarr ) + { + if( anglearr ) + cv::cartToPolar( X, Y, Mag, Angle, angle_in_degrees != 0 ); + else + cv::magnitude( X, Y, Mag ); + } + else + 
cv::phase( X, Y, Angle, angle_in_degrees != 0 ); +} + +CV_IMPL void +cvPolarToCart( const CvArr* magarr, const CvArr* anglearr, + CvArr* xarr, CvArr* yarr, int angle_in_degrees ) +{ + cv::Mat X, Y, Angle = cv::cvarrToMat(anglearr), Mag; + if( magarr ) + { + Mag = cv::cvarrToMat(magarr); + CV_Assert( Mag.size() == Angle.size() && Mag.type() == Angle.type() ); + } + if( xarr ) + { + X = cv::cvarrToMat(xarr); + CV_Assert( X.size() == Angle.size() && X.type() == Angle.type() ); + } + if( yarr ) + { + Y = cv::cvarrToMat(yarr); + CV_Assert( Y.size() == Angle.size() && Y.type() == Angle.type() ); + } + + cv::polarToCart( Mag, Angle, X, Y, angle_in_degrees != 0 ); +} + +CV_IMPL void cvExp( const CvArr* srcarr, CvArr* dstarr ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + CV_Assert( src.type() == dst.type() && src.size == dst.size ); + cv::exp( src, dst ); +} + +CV_IMPL void cvLog( const CvArr* srcarr, CvArr* dstarr ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + CV_Assert( src.type() == dst.type() && src.size == dst.size ); + cv::log( src, dst ); +} + +CV_IMPL void cvPow( const CvArr* srcarr, CvArr* dstarr, double power ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + CV_Assert( src.type() == dst.type() && src.size == dst.size ); + cv::pow( src, power, dst ); +} + +CV_IMPL int cvCheckArr( const CvArr* arr, int flags, + double minVal, double maxVal ) +{ + if( (flags & CV_CHECK_RANGE) == 0 ) + minVal = -DBL_MAX, maxVal = DBL_MAX; + return cv::checkRange(cv::cvarrToMat(arr), (flags & CV_CHECK_QUIET) != 0, 0, minVal, maxVal ); +} + + +/* + Finds real roots of cubic, quadratic or linear equation. + The original code has been taken from Ken Turkowski web page + (http://www.worldserver.com/turk/opensource/) and adopted for OpenCV. + Here is the copyright notice. + + ----------------------------------------------------------------------- + Copyright (C) 1978-1999 Ken Turkowski. 
+ + All rights reserved. + + Warranty Information + Even though I have reviewed this software, I make no warranty + or representation, either express or implied, with respect to this + software, its quality, accuracy, merchantability, or fitness for a + particular purpose. As a result, this software is provided "as is," + and you, its user, are assuming the entire risk as to its quality + and accuracy. + + This code may be used and freely distributed as long as it includes + this copyright notice and the above warranty information. + ----------------------------------------------------------------------- +*/ + +int cv::solveCubic( InputArray _coeffs, OutputArray _roots ) +{ + const int n0 = 3; + Mat coeffs = _coeffs.getMat(); + int ctype = coeffs.type(); + + CV_Assert( ctype == CV_32F || ctype == CV_64F ); + CV_Assert( (coeffs.size() == Size(n0, 1) || + coeffs.size() == Size(n0+1, 1) || + coeffs.size() == Size(1, n0) || + coeffs.size() == Size(1, n0+1)) ); + + _roots.create(n0, 1, ctype, -1, true, DEPTH_MASK_FLT); + Mat roots = _roots.getMat(); + + int i = -1, n = 0; + double a0 = 1., a1, a2, a3; + double x0 = 0., x1 = 0., x2 = 0.; + int ncoeffs = coeffs.rows + coeffs.cols - 1; + + if( ctype == CV_32FC1 ) + { + if( ncoeffs == 4 ) + a0 = coeffs.at(++i); + + a1 = coeffs.at(i+1); + a2 = coeffs.at(i+2); + a3 = coeffs.at(i+3); + } + else + { + if( ncoeffs == 4 ) + a0 = coeffs.at(++i); + + a1 = coeffs.at(i+1); + a2 = coeffs.at(i+2); + a3 = coeffs.at(i+3); + } + + if( a0 == 0 ) + { + if( a1 == 0 ) + { + if( a2 == 0 ) + n = a3 == 0 ? -1 : 0; + else + { + // linear equation + x0 = -a3/a2; + n = 1; + } + } + else + { + // quadratic equation + double d = a2*a2 - 4*a1*a3; + if( d >= 0 ) + { + d = sqrt(d); + double q1 = (-a2 + d) * 0.5; + double q2 = (a2 + d) * -0.5; + if( fabs(q1) > fabs(q2) ) + { + x0 = q1 / a1; + x1 = a3 / q1; + } + else + { + x0 = q2 / a1; + x1 = a3 / q2; + } + n = d > 0 ? 
2 : 1; + } + } + } + else + { + a0 = 1./a0; + a1 *= a0; + a2 *= a0; + a3 *= a0; + + double Q = (a1 * a1 - 3 * a2) * (1./9); + double R = (2 * a1 * a1 * a1 - 9 * a1 * a2 + 27 * a3) * (1./54); + double Qcubed = Q * Q * Q; + double d = Qcubed - R * R; + + if( d >= 0 ) + { + double theta = acos(R / sqrt(Qcubed)); + double sqrtQ = sqrt(Q); + double t0 = -2 * sqrtQ; + double t1 = theta * (1./3); + double t2 = a1 * (1./3); + x0 = t0 * cos(t1) - t2; + x1 = t0 * cos(t1 + (2.*CV_PI/3)) - t2; + x2 = t0 * cos(t1 + (4.*CV_PI/3)) - t2; + n = 3; + } + else + { + double e; + d = sqrt(-d); + e = pow(d + fabs(R), 0.333333333333); + if( R > 0 ) + e = -e; + x0 = (e + Q / e) - a1 * (1./3); + n = 1; + } + } + + if( roots.type() == CV_32FC1 ) + { + roots.at(0) = (float)x0; + roots.at(1) = (float)x1; + roots.at(2) = (float)x2; + } + else + { + roots.at(0) = x0; + roots.at(1) = x1; + roots.at(2) = x2; + } + + return n; +} + +/* finds complex roots of a polynomial using Durand-Kerner method: + http://en.wikipedia.org/wiki/Durand%E2%80%93Kerner_method */ +double cv::solvePoly( InputArray _coeffs0, OutputArray _roots0, int maxIters ) +{ + typedef Complex C; + + double maxDiff = 0; + int iter, i, j; + Mat coeffs0 = _coeffs0.getMat(); + int ctype = _coeffs0.type(); + int cdepth = CV_MAT_DEPTH(ctype); + + CV_Assert( CV_MAT_DEPTH(ctype) >= CV_32F && CV_MAT_CN(ctype) <= 2 ); + CV_Assert( coeffs0.rows == 1 || coeffs0.cols == 1 ); + + int n = coeffs0.cols + coeffs0.rows - 2; + + _roots0.create(n, 1, CV_MAKETYPE(cdepth, 2), -1, true, DEPTH_MASK_FLT); + Mat roots0 = _roots0.getMat(); + + AutoBuffer buf(n*2+2); + C *coeffs = buf, *roots = coeffs + n + 1; + Mat coeffs1(coeffs0.size(), CV_MAKETYPE(CV_64F, coeffs0.channels()), coeffs0.channels() == 2 ? 
coeffs : roots); + coeffs0.convertTo(coeffs1, coeffs1.type()); + if( coeffs0.channels() == 1 ) + { + const double* rcoeffs = (const double*)roots; + for( i = 0; i <= n; i++ ) + coeffs[i] = C(rcoeffs[i], 0); + } + + C p(1, 0), r(1, 1); + + for( i = 0; i < n; i++ ) + { + roots[i] = p; + p = p * r; + } + + maxIters = maxIters <= 0 ? 1000 : maxIters; + for( iter = 0; iter < maxIters; iter++ ) + { + maxDiff = 0; + for( i = 0; i < n; i++ ) + { + p = roots[i]; + C num = coeffs[n], denom = 1; + for( j = 0; j < n; j++ ) + { + num = num*p + coeffs[n-j-1]; + if( j != i ) denom = denom * (p - roots[j]); + } + num /= denom; + roots[i] = p - num; + maxDiff = max(maxDiff, abs(num)); + } + if( maxDiff <= 0 ) + break; + } + + if( coeffs0.channels() == 1 ) + { + const double verySmallEps = 1e-100; + for( i = 0; i < n; i++ ) + if( fabs(roots[i].im) < verySmallEps ) + roots[i].im = 0; + } + + Mat(roots0.size(), CV_64FC2, roots).convertTo(roots0, roots0.type()); + return maxDiff; +} + + +CV_IMPL int +cvSolveCubic( const CvMat* coeffs, CvMat* roots ) +{ + cv::Mat _coeffs = cv::cvarrToMat(coeffs), _roots = cv::cvarrToMat(roots), _roots0 = _roots; + int nroots = cv::solveCubic(_coeffs, _roots); + CV_Assert( _roots.data == _roots0.data ); // check that the array of roots was not reallocated + return nroots; +} + + +void cvSolvePoly(const CvMat* a, CvMat *r, int maxiter, int) +{ + cv::Mat _a = cv::cvarrToMat(a), _r = cv::cvarrToMat(r), _r0 = r; + cv::solvePoly(_a, _r, maxiter); + CV_Assert( _r.data == _r0.data ); // check that the array of roots was not reallocated +} + + +/* End of file. */ diff --git a/opencv/core/matmul.cpp b/opencv/core/matmul.cpp new file mode 100644 index 0000000..45cb158 --- /dev/null +++ b/opencv/core/matmul.cpp @@ -0,0 +1,3139 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#include "precomp.hpp" + +#ifdef HAVE_IPP +#include "ippversion.h" +#endif + +namespace cv +{ + +/****************************************************************************************\ +* GEMM * +\****************************************************************************************/ + +static void +GEMM_CopyBlock( const uchar* src, size_t src_step, + uchar* dst, size_t dst_step, + Size size, size_t pix_size ) +{ + int j; + size.width *= (int)(pix_size / sizeof(int)); + + for( ; size.height--; src += src_step, dst += dst_step ) + { + for( j = 0; j <= size.width - 4; j += 4 ) + { + int t0 = ((const int*)src)[j]; + int t1 = ((const int*)src)[j+1]; + ((int*)dst)[j] = t0; + ((int*)dst)[j+1] = t1; + t0 = ((const int*)src)[j+2]; + t1 = ((const int*)src)[j+3]; + ((int*)dst)[j+2] = t0; + ((int*)dst)[j+3] = t1; + } + + for( ; j < size.width; j++ ) + ((int*)dst)[j] = ((const int*)src)[j]; + } +} + + +static void +GEMM_TransposeBlock( const uchar* src, size_t src_step, + uchar* dst, size_t dst_step, + Size size, size_t pix_size ) +{ + int i, j; + for( i = 0; i < size.width; i++, dst += dst_step, src += pix_size ) + { + const uchar* _src = src; + switch( pix_size ) + { + case sizeof(int): + for( j = 0; j < size.height; j++, _src += src_step ) + ((int*)dst)[j] = ((int*)_src)[0]; + break; + case sizeof(int)*2: + for( j = 0; j < size.height*2; j += 2, _src += src_step ) + { + int t0 = ((int*)_src)[0]; + int t1 = ((int*)_src)[1]; + ((int*)dst)[j] = t0; + ((int*)dst)[j+1] = t1; + } + break; + case sizeof(int)*4: + for( j = 0; j < size.height*4; j += 4, _src += src_step ) + { + int t0 = ((int*)_src)[0]; + int t1 = ((int*)_src)[1]; + ((int*)dst)[j] = t0; + ((int*)dst)[j+1] = t1; + t0 = ((int*)_src)[2]; + t1 = ((int*)_src)[3]; + ((int*)dst)[j+2] = t0; + ((int*)dst)[j+3] = t1; + } + break; + default: + assert(0); + return; + } + } +} + + +template static void +GEMMSingleMul( const T* a_data, size_t a_step, + const T* b_data, size_t b_step, + const T* c_data, size_t 
c_step, + T* d_data, size_t d_step, + Size a_size, Size d_size, + double alpha, double beta, int flags ) +{ + int i, j, k, n = a_size.width, m = d_size.width, drows = d_size.height; + const T *_a_data = a_data, *_b_data = b_data, *_c_data = c_data; + cv::AutoBuffer _a_buf; + T* a_buf = 0; + size_t a_step0, a_step1, c_step0, c_step1, t_step; + + a_step /= sizeof(a_data[0]); + b_step /= sizeof(b_data[0]); + c_step /= sizeof(c_data[0]); + d_step /= sizeof(d_data[0]); + a_step0 = a_step; + a_step1 = 1; + + if( !c_data ) + c_step0 = c_step1 = 0; + else if( !(flags & GEMM_3_T) ) + c_step0 = c_step, c_step1 = 1; + else + c_step0 = 1, c_step1 = c_step; + + if( flags & GEMM_1_T ) + { + CV_SWAP( a_step0, a_step1, t_step ); + n = a_size.height; + if( a_step > 1 && n > 1 ) + { + _a_buf.allocate(n); + a_buf = _a_buf; + } + } + + if( n == 1 ) /* external product */ + { + cv::AutoBuffer _b_buf; + T* b_buf = 0; + + if( a_step > 1 && a_size.height > 1 ) + { + _a_buf.allocate(drows); + a_buf = _a_buf; + for( k = 0; k < drows; k++ ) + a_buf[k] = a_data[a_step*k]; + a_data = a_buf; + } + + if( b_step > 1 ) + { + _b_buf.allocate(d_size.width); + b_buf = _b_buf; + for( j = 0; j < d_size.width; j++ ) + b_buf[j] = b_data[j*b_step]; + b_data = b_buf; + } + + for( i = 0; i < drows; i++, _c_data += c_step0, d_data += d_step ) + { + WT al = WT(a_data[i])*alpha; + c_data = _c_data; + for( j = 0; j <= d_size.width - 2; j += 2, c_data += 2*c_step1 ) + { + WT s0 = al*WT(b_data[j]); + WT s1 = al*WT(b_data[j+1]); + if( !c_data ) + { + d_data[j] = T(s0); + d_data[j+1] = T(s1); + } + else + { + d_data[j] = T(s0 + WT(c_data[0])*beta); + d_data[j+1] = T(s1 + WT(c_data[c_step1])*beta); + } + } + + for( ; j < d_size.width; j++, c_data += c_step1 ) + { + WT s0 = al*WT(b_data[j]); + if( !c_data ) + d_data[j] = T(s0); + else + d_data[j] = T(s0 + WT(c_data[0])*beta); + } + } + } + else if( flags & GEMM_2_T ) /* A * Bt */ + { + for( i = 0; i < drows; i++, _a_data += a_step0, _c_data += c_step0, d_data += 
d_step ) + { + a_data = _a_data; + b_data = _b_data; + c_data = _c_data; + + if( a_buf ) + { + for( k = 0; k < n; k++ ) + a_buf[k] = a_data[a_step1*k]; + a_data = a_buf; + } + + for( j = 0; j < d_size.width; j++, b_data += b_step, + c_data += c_step1 ) + { + WT s0(0), s1(0), s2(0), s3(0); + + for( k = 0; k <= n - 4; k += 4 ) + { + s0 += WT(a_data[k])*WT(b_data[k]); + s1 += WT(a_data[k+1])*WT(b_data[k+1]); + s2 += WT(a_data[k+2])*WT(b_data[k+2]); + s3 += WT(a_data[k+3])*WT(b_data[k+3]); + } + + for( ; k < n; k++ ) + s0 += WT(a_data[k])*WT(b_data[k]); + s0 = (s0+s1+s2+s3)*alpha; + + if( !c_data ) + d_data[j] = T(s0); + else + d_data[j] = T(s0 + WT(c_data[0])*beta); + } + } + } + else if( d_size.width*sizeof(d_data[0]) <= 1600 ) + { + for( i = 0; i < drows; i++, _a_data += a_step0, + _c_data += c_step0, + d_data += d_step ) + { + a_data = _a_data, c_data = _c_data; + + if( a_buf ) + { + for( k = 0; k < n; k++ ) + a_buf[k] = a_data[a_step1*k]; + a_data = a_buf; + } + + for( j = 0; j <= m - 4; j += 4, c_data += 4*c_step1 ) + { + const T* b = _b_data + j; + WT s0(0), s1(0), s2(0), s3(0); + + for( k = 0; k < n; k++, b += b_step ) + { + WT a(a_data[k]); + s0 += a * WT(b[0]); s1 += a * WT(b[1]); + s2 += a * WT(b[2]); s3 += a * WT(b[3]); + } + + if( !c_data ) + { + d_data[j] = T(s0*alpha); + d_data[j+1] = T(s1*alpha); + d_data[j+2] = T(s2*alpha); + d_data[j+3] = T(s3*alpha); + } + else + { + s0 = s0*alpha; s1 = s1*alpha; + s2 = s2*alpha; s3 = s3*alpha; + d_data[j] = T(s0 + WT(c_data[0])*beta); + d_data[j+1] = T(s1 + WT(c_data[c_step1])*beta); + d_data[j+2] = T(s2 + WT(c_data[c_step1*2])*beta); + d_data[j+3] = T(s3 + WT(c_data[c_step1*3])*beta); + } + } + + for( ; j < m; j++, c_data += c_step1 ) + { + const T* b = _b_data + j; + WT s0(0); + + for( k = 0; k < n; k++, b += b_step ) + s0 += WT(a_data[k]) * WT(b[0]); + + s0 = s0*alpha; + if( !c_data ) + d_data[j] = T(s0); + else + d_data[j] = T(s0 + WT(c_data[0])*beta); + } + } + } + else + { + cv::AutoBuffer _d_buf(m); + WT* 
d_buf = _d_buf; + + for( i = 0; i < drows; i++, _a_data += a_step0, _c_data += c_step0, d_data += d_step ) + { + a_data = _a_data; + b_data = _b_data; + c_data = _c_data; + + if( a_buf ) + { + for( k = 0; k < n; k++ ) + a_buf[k] = _a_data[a_step1*k]; + a_data = a_buf; + } + + for( j = 0; j < m; j++ ) + d_buf[j] = WT(0); + + for( k = 0; k < n; k++, b_data += b_step ) + { + WT al(a_data[k]); + + for( j = 0; j <= m - 4; j += 4 ) + { + WT t0 = d_buf[j] + WT(b_data[j])*al; + WT t1 = d_buf[j+1] + WT(b_data[j+1])*al; + d_buf[j] = t0; + d_buf[j+1] = t1; + t0 = d_buf[j+2] + WT(b_data[j+2])*al; + t1 = d_buf[j+3] + WT(b_data[j+3])*al; + d_buf[j+2] = t0; + d_buf[j+3] = t1; + } + + for( ; j < m; j++ ) + d_buf[j] += WT(b_data[j])*al; + } + + if( !c_data ) + for( j = 0; j < m; j++ ) + d_data[j] = T(d_buf[j]*alpha); + else + for( j = 0; j < m; j++, c_data += c_step1 ) + { + WT t = d_buf[j]*alpha; + d_data[j] = T(t + WT(c_data[0])*beta); + } + } + } +} + + +template static void +GEMMBlockMul( const T* a_data, size_t a_step, + const T* b_data, size_t b_step, + WT* d_data, size_t d_step, + Size a_size, Size d_size, int flags ) +{ + int i, j, k, n = a_size.width, m = d_size.width; + const T *_a_data = a_data, *_b_data = b_data; + cv::AutoBuffer _a_buf; + T* a_buf = 0; + size_t a_step0, a_step1, t_step; + int do_acc = flags & 16; + + a_step /= sizeof(a_data[0]); + b_step /= sizeof(b_data[0]); + d_step /= sizeof(d_data[0]); + + a_step0 = a_step; + a_step1 = 1; + + if( flags & GEMM_1_T ) + { + CV_SWAP( a_step0, a_step1, t_step ); + n = a_size.height; + _a_buf.allocate(n); + a_buf = _a_buf; + } + + if( flags & GEMM_2_T ) + { + /* second operand is transposed */ + for( i = 0; i < d_size.height; i++, _a_data += a_step0, d_data += d_step ) + { + a_data = _a_data; b_data = _b_data; + + if( a_buf ) + { + for( k = 0; k < n; k++ ) + a_buf[k] = a_data[a_step1*k]; + a_data = a_buf; + } + + for( j = 0; j < d_size.width; j++, b_data += b_step ) + { + WT s0 = do_acc ? 
d_data[j]:WT(0), s1(0); + for( k = 0; k <= n - 2; k += 2 ) + { + s0 += WT(a_data[k])*WT(b_data[k]); + s1 += WT(a_data[k+1])*WT(b_data[k+1]); + } + + for( ; k < n; k++ ) + s0 += WT(a_data[k])*WT(b_data[k]); + + d_data[j] = s0 + s1; + } + } + } + else + { + for( i = 0; i < d_size.height; i++, _a_data += a_step0, d_data += d_step ) + { + a_data = _a_data, b_data = _b_data; + + if( a_buf ) + { + for( k = 0; k < n; k++ ) + a_buf[k] = a_data[a_step1*k]; + a_data = a_buf; + } + + for( j = 0; j <= m - 4; j += 4 ) + { + WT s0, s1, s2, s3; + const T* b = b_data + j; + + if( do_acc ) + { + s0 = d_data[j]; s1 = d_data[j+1]; + s2 = d_data[j+2]; s3 = d_data[j+3]; + } + else + s0 = s1 = s2 = s3 = WT(0); + + for( k = 0; k < n; k++, b += b_step ) + { + WT a(a_data[k]); + s0 += a * WT(b[0]); s1 += a * WT(b[1]); + s2 += a * WT(b[2]); s3 += a * WT(b[3]); + } + + d_data[j] = s0; d_data[j+1] = s1; + d_data[j+2] = s2; d_data[j+3] = s3; + } + + for( ; j < m; j++ ) + { + const T* b = b_data + j; + WT s0 = do_acc ? 
d_data[j] : WT(0); + + for( k = 0; k < n; k++, b += b_step ) + s0 += WT(a_data[k]) * WT(b[0]); + + d_data[j] = s0; + } + } + } +} + + +template static void +GEMMStore( const T* c_data, size_t c_step, + const WT* d_buf, size_t d_buf_step, + T* d_data, size_t d_step, Size d_size, + double alpha, double beta, int flags ) +{ + const T* _c_data = c_data; + int j; + size_t c_step0, c_step1; + + c_step /= sizeof(c_data[0]); + d_buf_step /= sizeof(d_buf[0]); + d_step /= sizeof(d_data[0]); + + if( !c_data ) + c_step0 = c_step1 = 0; + else if( !(flags & GEMM_3_T) ) + c_step0 = c_step, c_step1 = 1; + else + c_step0 = 1, c_step1 = c_step; + + for( ; d_size.height--; _c_data += c_step0, d_buf += d_buf_step, d_data += d_step ) + { + if( _c_data ) + { + c_data = _c_data; + for( j = 0; j <= d_size.width - 4; j += 4, c_data += 4*c_step1 ) + { + WT t0 = alpha*d_buf[j]; + WT t1 = alpha*d_buf[j+1]; + t0 += beta*WT(c_data[0]); + t1 += beta*WT(c_data[c_step1]); + d_data[j] = T(t0); + d_data[j+1] = T(t1); + t0 = alpha*d_buf[j+2]; + t1 = alpha*d_buf[j+3]; + t0 += beta*WT(c_data[c_step1*2]); + t1 += beta*WT(c_data[c_step1*3]); + d_data[j+2] = T(t0); + d_data[j+3] = T(t1); + } + for( ; j < d_size.width; j++, c_data += c_step1 ) + { + WT t0 = alpha*d_buf[j]; + d_data[j] = T(t0 + WT(c_data[0])*beta); + } + } + else + { + for( j = 0; j <= d_size.width - 4; j += 4 ) + { + WT t0 = alpha*d_buf[j]; + WT t1 = alpha*d_buf[j+1]; + d_data[j] = T(t0); + d_data[j+1] = T(t1); + t0 = alpha*d_buf[j+2]; + t1 = alpha*d_buf[j+3]; + d_data[j+2] = T(t0); + d_data[j+3] = T(t1); + } + for( ; j < d_size.width; j++ ) + d_data[j] = T(alpha*d_buf[j]); + } + } +} + + +typedef void (*GEMMSingleMulFunc)( const void* src1, size_t step1, + const void* src2, size_t step2, const void* src3, size_t step3, + void* dst, size_t dststep, Size srcsize, Size dstsize, + double alpha, double beta, int flags ); + +typedef void (*GEMMBlockMulFunc)( const void* src1, size_t step1, + const void* src2, size_t step2, void* dst, size_t 
dststep, + Size srcsize, Size dstsize, int flags ); + +typedef void (*GEMMStoreFunc)( const void* src1, size_t step1, + const void* src2, size_t step2, void* dst, size_t dststep, + Size dstsize, double alpha, double beta, int flags ); + +static void GEMMSingleMul_32f( const float* a_data, size_t a_step, + const float* b_data, size_t b_step, + const float* c_data, size_t c_step, + float* d_data, size_t d_step, + Size a_size, Size d_size, + double alpha, double beta, int flags ) +{ + GEMMSingleMul(a_data, a_step, b_data, b_step, c_data, + c_step, d_data, d_step, a_size, d_size, + alpha, beta, flags); +} + +static void GEMMSingleMul_64f( const double* a_data, size_t a_step, + const double* b_data, size_t b_step, + const double* c_data, size_t c_step, + double* d_data, size_t d_step, + Size a_size, Size d_size, + double alpha, double beta, int flags ) +{ + GEMMSingleMul(a_data, a_step, b_data, b_step, c_data, + c_step, d_data, d_step, a_size, d_size, + alpha, beta, flags); +} + + +static void GEMMSingleMul_32fc( const Complexf* a_data, size_t a_step, + const Complexf* b_data, size_t b_step, + const Complexf* c_data, size_t c_step, + Complexf* d_data, size_t d_step, + Size a_size, Size d_size, + double alpha, double beta, int flags ) +{ + GEMMSingleMul(a_data, a_step, b_data, b_step, c_data, + c_step, d_data, d_step, a_size, d_size, + alpha, beta, flags); +} + +static void GEMMSingleMul_64fc( const Complexd* a_data, size_t a_step, + const Complexd* b_data, size_t b_step, + const Complexd* c_data, size_t c_step, + Complexd* d_data, size_t d_step, + Size a_size, Size d_size, + double alpha, double beta, int flags ) +{ + GEMMSingleMul(a_data, a_step, b_data, b_step, c_data, + c_step, d_data, d_step, a_size, d_size, + alpha, beta, flags); +} + +static void GEMMBlockMul_32f( const float* a_data, size_t a_step, + const float* b_data, size_t b_step, + double* d_data, size_t d_step, + Size a_size, Size d_size, int flags ) +{ + GEMMBlockMul(a_data, a_step, b_data, b_step, 
d_data, d_step, a_size, d_size, flags); +} + + +static void GEMMBlockMul_64f( const double* a_data, size_t a_step, + const double* b_data, size_t b_step, + double* d_data, size_t d_step, + Size a_size, Size d_size, int flags ) +{ + GEMMBlockMul(a_data, a_step, b_data, b_step, d_data, d_step, a_size, d_size, flags); +} + + +static void GEMMBlockMul_32fc( const Complexf* a_data, size_t a_step, + const Complexf* b_data, size_t b_step, + Complexd* d_data, size_t d_step, + Size a_size, Size d_size, int flags ) +{ + GEMMBlockMul(a_data, a_step, b_data, b_step, d_data, d_step, a_size, d_size, flags); +} + + +static void GEMMBlockMul_64fc( const Complexd* a_data, size_t a_step, + const Complexd* b_data, size_t b_step, + Complexd* d_data, size_t d_step, + Size a_size, Size d_size, int flags ) +{ + GEMMBlockMul(a_data, a_step, b_data, b_step, d_data, d_step, a_size, d_size, flags); +} + + +static void GEMMStore_32f( const float* c_data, size_t c_step, + const double* d_buf, size_t d_buf_step, + float* d_data, size_t d_step, Size d_size, + double alpha, double beta, int flags ) +{ + GEMMStore(c_data, c_step, d_buf, d_buf_step, d_data, d_step, d_size, alpha, beta, flags); +} + + +static void GEMMStore_64f( const double* c_data, size_t c_step, + const double* d_buf, size_t d_buf_step, + double* d_data, size_t d_step, Size d_size, + double alpha, double beta, int flags ) +{ + GEMMStore(c_data, c_step, d_buf, d_buf_step, d_data, d_step, d_size, alpha, beta, flags); +} + + +static void GEMMStore_32fc( const Complexf* c_data, size_t c_step, + const Complexd* d_buf, size_t d_buf_step, + Complexf* d_data, size_t d_step, Size d_size, + double alpha, double beta, int flags ) +{ + GEMMStore(c_data, c_step, d_buf, d_buf_step, d_data, d_step, d_size, alpha, beta, flags); +} + + +static void GEMMStore_64fc( const Complexd* c_data, size_t c_step, + const Complexd* d_buf, size_t d_buf_step, + Complexd* d_data, size_t d_step, Size d_size, + double alpha, double beta, int flags ) +{ + 
GEMMStore(c_data, c_step, d_buf, d_buf_step, d_data, d_step, d_size, alpha, beta, flags); +} + +} + +void cv::gemm( InputArray matA, InputArray matB, double alpha, + InputArray matC, double beta, OutputArray matD, int flags ) +{ + const int block_lin_size = 128; + const int block_size = block_lin_size * block_lin_size; + + static double zero[] = {0,0,0,0}; + static float zerof[] = {0,0,0,0}; + + Mat A = matA.getMat(), B = matB.getMat(), C = beta != 0 ? matC.getMat() : Mat(); + Size a_size = A.size(), d_size; + int i, len = 0, type = A.type(); + + CV_Assert( type == B.type() && (type == CV_32FC1 || type == CV_64FC1 || type == CV_32FC2 || type == CV_64FC2) ); + + switch( flags & (GEMM_1_T|GEMM_2_T) ) + { + case 0: + d_size = Size( B.cols, a_size.height ); + len = B.rows; + CV_Assert( a_size.width == len ); + break; + case 1: + d_size = Size( B.cols, a_size.width ); + len = B.rows; + CV_Assert( a_size.height == len ); + break; + case 2: + d_size = Size( B.rows, a_size.height ); + len = B.cols; + CV_Assert( a_size.width == len ); + break; + case 3: + d_size = Size( B.rows, a_size.width ); + len = B.cols; + CV_Assert( a_size.height == len ); + break; + } + + if( C.data ) + { + CV_Assert( C.type() == type && + (((flags&GEMM_3_T) == 0 && C.rows == d_size.height && C.cols == d_size.width) || + ((flags&GEMM_3_T) != 0 && C.rows == d_size.width && C.cols == d_size.height))); + } + + matD.create( d_size.height, d_size.width, type ); + Mat D = matD.getMat(); + if( (flags & GEMM_3_T) != 0 && C.data == D.data ) + { + transpose( C, C ); + flags &= ~GEMM_3_T; + } + + if( flags == 0 && 2 <= len && len <= 4 && (len == d_size.width || len == d_size.height) ) + { + if( type == CV_32F ) + { + float* d = (float*)D.data; + const float *a = (const float*)A.data, + *b = (const float*)B.data, + *c = (const float*)C.data; + size_t d_step = D.step/sizeof(d[0]), + a_step = A.step/sizeof(a[0]), + b_step = B.step/sizeof(b[0]), + c_step = C.data ? 
C.step/sizeof(c[0]) : 0; + + if( !c ) + c = zerof; + + switch( len ) + { + case 2: + if( len == d_size.width && b != d ) + { + for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step ) + { + float t0 = a[0]*b[0] + a[1]*b[b_step]; + float t1 = a[0]*b[1] + a[1]*b[b_step+1]; + d[0] = (float)(t0*alpha + c[0]*beta); + d[1] = (float)(t1*alpha + c[1]*beta); + } + } + else if( a != d ) + { + int c_step0 = 1; + if( c == zerof ) + { + c_step0 = 0; + c_step = 1; + } + + for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 ) + { + float t0 = a[0]*b[0] + a[1]*b[b_step]; + float t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step]; + d[0] = (float)(t0*alpha + c[0]*beta); + d[d_step] = (float)(t1*alpha + c[c_step]*beta); + } + } + else + break; + return; + case 3: + if( len == d_size.width && b != d ) + { + for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step ) + { + float t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2]; + float t1 = a[0]*b[1] + a[1]*b[b_step+1] + a[2]*b[b_step*2+1]; + float t2 = a[0]*b[2] + a[1]*b[b_step+2] + a[2]*b[b_step*2+2]; + d[0] = (float)(t0*alpha + c[0]*beta); + d[1] = (float)(t1*alpha + c[1]*beta); + d[2] = (float)(t2*alpha + c[2]*beta); + } + } + else if( a != d ) + { + int c_step0 = 1; + if( c == zerof ) + { + c_step0 = 0; + c_step = 1; + } + + for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 ) + { + float t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2]; + float t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step] + a[a_step+2]*b[b_step*2]; + float t2 = a[a_step*2]*b[0] + a[a_step*2+1]*b[b_step] + a[a_step*2+2]*b[b_step*2]; + + d[0] = (float)(t0*alpha + c[0]*beta); + d[d_step] = (float)(t1*alpha + c[c_step]*beta); + d[d_step*2] = (float)(t2*alpha + c[c_step*2]*beta); + } + } + else + break; + return; + case 4: + if( len == d_size.width && b != d ) + { + for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step ) + { + float t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2] + a[3]*b[b_step*3]; + 
float t1 = a[0]*b[1] + a[1]*b[b_step+1] + a[2]*b[b_step*2+1] + a[3]*b[b_step*3+1]; + float t2 = a[0]*b[2] + a[1]*b[b_step+2] + a[2]*b[b_step*2+2] + a[3]*b[b_step*3+2]; + float t3 = a[0]*b[3] + a[1]*b[b_step+3] + a[2]*b[b_step*2+3] + a[3]*b[b_step*3+3]; + d[0] = (float)(t0*alpha + c[0]*beta); + d[1] = (float)(t1*alpha + c[1]*beta); + d[2] = (float)(t2*alpha + c[2]*beta); + d[3] = (float)(t3*alpha + c[3]*beta); + } + } + else if( len <= 16 && a != d ) + { + int c_step0 = 1; + if( c == zerof ) + { + c_step0 = 0; + c_step = 1; + } + + for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 ) + { + float t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2] + a[3]*b[b_step*3]; + float t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step] + + a[a_step+2]*b[b_step*2] + a[a_step+3]*b[b_step*3]; + float t2 = a[a_step*2]*b[0] + a[a_step*2+1]*b[b_step] + + a[a_step*2+2]*b[b_step*2] + a[a_step*2+3]*b[b_step*3]; + float t3 = a[a_step*3]*b[0] + a[a_step*3+1]*b[b_step] + + a[a_step*3+2]*b[b_step*2] + a[a_step*3+3]*b[b_step*3]; + d[0] = (float)(t0*alpha + c[0]*beta); + d[d_step] = (float)(t1*alpha + c[c_step]*beta); + d[d_step*2] = (float)(t2*alpha + c[c_step*2]*beta); + d[d_step*3] = (float)(t3*alpha + c[c_step*3]*beta); + } + } + else + break; + return; + } + } + + if( type == CV_64F ) + { + double* d = (double*)D.data; + const double *a = (const double*)A.data, + *b = (const double*)B.data, + *c = (const double*)C.data; + size_t d_step = D.step/sizeof(d[0]), + a_step = A.step/sizeof(a[0]), + b_step = B.step/sizeof(b[0]), + c_step = C.data ? 
C.step/sizeof(c[0]) : 0; + if( !c ) + c = zero; + + switch( len ) + { + case 2: + if( len == d_size.width && b != d ) + { + for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step ) + { + double t0 = a[0]*b[0] + a[1]*b[b_step]; + double t1 = a[0]*b[1] + a[1]*b[b_step+1]; + d[0] = t0*alpha + c[0]*beta; + d[1] = t1*alpha + c[1]*beta; + } + } + else if( a != d ) + { + int c_step0 = 1; + if( c == zero ) + { + c_step0 = 0; + c_step = 1; + } + + for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 ) + { + double t0 = a[0]*b[0] + a[1]*b[b_step]; + double t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step]; + d[0] = t0*alpha + c[0]*beta; + d[d_step] = t1*alpha + c[c_step]*beta; + } + } + else + break; + return; + case 3: + if( len == d_size.width && b != d ) + { + for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step ) + { + double t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2]; + double t1 = a[0]*b[1] + a[1]*b[b_step+1] + a[2]*b[b_step*2+1]; + double t2 = a[0]*b[2] + a[1]*b[b_step+2] + a[2]*b[b_step*2+2]; + d[0] = t0*alpha + c[0]*beta; + d[1] = t1*alpha + c[1]*beta; + d[2] = t2*alpha + c[2]*beta; + } + } + else if( a != d ) + { + int c_step0 = 1; + if( c == zero ) + { + c_step0 = 0; + c_step = 1; + } + + for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 ) + { + double t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2]; + double t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step] + a[a_step+2]*b[b_step*2]; + double t2 = a[a_step*2]*b[0] + a[a_step*2+1]*b[b_step] + a[a_step*2+2]*b[b_step*2]; + + d[0] = t0*alpha + c[0]*beta; + d[d_step] = t1*alpha + c[c_step]*beta; + d[d_step*2] = t2*alpha + c[c_step*2]*beta; + } + } + else + break; + return; + case 4: + if( len == d_size.width && b != d ) + { + for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step ) + { + double t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2] + a[3]*b[b_step*3]; + double t1 = a[0]*b[1] + a[1]*b[b_step+1] + a[2]*b[b_step*2+1] + a[3]*b[b_step*3+1]; 
+ double t2 = a[0]*b[2] + a[1]*b[b_step+2] + a[2]*b[b_step*2+2] + a[3]*b[b_step*3+2]; + double t3 = a[0]*b[3] + a[1]*b[b_step+3] + a[2]*b[b_step*2+3] + a[3]*b[b_step*3+3]; + d[0] = t0*alpha + c[0]*beta; + d[1] = t1*alpha + c[1]*beta; + d[2] = t2*alpha + c[2]*beta; + d[3] = t3*alpha + c[3]*beta; + } + } + else if( d_size.width <= 16 && a != d ) + { + int c_step0 = 1; + if( c == zero ) + { + c_step0 = 0; + c_step = 1; + } + + for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 ) + { + double t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2] + a[3]*b[b_step*3]; + double t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step] + + a[a_step+2]*b[b_step*2] + a[a_step+3]*b[b_step*3]; + double t2 = a[a_step*2]*b[0] + a[a_step*2+1]*b[b_step] + + a[a_step*2+2]*b[b_step*2] + a[a_step*2+3]*b[b_step*3]; + double t3 = a[a_step*3]*b[0] + a[a_step*3+1]*b[b_step] + + a[a_step*3+2]*b[b_step*2] + a[a_step*3+3]*b[b_step*3]; + d[0] = t0*alpha + c[0]*beta; + d[d_step] = t1*alpha + c[c_step]*beta; + d[d_step*2] = t2*alpha + c[c_step*2]*beta; + d[d_step*3] = t3*alpha + c[c_step*3]*beta; + } + } + else + break; + return; + } + } + } + + { + size_t b_step = B.step; + GEMMSingleMulFunc singleMulFunc; + GEMMBlockMulFunc blockMulFunc; + GEMMStoreFunc storeFunc; + Mat *matD = &D, tmat; + const uchar* Cdata = C.data; + size_t Cstep = C.data ? 
(size_t)C.step : 0; + AutoBuffer buf; + + if( type == CV_32FC1 ) + { + singleMulFunc = (GEMMSingleMulFunc)GEMMSingleMul_32f; + blockMulFunc = (GEMMBlockMulFunc)GEMMBlockMul_32f; + storeFunc = (GEMMStoreFunc)GEMMStore_32f; + } + else if( type == CV_64FC1 ) + { + singleMulFunc = (GEMMSingleMulFunc)GEMMSingleMul_64f; + blockMulFunc = (GEMMBlockMulFunc)GEMMBlockMul_64f; + storeFunc = (GEMMStoreFunc)GEMMStore_64f; + } + else if( type == CV_32FC2 ) + { + singleMulFunc = (GEMMSingleMulFunc)GEMMSingleMul_32fc; + blockMulFunc = (GEMMBlockMulFunc)GEMMBlockMul_32fc; + storeFunc = (GEMMStoreFunc)GEMMStore_32fc; + } + else + { + CV_Assert( type == CV_64FC2 ); + singleMulFunc = (GEMMSingleMulFunc)GEMMSingleMul_64fc; + blockMulFunc = (GEMMBlockMulFunc)GEMMBlockMul_64fc; + storeFunc = (GEMMStoreFunc)GEMMStore_64fc; + } + + if( D.data == A.data || D.data == B.data ) + { + buf.allocate(d_size.width*d_size.height*CV_ELEM_SIZE(type)); + tmat = Mat(d_size.height, d_size.width, type, (uchar*)buf ); + matD = &tmat; + } + + if( (d_size.width == 1 || len == 1) && !(flags & GEMM_2_T) && B.isContinuous() ) + { + b_step = d_size.width == 1 ? 0 : CV_ELEM_SIZE(type); + flags |= GEMM_2_T; + } + + /*if( (d_size.width | d_size.height | len) >= 16 && icvBLAS_GEMM_32f_p != 0 ) + { + blas_func = type == CV_32FC1 ? (icvBLAS_GEMM_32f_t)icvBLAS_GEMM_32f_p : + type == CV_64FC1 ? (icvBLAS_GEMM_32f_t)icvBLAS_GEMM_64f_p : + type == CV_32FC2 ? (icvBLAS_GEMM_32f_t)icvBLAS_GEMM_32fc_p : + type == CV_64FC2 ? (icvBLAS_GEMM_32f_t)icvBLAS_GEMM_64fc_p : 0; + } + + if( blas_func ) + { + const char* transa = flags & GEMM_1_T ? "t" : "n"; + const char* transb = flags & GEMM_2_T ? 
"t" : "n"; + int lda, ldb, ldd; + + if( C->data.ptr ) + { + if( C->data.ptr != D->data.ptr ) + { + if( !(flags & GEMM_3_T) ) + cvCopy( C, D ); + else + cvTranspose( C, D ); + } + } + + if( CV_MAT_DEPTH(type) == CV_32F ) + { + Complex32f _alpha, _beta; + + lda = A->step/sizeof(float); + ldb = b_step/sizeof(float); + ldd = D->step/sizeof(float); + _alpha.re = (float)alpha; + _alpha.im = 0; + _beta.re = C->data.ptr ? (float)beta : 0; + _beta.im = 0; + if( CV_MAT_CN(type) == 2 ) + lda /= 2, ldb /= 2, ldd /= 2; + + blas_func( transb, transa, &d_size.width, &d_size.height, &len, + &_alpha, B->data.ptr, &ldb, A->data.ptr, &lda, + &_beta, D->data.ptr, &ldd ); + } + else + { + CvComplex64f _alpha, _beta; + + lda = A->step/sizeof(double); + ldb = b_step/sizeof(double); + ldd = D->step/sizeof(double); + _alpha.re = alpha; + _alpha.im = 0; + _beta.re = C->data.ptr ? beta : 0; + _beta.im = 0; + if( CV_MAT_CN(type) == 2 ) + lda /= 2, ldb /= 2, ldd /= 2; + + blas_func( transb, transa, &d_size.width, &d_size.height, &len, + &_alpha, B->data.ptr, &ldb, A->data.ptr, &lda, + &_beta, D->data.ptr, &ldd ); + } + } + else*/ if( ((d_size.height <= block_lin_size/2 || d_size.width <= block_lin_size/2) && + len <= 10000) || len <= 10 || + (d_size.width <= block_lin_size && + d_size.height <= block_lin_size && len <= block_lin_size) ) + { + singleMulFunc( A.data, A.step, B.data, b_step, Cdata, Cstep, + matD->data, matD->step, a_size, d_size, alpha, beta, flags ); + } + else + { + int is_a_t = flags & GEMM_1_T; + int is_b_t = flags & GEMM_2_T; + int elem_size = CV_ELEM_SIZE(type); + int dk0_1, dk0_2; + int a_buf_size = 0, b_buf_size, d_buf_size; + uchar* a_buf = 0; + uchar* b_buf = 0; + uchar* d_buf = 0; + int j, k, di = 0, dj = 0, dk = 0; + int dm0, dn0, dk0; + size_t a_step0, a_step1, b_step0, b_step1, c_step0, c_step1; + int work_elem_size = elem_size << (CV_MAT_DEPTH(type) == CV_32F ? 
1 : 0); + + if( !is_a_t ) + a_step0 = A.step, a_step1 = elem_size; + else + a_step0 = elem_size, a_step1 = A.step; + + if( !is_b_t ) + b_step0 = b_step, b_step1 = elem_size; + else + b_step0 = elem_size, b_step1 = b_step; + + if( !C.data ) + { + c_step0 = c_step1 = 0; + flags &= ~GEMM_3_T; + } + else if( !(flags & GEMM_3_T) ) + c_step0 = C.step, c_step1 = elem_size; + else + c_step0 = elem_size, c_step1 = C.step; + + dm0 = std::min( block_lin_size, d_size.height ); + dn0 = std::min( block_lin_size, d_size.width ); + dk0_1 = block_size / dm0; + dk0_2 = block_size / dn0; + dk0 = std::min( dk0_1, dk0_2 ); + dk0 = std::min( dk0, len ); + if( dk0*dm0 > block_size ) + dm0 = block_size / dk0; + if( dk0*dn0 > block_size ) + dn0 = block_size / dk0; + + dk0_1 = (dn0+dn0/8+2) & -2; + b_buf_size = (dk0+dk0/8+1)*dk0_1*elem_size; + d_buf_size = (dk0+dk0/8+1)*dk0_1*work_elem_size; + + if( is_a_t ) + { + a_buf_size = (dm0+dm0/8+1)*((dk0+dk0/8+2)&-2)*elem_size; + flags &= ~GEMM_1_T; + } + + buf.allocate(a_buf_size + b_buf_size + d_buf_size); + d_buf = (uchar*)buf; + b_buf = d_buf + d_buf_size; + + if( is_a_t ) + a_buf = b_buf + b_buf_size; + + for( i = 0; i < d_size.height; i += di ) + { + di = dm0; + if( i + di >= d_size.height || 8*(i + di) + di > 8*d_size.height ) + di = d_size.height - i; + + for( j = 0; j < d_size.width; j += dj ) + { + uchar* _d = matD->data + i*matD->step + j*elem_size; + const uchar* _c = Cdata + i*c_step0 + j*c_step1; + size_t _d_step = matD->step; + dj = dn0; + + if( j + dj >= d_size.width || 8*(j + dj) + dj > 8*d_size.width ) + dj = d_size.width - j; + + flags &= 15; + if( dk0 < len ) + { + _d = d_buf; + _d_step = dj*work_elem_size; + } + + for( k = 0; k < len; k += dk ) + { + const uchar* _a = A.data + i*a_step0 + k*a_step1; + size_t _a_step = A.step; + const uchar* _b = B.data + k*b_step0 + j*b_step1; + size_t _b_step = b_step; + Size a_bl_size; + + dk = dk0; + if( k + dk >= len || 8*(k + dk) + dk > 8*len ) + dk = len - k; + + if( !is_a_t ) + 
a_bl_size.width = dk, a_bl_size.height = di; + else + a_bl_size.width = di, a_bl_size.height = dk; + + if( a_buf && is_a_t ) + { + _a_step = dk*elem_size; + GEMM_TransposeBlock( _a, A.step, a_buf, _a_step, a_bl_size, elem_size ); + std::swap( a_bl_size.width, a_bl_size.height ); + _a = a_buf; + } + + if( dj < d_size.width ) + { + Size b_size; + if( !is_b_t ) + b_size.width = dj, b_size.height = dk; + else + b_size.width = dk, b_size.height = dj; + + _b_step = b_size.width*elem_size; + GEMM_CopyBlock( _b, b_step, b_buf, _b_step, b_size, elem_size ); + _b = b_buf; + } + + if( dk0 < len ) + blockMulFunc( _a, _a_step, _b, _b_step, _d, _d_step, + a_bl_size, Size(dj,di), flags ); + else + singleMulFunc( _a, _a_step, _b, _b_step, _c, Cstep, + _d, _d_step, a_bl_size, Size(dj,di), alpha, beta, flags ); + flags |= 16; + } + + if( dk0 < len ) + storeFunc( _c, Cstep, _d, _d_step, + matD->data + i*matD->step + j*elem_size, + matD->step, Size(dj,di), alpha, beta, flags ); + } + } + } + + if( matD != &D ) + matD->copyTo(D); + } +} + +/****************************************************************************************\ +* Transform * +\****************************************************************************************/ + +namespace cv +{ + +template static void +transform_( const T* src, T* dst, const WT* m, int len, int scn, int dcn ) +{ + int x; + + if( scn == 2 && dcn == 2 ) + { + for( x = 0; x < len*2; x += 2 ) + { + WT v0 = src[x], v1 = src[x+1]; + T t0 = saturate_cast(m[0]*v0 + m[1]*v1 + m[2]); + T t1 = saturate_cast(m[3]*v0 + m[4]*v1 + m[5]); + dst[x] = t0; dst[x+1] = t1; + } + } + else if( scn == 3 && dcn == 3 ) + { + for( x = 0; x < len*3; x += 3 ) + { + WT v0 = src[x], v1 = src[x+1], v2 = src[x+2]; + T t0 = saturate_cast(m[0]*v0 + m[1]*v1 + m[2]*v2 + m[3]); + T t1 = saturate_cast(m[4]*v0 + m[5]*v1 + m[6]*v2 + m[7]); + T t2 = saturate_cast(m[8]*v0 + m[9]*v1 + m[10]*v2 + m[11]); + dst[x] = t0; dst[x+1] = t1; dst[x+2] = t2; + } + } + else if( scn == 3 && dcn == 1 
) + { + for( x = 0; x < len; x++, src += 3 ) + dst[x] = saturate_cast(m[0]*src[0] + m[1]*src[1] + m[2]*src[2] + m[3]); + } + else if( scn == 4 && dcn == 4 ) + { + for( x = 0; x < len*4; x += 4 ) + { + WT v0 = src[x], v1 = src[x+1], v2 = src[x+2], v3 = src[x+3]; + T t0 = saturate_cast(m[0]*v0 + m[1]*v1 + m[2]*v2 + m[3]*v3 + m[4]); + T t1 = saturate_cast(m[5]*v0 + m[6]*v1 + m[7]*v2 + m[8]*v3 + m[9]); + dst[x] = t0; dst[x+1] = t1; + t0 = saturate_cast(m[10]*v0 + m[11]*v1 + m[12]*v2 + m[13]*v3 + m[14]); + t1 = saturate_cast(m[15]*v0 + m[16]*v1 + m[17]*v2 + m[18]*v3 + m[19]); + dst[x+2] = t0; dst[x+3] = t1; + } + } + else + { + for( x = 0; x < len; x++, src += scn, dst += dcn ) + { + const WT* _m = m; + int j, k; + for( j = 0; j < dcn; j++, _m += scn + 1 ) + { + WT s = _m[scn]; + for( k = 0; k < scn; k++ ) + s += _m[k]*src[k]; + dst[j] = saturate_cast(s); + } + } + } +} + +#if CV_SSE2 + +static inline void +load3x3Matrix( const float* m, __m128& m0, __m128& m1, __m128& m2, __m128& m3 ) +{ + m0 = _mm_setr_ps(m[0], m[4], m[8], 0); + m1 = _mm_setr_ps(m[1], m[5], m[9], 0); + m2 = _mm_setr_ps(m[2], m[6], m[10], 0); + m3 = _mm_setr_ps(m[3], m[7], m[11], 0); +} + +static inline void +load4x4Matrix( const float* m, __m128& m0, __m128& m1, __m128& m2, __m128& m3, __m128& m4 ) +{ + m0 = _mm_setr_ps(m[0], m[5], m[10], m[15]); + m1 = _mm_setr_ps(m[1], m[6], m[11], m[16]); + m2 = _mm_setr_ps(m[2], m[7], m[12], m[17]); + m3 = _mm_setr_ps(m[3], m[8], m[13], m[18]); + m4 = _mm_setr_ps(m[4], m[9], m[14], m[19]); +} + +#endif + +static void +transform_8u( const uchar* src, uchar* dst, const float* m, int len, int scn, int dcn ) +{ +#if CV_SSE2 + const int BITS = 10, SCALE = 1 << BITS; + const float MAX_M = (float)(1 << (15 - BITS)); + + if( USE_SSE2 && scn == 3 && dcn == 3 && + std::abs(m[0]) < MAX_M && std::abs(m[1]) < MAX_M && std::abs(m[2]) < MAX_M && std::abs(m[3]) < MAX_M*256 && + std::abs(m[4]) < MAX_M && std::abs(m[5]) < MAX_M && std::abs(m[6]) < MAX_M && std::abs(m[7]) < 
MAX_M*256 && + std::abs(m[8]) < MAX_M && std::abs(m[9]) < MAX_M && std::abs(m[10]) < MAX_M && std::abs(m[11]) < MAX_M*256 ) + { + // faster fixed-point transformation + short m00 = saturate_cast(m[0]*SCALE), m01 = saturate_cast(m[1]*SCALE), + m02 = saturate_cast(m[2]*SCALE), m10 = saturate_cast(m[4]*SCALE), + m11 = saturate_cast(m[5]*SCALE), m12 = saturate_cast(m[6]*SCALE), + m20 = saturate_cast(m[8]*SCALE), m21 = saturate_cast(m[9]*SCALE), + m22 = saturate_cast(m[10]*SCALE); + int m03 = saturate_cast((m[3]+0.5f)*SCALE), m13 = saturate_cast((m[7]+0.5f)*SCALE ), + m23 = saturate_cast((m[11]+0.5f)*SCALE); + + __m128i m0 = _mm_setr_epi16(0, m00, m01, m02, m00, m01, m02, 0); + __m128i m1 = _mm_setr_epi16(0, m10, m11, m12, m10, m11, m12, 0); + __m128i m2 = _mm_setr_epi16(0, m20, m21, m22, m20, m21, m22, 0); + __m128i m3 = _mm_setr_epi32(m03, m13, m23, 0); + int x = 0; + + for( ; x <= (len - 8)*3; x += 8*3 ) + { + __m128i z = _mm_setzero_si128(), t0, t1, t2, r0, r1; + __m128i v0 = _mm_loadl_epi64((const __m128i*)(src + x)); + __m128i v1 = _mm_loadl_epi64((const __m128i*)(src + x + 8)); + __m128i v2 = _mm_loadl_epi64((const __m128i*)(src + x + 16)), v3; + v0 = _mm_unpacklo_epi8(v0, z); // b0 g0 r0 b1 g1 r1 b2 g2 + v1 = _mm_unpacklo_epi8(v1, z); // r2 b3 g3 r3 b4 g4 r4 b5 + v2 = _mm_unpacklo_epi8(v2, z); // g5 r5 b6 g6 r6 b7 g7 r7 + + v3 = _mm_srli_si128(v2, 2); // ? b6 g6 r6 b7 g7 r7 0 + v2 = _mm_or_si128(_mm_slli_si128(v2, 10), _mm_srli_si128(v1, 6)); // ? b4 g4 r4 b5 g5 r5 ? + v1 = _mm_or_si128(_mm_slli_si128(v1, 6), _mm_srli_si128(v0, 10)); // ? b2 g2 r2 b3 g3 r3 ? + v0 = _mm_slli_si128(v0, 2); // 0 b0 g0 r0 b1 g1 r1 ? 
+ + // process pixels 0 & 1 + t0 = _mm_madd_epi16(v0, m0); // a0 b0 a1 b1 + t1 = _mm_madd_epi16(v0, m1); // c0 d0 c1 d1 + t2 = _mm_madd_epi16(v0, m2); // e0 f0 e1 f1 + v0 = _mm_unpacklo_epi32(t0, t1); // a0 c0 b0 d0 + t0 = _mm_unpackhi_epi32(t0, t1); // a1 b1 c1 d1 + t1 = _mm_unpacklo_epi32(t2, z); // e0 0 f0 0 + t2 = _mm_unpackhi_epi32(t2, z); // e1 0 f1 0 + r0 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(v0, t1), _mm_unpackhi_epi64(v0,t1)), m3); // B0 G0 R0 0 + r1 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(t0, t2), _mm_unpackhi_epi64(t0,t2)), m3); // B1 G1 R1 0 + r0 = _mm_srai_epi32(r0, BITS); + r1 = _mm_srai_epi32(r1, BITS); + v0 = _mm_packus_epi16(_mm_packs_epi32(_mm_slli_si128(r0, 4), r1), z); // 0 B0 G0 R0 B1 G1 R1 0 + + // process pixels 2 & 3 + t0 = _mm_madd_epi16(v1, m0); // a0 b0 a1 b1 + t1 = _mm_madd_epi16(v1, m1); // c0 d0 c1 d1 + t2 = _mm_madd_epi16(v1, m2); // e0 f0 e1 f1 + v1 = _mm_unpacklo_epi32(t0, t1); // a0 c0 b0 d0 + t0 = _mm_unpackhi_epi32(t0, t1); // a1 b1 c1 d1 + t1 = _mm_unpacklo_epi32(t2, z); // e0 0 f0 0 + t2 = _mm_unpackhi_epi32(t2, z); // e1 0 f1 0 + r0 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(v1, t1), _mm_unpackhi_epi64(v1,t1)), m3); // B2 G2 R2 0 + r1 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(t0, t2), _mm_unpackhi_epi64(t0,t2)), m3); // B3 G3 R3 0 + r0 = _mm_srai_epi32(r0, BITS); + r1 = _mm_srai_epi32(r1, BITS); + v1 = _mm_packus_epi16(_mm_packs_epi32(_mm_slli_si128(r0, 4), r1), z); // 0 B2 G2 R2 B3 G3 R3 0 + + // process pixels 4 & 5 + t0 = _mm_madd_epi16(v2, m0); // a0 b0 a1 b1 + t1 = _mm_madd_epi16(v2, m1); // c0 d0 c1 d1 + t2 = _mm_madd_epi16(v2, m2); // e0 f0 e1 f1 + v2 = _mm_unpacklo_epi32(t0, t1); // a0 c0 b0 d0 + t0 = _mm_unpackhi_epi32(t0, t1); // a1 b1 c1 d1 + t1 = _mm_unpacklo_epi32(t2, z); // e0 0 f0 0 + t2 = _mm_unpackhi_epi32(t2, z); // e1 0 f1 0 + r0 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(v2, t1), _mm_unpackhi_epi64(v2,t1)), m3); // B4 G4 R4 0 + r1 = 
_mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(t0, t2), _mm_unpackhi_epi64(t0,t2)), m3); // B5 G5 R5 0 + r0 = _mm_srai_epi32(r0, BITS); + r1 = _mm_srai_epi32(r1, BITS); + v2 = _mm_packus_epi16(_mm_packs_epi32(_mm_slli_si128(r0, 4), r1), z); // 0 B4 G4 R4 B5 G5 R5 0 + + // process pixels 6 & 7 + t0 = _mm_madd_epi16(v3, m0); // a0 b0 a1 b1 + t1 = _mm_madd_epi16(v3, m1); // c0 d0 c1 d1 + t2 = _mm_madd_epi16(v3, m2); // e0 f0 e1 f1 + v3 = _mm_unpacklo_epi32(t0, t1); // a0 c0 b0 d0 + t0 = _mm_unpackhi_epi32(t0, t1); // a1 b1 c1 d1 + t1 = _mm_unpacklo_epi32(t2, z); // e0 0 f0 0 + t2 = _mm_unpackhi_epi32(t2, z); // e1 0 f1 0 + r0 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(v3, t1), _mm_unpackhi_epi64(v3,t1)), m3); // B6 G6 R6 0 + r1 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(t0, t2), _mm_unpackhi_epi64(t0,t2)), m3); // B7 G7 R7 0 + r0 = _mm_srai_epi32(r0, BITS); + r1 = _mm_srai_epi32(r1, BITS); + v3 = _mm_packus_epi16(_mm_packs_epi32(_mm_slli_si128(r0, 4), r1), z); // 0 B6 G6 R6 B7 G7 R7 0 + + v0 = _mm_or_si128(_mm_srli_si128(v0, 1), _mm_slli_si128(v1, 5)); + v1 = _mm_or_si128(_mm_srli_si128(v1, 3), _mm_slli_si128(v2, 3)); + v2 = _mm_or_si128(_mm_srli_si128(v2, 5), _mm_slli_si128(v3, 1)); + _mm_storel_epi64((__m128i*)(dst + x), v0); + _mm_storel_epi64((__m128i*)(dst + x + 8), v1); + _mm_storel_epi64((__m128i*)(dst + x + 16), v2); + } + + for( ; x < len*3; x += 3 ) + { + int v0 = src[x], v1 = src[x+1], v2 = src[x+2]; + uchar t0 = saturate_cast((m00*v0 + m01*v1 + m02*v2 + m03)>>BITS); + uchar t1 = saturate_cast((m10*v0 + m11*v1 + m12*v2 + m13)>>BITS); + uchar t2 = saturate_cast((m20*v0 + m21*v1 + m22*v2 + m23)>>BITS); + dst[x] = t0; dst[x+1] = t1; dst[x+2] = t2; + } + return; + } +#endif + + transform_(src, dst, m, len, scn, dcn); +} + +static void +transform_16u( const ushort* src, ushort* dst, const float* m, int len, int scn, int dcn ) +{ +#if CV_SSE2 + if( USE_SSE2 && scn == 3 && dcn == 3 ) + { + __m128 m0, m1, m2, m3; + __m128i delta = 
_mm_setr_epi16(0,-32768,-32768,-32768,-32768,-32768,-32768,0); + load3x3Matrix(m, m0, m1, m2, m3); + m3 = _mm_sub_ps(m3, _mm_setr_ps(32768.f, 32768.f, 32768.f, 0.f)); + + int x = 0; + for( ; x <= (len - 4)*3; x += 4*3 ) + { + __m128i z = _mm_setzero_si128(); + __m128i v0 = _mm_loadu_si128((const __m128i*)(src + x)), v1; + __m128i v2 = _mm_loadl_epi64((const __m128i*)(src + x + 8)), v3; + v1 = _mm_unpacklo_epi16(_mm_srli_si128(v0, 6), z); // b1 g1 r1 + v3 = _mm_unpacklo_epi16(_mm_srli_si128(v2, 2), z); // b3 g3 r3 + v2 = _mm_or_si128(_mm_srli_si128(v0, 12), _mm_slli_si128(v2, 4)); + v0 = _mm_unpacklo_epi16(v0, z); // b0 g0 r0 + v2 = _mm_unpacklo_epi16(v2, z); // b2 g2 r2 + __m128 x0 = _mm_cvtepi32_ps(v0), x1 = _mm_cvtepi32_ps(v1); + __m128 x2 = _mm_cvtepi32_ps(v2), x3 = _mm_cvtepi32_ps(v3); + __m128 y0 = _mm_add_ps(_mm_add_ps(_mm_add_ps( + _mm_mul_ps(m0, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(0,0,0,0))), + _mm_mul_ps(m1, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(1,1,1,1)))), + _mm_mul_ps(m2, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(2,2,2,2)))), m3); + __m128 y1 = _mm_add_ps(_mm_add_ps(_mm_add_ps( + _mm_mul_ps(m0, _mm_shuffle_ps(x1,x1,_MM_SHUFFLE(0,0,0,0))), + _mm_mul_ps(m1, _mm_shuffle_ps(x1,x1,_MM_SHUFFLE(1,1,1,1)))), + _mm_mul_ps(m2, _mm_shuffle_ps(x1,x1,_MM_SHUFFLE(2,2,2,2)))), m3); + __m128 y2 = _mm_add_ps(_mm_add_ps(_mm_add_ps( + _mm_mul_ps(m0, _mm_shuffle_ps(x2,x2,_MM_SHUFFLE(0,0,0,0))), + _mm_mul_ps(m1, _mm_shuffle_ps(x2,x2,_MM_SHUFFLE(1,1,1,1)))), + _mm_mul_ps(m2, _mm_shuffle_ps(x2,x2,_MM_SHUFFLE(2,2,2,2)))), m3); + __m128 y3 = _mm_add_ps(_mm_add_ps(_mm_add_ps( + _mm_mul_ps(m0, _mm_shuffle_ps(x3,x3,_MM_SHUFFLE(0,0,0,0))), + _mm_mul_ps(m1, _mm_shuffle_ps(x3,x3,_MM_SHUFFLE(1,1,1,1)))), + _mm_mul_ps(m2, _mm_shuffle_ps(x3,x3,_MM_SHUFFLE(2,2,2,2)))), m3); + v0 = _mm_cvtps_epi32(y0); v1 = _mm_cvtps_epi32(y1); + v2 = _mm_cvtps_epi32(y2); v3 = _mm_cvtps_epi32(y3); + + v0 = _mm_add_epi16(_mm_packs_epi32(_mm_slli_si128(v0,4), v1), delta); // 0 b0 g0 r0 b1 g1 r1 0 + v2 = 
_mm_add_epi16(_mm_packs_epi32(_mm_slli_si128(v2,4), v3), delta); // 0 b2 g2 r2 b3 g3 r3 0 + v1 = _mm_or_si128(_mm_srli_si128(v0,2), _mm_slli_si128(v2,10)); // b0 g0 r0 b1 g1 r1 b2 g2 + v2 = _mm_srli_si128(v2, 6); // r2 b3 g3 r3 0 0 0 0 + _mm_storeu_si128((__m128i*)(dst + x), v1); + _mm_storel_epi64((__m128i*)(dst + x + 8), v2); + } + + for( ; x < len*3; x += 3 ) + { + float v0 = src[x], v1 = src[x+1], v2 = src[x+2]; + ushort t0 = saturate_cast(m[0]*v0 + m[1]*v1 + m[2]*v2 + m[3]); + ushort t1 = saturate_cast(m[4]*v0 + m[5]*v1 + m[6]*v2 + m[7]); + ushort t2 = saturate_cast(m[8]*v0 + m[9]*v1 + m[10]*v2 + m[11]); + dst[x] = t0; dst[x+1] = t1; dst[x+2] = t2; + } + return; + } +#endif + + transform_(src, dst, m, len, scn, dcn); +} + + +static void +transform_32f( const float* src, float* dst, const float* m, int len, int scn, int dcn ) +{ +#if CV_SSE2 + if( USE_SSE2 ) + { + int x = 0; + if( scn == 3 && dcn == 3 ) + { + __m128 m0, m1, m2, m3; + load3x3Matrix(m, m0, m1, m2, m3); + + for( ; x < (len - 1)*3; x += 3 ) + { + __m128 x0 = _mm_loadu_ps(src + x); + __m128 y0 = _mm_add_ps(_mm_add_ps(_mm_add_ps( + _mm_mul_ps(m0, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(0,0,0,0))), + _mm_mul_ps(m1, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(1,1,1,1)))), + _mm_mul_ps(m2, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(2,2,2,2)))), m3); + _mm_storel_pi((__m64*)(dst + x), y0); + _mm_store_ss(dst + x + 2, _mm_movehl_ps(y0,y0)); + } + + for( ; x < len*3; x += 3 ) + { + float v0 = src[x], v1 = src[x+1], v2 = src[x+2]; + float t0 = saturate_cast(m[0]*v0 + m[1]*v1 + m[2]*v2 + m[3]); + float t1 = saturate_cast(m[4]*v0 + m[5]*v1 + m[6]*v2 + m[7]); + float t2 = saturate_cast(m[8]*v0 + m[9]*v1 + m[10]*v2 + m[11]); + dst[x] = t0; dst[x+1] = t1; dst[x+2] = t2; + } + return; + } + + if( scn == 4 && dcn == 4 ) + { + __m128 m0, m1, m2, m3, m4; + load4x4Matrix(m, m0, m1, m2, m3, m4); + + for( ; x < len*4; x += 4 ) + { + __m128 x0 = _mm_loadu_ps(src + x); + __m128 y0 = _mm_add_ps(_mm_add_ps(_mm_add_ps(_mm_add_ps( + _mm_mul_ps(m0, 
_mm_shuffle_ps(x0,x0,_MM_SHUFFLE(0,0,0,0))), + _mm_mul_ps(m1, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(1,1,1,1)))), + _mm_mul_ps(m2, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(2,2,2,2)))), + _mm_mul_ps(m3, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(3,3,3,3)))), m4); + _mm_storeu_ps(dst + x, y0); + } + return; + } + } +#endif + + transform_(src, dst, m, len, scn, dcn); +} + + +static void +transform_8s(const schar* src, schar* dst, const float* m, int len, int scn, int dcn) +{ + transform_(src, dst, m, len, scn, dcn); +} + +static void +transform_16s(const short* src, short* dst, const float* m, int len, int scn, int dcn) +{ + transform_(src, dst, m, len, scn, dcn); +} + +static void +transform_32s(const int* src, int* dst, const double* m, int len, int scn, int dcn) +{ + transform_(src, dst, m, len, scn, dcn); +} + +static void +transform_64f(const double* src, double* dst, const double* m, int len, int scn, int dcn) +{ + transform_(src, dst, m, len, scn, dcn); +} + +template static void +diagtransform_( const T* src, T* dst, const WT* m, int len, int cn, int ) +{ + int x; + + if( cn == 2 ) + { + for( x = 0; x < len*2; x += 2 ) + { + T t0 = saturate_cast(m[0]*src[x] + m[2]); + T t1 = saturate_cast(m[4]*src[x+1] + m[5]); + dst[x] = t0; dst[x+1] = t1; + } + } + else if( cn == 3 ) + { + for( x = 0; x < len*3; x += 3 ) + { + T t0 = saturate_cast(m[0]*src[x] + m[3]); + T t1 = saturate_cast(m[5]*src[x+1] + m[7]); + T t2 = saturate_cast(m[10]*src[x+2] + m[11]); + dst[x] = t0; dst[x+1] = t1; dst[x+2] = t2; + } + } + else if( cn == 4 ) + { + for( x = 0; x < len*4; x += 4 ) + { + T t0 = saturate_cast(m[0]*src[x] + m[4]); + T t1 = saturate_cast(m[6]*src[x+1] + m[9]); + dst[x] = t0; dst[x+1] = t1; + t0 = saturate_cast(m[12]*src[x+2] + m[14]); + t1 = saturate_cast(m[18]*src[x+3] + m[19]); + dst[x+2] = t0; dst[x+3] = t1; + } + } + else + { + for( x = 0; x < len; x++, src += cn, dst += cn ) + { + const WT* _m = m; + for( int j = 0; j < cn; j++, _m += cn + 1 ) + dst[j] = saturate_cast(src[j]*_m[j] + _m[cn]); 
+ } + } +} + +static void +diagtransform_8u(const uchar* src, uchar* dst, const float* m, int len, int scn, int dcn) +{ + diagtransform_(src, dst, m, len, scn, dcn); +} + +static void +diagtransform_8s(const schar* src, schar* dst, const float* m, int len, int scn, int dcn) +{ + diagtransform_(src, dst, m, len, scn, dcn); +} + +static void +diagtransform_16u(const ushort* src, ushort* dst, const float* m, int len, int scn, int dcn) +{ + diagtransform_(src, dst, m, len, scn, dcn); +} + +static void +diagtransform_16s(const short* src, short* dst, const float* m, int len, int scn, int dcn) +{ + diagtransform_(src, dst, m, len, scn, dcn); +} + +static void +diagtransform_32s(const int* src, int* dst, const double* m, int len, int scn, int dcn) +{ + diagtransform_(src, dst, m, len, scn, dcn); +} + +static void +diagtransform_32f(const float* src, float* dst, const float* m, int len, int scn, int dcn) +{ + diagtransform_(src, dst, m, len, scn, dcn); +} + +static void +diagtransform_64f(const double* src, double* dst, const double* m, int len, int scn, int dcn) +{ + diagtransform_(src, dst, m, len, scn, dcn); +} + + +typedef void (*TransformFunc)( const uchar* src, uchar* dst, const uchar* m, int, int, int ); + +static TransformFunc transformTab[] = +{ + (TransformFunc)transform_8u, (TransformFunc)transform_8s, (TransformFunc)transform_16u, + (TransformFunc)transform_16s, (TransformFunc)transform_32s, (TransformFunc)transform_32f, + (TransformFunc)transform_64f, 0 +}; + +static TransformFunc diagTransformTab[] = +{ + (TransformFunc)diagtransform_8u, (TransformFunc)diagtransform_8s, (TransformFunc)diagtransform_16u, + (TransformFunc)diagtransform_16s, (TransformFunc)diagtransform_32s, (TransformFunc)diagtransform_32f, + (TransformFunc)diagtransform_64f, 0 +}; + +} + +void cv::transform( InputArray _src, OutputArray _dst, InputArray _mtx ) +{ + Mat src = _src.getMat(), m = _mtx.getMat(); + int depth = src.depth(), scn = src.channels(), dcn = m.rows; + CV_Assert( scn == 
m.cols || scn + 1 == m.cols ); + bool isDiag = false; + + _dst.create( src.size(), CV_MAKETYPE(depth, dcn) ); + Mat dst = _dst.getMat(); + + int mtype = depth == CV_32S || depth == CV_64F ? CV_64F : CV_32F; + AutoBuffer _mbuf; + double* mbuf; + + if( !m.isContinuous() || m.type() != mtype || m.cols != scn + 1 ) + { + _mbuf.allocate(dcn*(scn+1)); + mbuf = (double*)_mbuf; + Mat tmp(dcn, scn+1, mtype, mbuf); + memset(tmp.data, 0, tmp.total()*tmp.elemSize()); + if( m.cols == scn+1 ) + m.convertTo(tmp, mtype); + else + { + Mat tmppart = tmp.colRange(0, m.cols); + m.convertTo(tmppart, mtype); + } + m = tmp; + } + else + mbuf = (double*)m.data; + + if( scn == dcn ) + { + int i, j; + double eps = mtype == CV_32F ? FLT_EPSILON : DBL_EPSILON; + + if( scn == 1 ) + { + double alpha, beta; + if( mtype == CV_32F ) + alpha = m.at(0), beta = m.at(1); + else + alpha = m.at(0), beta = m.at(1); + src.convertTo(dst, dst.type(), alpha, beta); + return; + } + + for( i = 0, isDiag = true; isDiag && i < scn; i++ ) + { + for( j = 0; isDiag && j < scn; j++ ) + { + double v = mtype == CV_32F ? m.at(i, j) : m.at(i, j); + if( i != j && fabs(v) > eps ) + isDiag = false; + } + } + } + + TransformFunc func = isDiag ? 
diagTransformTab[depth] : transformTab[depth]; + CV_Assert( func != 0 ); + + const Mat* arrays[] = {&src, &dst, 0}; + uchar* ptrs[2]; + NAryMatIterator it(arrays, ptrs); + size_t i, total = it.size; + + for( i = 0; i < it.nplanes; i++, ++it ) + func( ptrs[0], ptrs[1], (uchar*)mbuf, (int)total, scn, dcn ); +} + +/****************************************************************************************\ +* Perspective Transform * +\****************************************************************************************/ + +namespace cv +{ + +template static void +perspectiveTransform_( const T* src, T* dst, const double* m, int len, int scn, int dcn ) +{ + const double eps = FLT_EPSILON; + int i; + + if( scn == 2 && dcn == 2 ) + { + for( i = 0; i < len*2; i += 2 ) + { + T x = src[i], y = src[i + 1]; + double w = x*m[6] + y*m[7] + m[8]; + + if( fabs(w) > eps ) + { + w = 1./w; + dst[i] = (T)((x*m[0] + y*m[1] + m[2])*w); + dst[i+1] = (T)((x*m[3] + y*m[4] + m[5])*w); + } + else + dst[i] = dst[i+1] = (T)0; + } + } + else if( scn == 3 && dcn == 3 ) + { + for( i = 0; i < len*3; i += 3 ) + { + T x = src[i], y = src[i + 1], z = src[i + 2]; + double w = x*m[12] + y*m[13] + z*m[14] + m[15]; + + if( fabs(w) > eps ) + { + w = 1./w; + dst[i] = (T)((x*m[0] + y*m[1] + z*m[2] + m[3]) * w); + dst[i+1] = (T)((x*m[4] + y*m[5] + z*m[6] + m[7]) * w); + dst[i+2] = (T)((x*m[8] + y*m[9] + z*m[10] + m[11]) * w); + } + else + dst[i] = dst[i+1] = dst[i+2] = (T)0; + } + } + else if( scn == 3 && dcn == 2 ) + { + for( i = 0; i < len; i++, src += 3, dst += 2 ) + { + T x = src[0], y = src[1], z = src[2]; + double w = x*m[8] + y*m[9] + z*m[10] + m[11]; + + if( fabs(w) > eps ) + { + w = 1./w; + dst[0] = (T)((x*m[0] + y*m[1] + z*m[2] + m[3])*w); + dst[1] = (T)((x*m[4] + y*m[5] + z*m[6] + m[7])*w); + } + else + dst[0] = dst[1] = (T)0; + } + } + else + { + for( i = 0; i < len; i++, src += scn, dst += dcn ) + { + const double* _m = m + dcn*(scn + 1); + double w = _m[scn]; + int j, k; + for( k = 0; k < scn; 
k++ ) + w += _m[k]*src[k]; + if( fabs(w) > eps ) + { + _m = m; + for( j = 0; j < dcn; j++, _m += scn + 1 ) + { + double s = _m[scn]; + for( k = 0; k < scn; k++ ) + s += _m[k]*src[k]; + dst[j] = (T)(s*w); + } + } + else + for( j = 0; j < dcn; j++ ) + dst[j] = 0; + } + } +} + + +static void +perspectiveTransform_32f(const float* src, float* dst, const double* m, int len, int scn, int dcn) +{ + perspectiveTransform_(src, dst, m, len, scn, dcn); +} + +static void +perspectiveTransform_64f(const double* src, double* dst, const double* m, int len, int scn, int dcn) +{ + perspectiveTransform_(src, dst, m, len, scn, dcn); +} + +} + +void cv::perspectiveTransform( InputArray _src, OutputArray _dst, InputArray _mtx ) +{ + Mat src = _src.getMat(), m = _mtx.getMat(); + int depth = src.depth(), scn = src.channels(), dcn = m.rows-1; + CV_Assert( scn + 1 == m.cols && (depth == CV_32F || depth == CV_64F)); + + _dst.create( src.size(), CV_MAKETYPE(depth, dcn) ); + Mat dst = _dst.getMat(); + + const int mtype = CV_64F; + AutoBuffer _mbuf; + double* mbuf = _mbuf; + + if( !m.isContinuous() || m.type() != mtype ) + { + _mbuf.allocate((dcn+1)*(scn+1)); + Mat tmp(dcn+1, scn+1, mtype, (double*)_mbuf); + m.convertTo(tmp, mtype); + m = tmp; + } + else + mbuf = (double*)m.data; + + TransformFunc func = depth == CV_32F ? 
+ (TransformFunc)perspectiveTransform_32f : + (TransformFunc)perspectiveTransform_64f; + CV_Assert( func != 0 ); + + const Mat* arrays[] = {&src, &dst, 0}; + uchar* ptrs[2]; + NAryMatIterator it(arrays, ptrs); + size_t i, total = it.size; + + for( i = 0; i < it.nplanes; i++, ++it ) + func( ptrs[0], ptrs[1], (uchar*)mbuf, (int)total, scn, dcn ); +} + +/****************************************************************************************\ +* ScaleAdd * +\****************************************************************************************/ + +namespace cv +{ + +static void scaleAdd_32f(const float* src1, const float* src2, float* dst, + int len, float* _alpha) +{ + float alpha = *_alpha; + int i = 0; +#if CV_SSE2 + if( USE_SSE2 ) + { + __m128 a4 = _mm_set1_ps(alpha); + if( (((size_t)src1|(size_t)src2|(size_t)dst) & 15) == 0 ) + for( ; i <= len - 8; i += 8 ) + { + __m128 x0, x1, y0, y1, t0, t1; + x0 = _mm_load_ps(src1 + i); x1 = _mm_load_ps(src1 + i + 4); + y0 = _mm_load_ps(src2 + i); y1 = _mm_load_ps(src2 + i + 4); + t0 = _mm_add_ps(_mm_mul_ps(x0, a4), y0); + t1 = _mm_add_ps(_mm_mul_ps(x1, a4), y1); + _mm_store_ps(dst + i, t0); + _mm_store_ps(dst + i + 4, t1); + } + else + for( ; i <= len - 8; i += 8 ) + { + __m128 x0, x1, y0, y1, t0, t1; + x0 = _mm_loadu_ps(src1 + i); x1 = _mm_loadu_ps(src1 + i + 4); + y0 = _mm_loadu_ps(src2 + i); y1 = _mm_loadu_ps(src2 + i + 4); + t0 = _mm_add_ps(_mm_mul_ps(x0, a4), y0); + t1 = _mm_add_ps(_mm_mul_ps(x1, a4), y1); + _mm_storeu_ps(dst + i, t0); + _mm_storeu_ps(dst + i + 4, t1); + } + } + else +#endif + for( ; i <= len - 4; i += 4 ) + { + float t0, t1; + t0 = src1[i]*alpha + src2[i]; + t1 = src1[i+1]*alpha + src2[i+1]; + dst[i] = t0; dst[i+1] = t1; + t0 = src1[i+2]*alpha + src2[i+2]; + t1 = src1[i+3]*alpha + src2[i+3]; + dst[i+2] = t0; dst[i+3] = t1; + } + for( ; i < len; i++ ) + dst[i] = src1[i]*alpha + src2[i]; +} + + +static void scaleAdd_64f(const double* src1, const double* src2, double* dst, + int len, double* _alpha) +{ + 
double alpha = *_alpha; + int i = 0; +#if CV_SSE2 + if( USE_SSE2 && (((size_t)src1|(size_t)src2|(size_t)dst) & 15) == 0 ) + { + __m128d a2 = _mm_set1_pd(alpha); + for( ; i <= len - 4; i += 4 ) + { + __m128d x0, x1, y0, y1, t0, t1; + x0 = _mm_load_pd(src1 + i); x1 = _mm_load_pd(src1 + i + 2); + y0 = _mm_load_pd(src2 + i); y1 = _mm_load_pd(src2 + i + 2); + t0 = _mm_add_pd(_mm_mul_pd(x0, a2), y0); + t1 = _mm_add_pd(_mm_mul_pd(x1, a2), y1); + _mm_store_pd(dst + i, t0); + _mm_store_pd(dst + i + 2, t1); + } + } + else +#endif + for( ; i <= len - 4; i += 4 ) + { + double t0, t1; + t0 = src1[i]*alpha + src2[i]; + t1 = src1[i+1]*alpha + src2[i+1]; + dst[i] = t0; dst[i+1] = t1; + t0 = src1[i+2]*alpha + src2[i+2]; + t1 = src1[i+3]*alpha + src2[i+3]; + dst[i+2] = t0; dst[i+3] = t1; + } + for( ; i < len; i++ ) + dst[i] = src1[i]*alpha + src2[i]; +} + +typedef void (*ScaleAddFunc)(const uchar* src1, const uchar* src2, uchar* dst, int len, const void* alpha); + +} + +void cv::scaleAdd( InputArray _src1, double alpha, InputArray _src2, OutputArray _dst ) +{ + Mat src1 = _src1.getMat(), src2 = _src2.getMat(); + int depth = src1.depth(), cn = src1.channels(); + + CV_Assert( src1.type() == src2.type() ); + if( depth < CV_32F ) + { + addWeighted(_src1, alpha, _src2, 1, 0, _dst, depth); + return; + } + + _dst.create(src1.dims, src1.size, src1.type()); + Mat dst = _dst.getMat(); + + float falpha = (float)alpha; + void* palpha = depth == CV_32F ? (void*)&falpha : (void*)α + + ScaleAddFunc func = depth == CV_32F ? 
(ScaleAddFunc)scaleAdd_32f : (ScaleAddFunc)scaleAdd_64f; + + if( src1.isContinuous() && src2.isContinuous() && dst.isContinuous() ) + { + size_t len = src1.total()*cn; + func(src1.data, src2.data, dst.data, (int)len, palpha); + return; + } + + const Mat* arrays[] = {&src1, &src2, &dst, 0}; + uchar* ptrs[3]; + NAryMatIterator it(arrays, ptrs); + size_t i, len = it.size*cn; + + for( i = 0; i < it.nplanes; i++, ++it ) + func( ptrs[0], ptrs[1], ptrs[2], (int)len, palpha ); +} + +/****************************************************************************************\ +* Covariation Matrix * +\****************************************************************************************/ + +void cv::calcCovarMatrix( const Mat* data, int nsamples, Mat& covar, Mat& _mean, int flags, int ctype ) +{ + CV_Assert( data && nsamples > 0 ); + Size size = data[0].size(); + int sz = size.width*size.height, esz = (int)data[0].elemSize(); + int type = data[0].type(); + Mat mean; + ctype = std::max(std::max(CV_MAT_DEPTH(ctype >= 0 ? 
ctype : type), _mean.depth()), CV_32F); + + if( (flags & CV_COVAR_USE_AVG) != 0 ) + { + CV_Assert( _mean.size() == size ); + if( _mean.isContinuous() && _mean.type() == ctype ) + mean = _mean.reshape(1, 1); + else + { + _mean.convertTo(mean, ctype); + mean = mean.reshape(1, 1); + } + } + + Mat _data(nsamples, sz, type); + for( int i = 0; i < nsamples; i++ ) + { + CV_Assert( data[i].size() == size && data[i].type() == type ); + if( data[i].isContinuous() ) + memcpy( _data.ptr(i), data[i].data, sz*esz ); + else + { + Mat dataRow(size.height, size.width, type, _data.ptr(i)); + data[i].copyTo(dataRow); + } + } + + calcCovarMatrix( _data, covar, mean, (flags & ~(CV_COVAR_ROWS|CV_COVAR_COLS)) | CV_COVAR_ROWS, ctype ); + if( (flags & CV_COVAR_USE_AVG) == 0 ) + _mean = mean.reshape(1, size.height); +} + +void cv::calcCovarMatrix( InputArray _data, OutputArray _covar, InputOutputArray _mean, int flags, int ctype ) +{ + Mat data = _data.getMat(), mean; + CV_Assert( ((flags & CV_COVAR_ROWS) != 0) ^ ((flags & CV_COVAR_COLS) != 0) ); + bool takeRows = (flags & CV_COVAR_ROWS) != 0; + int type = data.type(); + int nsamples = takeRows ? data.rows : data.cols; + CV_Assert( nsamples > 0 ); + Size size = takeRows ? Size(data.cols, 1) : Size(1, data.rows); + + if( (flags & CV_COVAR_USE_AVG) != 0 ) + { + mean = _mean.getMat(); + ctype = std::max(std::max(CV_MAT_DEPTH(ctype >= 0 ? ctype : type), mean.depth()), CV_32F); + CV_Assert( mean.size() == size ); + if( mean.type() != ctype ) + { + _mean.create(mean.size(), ctype); + Mat tmp = _mean.getMat(); + mean.convertTo(tmp, ctype); + mean = tmp; + } + } + else + { + ctype = std::max(CV_MAT_DEPTH(ctype >= 0 ? ctype : type), CV_32F); + reduce( _data, _mean, takeRows ? 0 : 1, CV_REDUCE_AVG, ctype ); + mean = _mean.getMat(); + } + + mulTransposed( data, _covar, ((flags & CV_COVAR_NORMAL) == 0) ^ takeRows, + mean, (flags & CV_COVAR_SCALE) != 0 ? 
1./nsamples : 1, ctype ); +} + +/****************************************************************************************\ +* Mahalanobis * +\****************************************************************************************/ + +double cv::Mahalanobis( InputArray _v1, InputArray _v2, InputArray _icovar ) +{ + Mat v1 = _v1.getMat(), v2 = _v2.getMat(), icovar = _icovar.getMat(); + int type = v1.type(), depth = v1.depth(); + Size sz = v1.size(); + int i, j, len = sz.width*sz.height*v1.channels(); + AutoBuffer buf(len); + double result = 0; + + CV_Assert( type == v2.type() && type == icovar.type() && + sz == v2.size() && len == icovar.rows && len == icovar.cols ); + + sz.width *= v1.channels(); + if( v1.isContinuous() && v2.isContinuous() ) + { + sz.width *= sz.height; + sz.height = 1; + } + + if( depth == CV_32F ) + { + const float* src1 = (const float*)v1.data; + const float* src2 = (const float*)v2.data; + size_t step1 = v1.step/sizeof(src1[0]); + size_t step2 = v2.step/sizeof(src2[0]); + double* diff = buf; + const float* mat = (const float*)icovar.data; + size_t matstep = icovar.step/sizeof(mat[0]); + + for( ; sz.height--; src1 += step1, src2 += step2, diff += sz.width ) + { + for( i = 0; i < sz.width; i++ ) + diff[i] = src1[i] - src2[i]; + } + + diff = buf; + for( i = 0; i < len; i++, mat += matstep ) + { + double row_sum = 0; + for( j = 0; j <= len - 4; j += 4 ) + row_sum += diff[j]*mat[j] + diff[j+1]*mat[j+1] + + diff[j+2]*mat[j+2] + diff[j+3]*mat[j+3]; + for( ; j < len; j++ ) + row_sum += diff[j]*mat[j]; + result += row_sum * diff[i]; + } + } + else if( depth == CV_64F ) + { + const double* src1 = (const double*)v1.data; + const double* src2 = (const double*)v2.data; + size_t step1 = v1.step/sizeof(src1[0]); + size_t step2 = v2.step/sizeof(src2[0]); + double* diff = buf; + const double* mat = (const double*)icovar.data; + size_t matstep = icovar.step/sizeof(mat[0]); + + for( ; sz.height--; src1 += step1, src2 += step2, diff += sz.width ) + { + for( i = 
0; i < sz.width; i++ ) + diff[i] = src1[i] - src2[i]; + } + + diff = buf; + for( i = 0; i < len; i++, mat += matstep ) + { + double row_sum = 0; + for( j = 0; j <= len - 4; j += 4 ) + row_sum += diff[j]*mat[j] + diff[j+1]*mat[j+1] + + diff[j+2]*mat[j+2] + diff[j+3]*mat[j+3]; + for( ; j < len; j++ ) + row_sum += diff[j]*mat[j]; + result += row_sum * diff[i]; + } + } + else + CV_Error( CV_StsUnsupportedFormat, "" ); + + return std::sqrt(result); +} + +double cv::Mahalonobis( InputArray _v1, InputArray _v2, InputArray _icovar ) +{ + return Mahalanobis(_v1, _v2, _icovar); +} + +/****************************************************************************************\ +* MulTransposed * +\****************************************************************************************/ + +namespace cv +{ + +template static void +MulTransposedR( const Mat& srcmat, Mat& dstmat, const Mat& deltamat, double scale ) +{ + int i, j, k; + const sT* src = (const sT*)srcmat.data; + dT* dst = (dT*)dstmat.data; + const dT* delta = (const dT*)deltamat.data; + size_t srcstep = srcmat.step/sizeof(src[0]); + size_t dststep = dstmat.step/sizeof(dst[0]); + size_t deltastep = deltamat.rows > 1 ? deltamat.step/sizeof(delta[0]) : 0; + int delta_cols = deltamat.cols; + Size size = srcmat.size(); + dT* tdst = dst; + dT* col_buf = 0; + dT* delta_buf = 0; + int buf_size = size.height*sizeof(dT); + AutoBuffer buf; + + if( delta && delta_cols < size.width ) + { + assert( delta_cols == 1 ); + buf_size *= 5; + } + buf.allocate(buf_size); + col_buf = (dT*)(uchar*)buf; + + if( delta && delta_cols < size.width ) + { + delta_buf = col_buf + size.height; + for( i = 0; i < size.height; i++ ) + delta_buf[i*4] = delta_buf[i*4+1] = + delta_buf[i*4+2] = delta_buf[i*4+3] = delta[i*deltastep]; + delta = delta_buf; + deltastep = deltastep ? 
4 : 0; + } + + if( !delta ) + for( i = 0; i < size.width; i++, tdst += dststep ) + { + for( k = 0; k < size.height; k++ ) + col_buf[k] = src[k*srcstep+i]; + + for( j = i; j <= size.width - 4; j += 4 ) + { + double s0 = 0, s1 = 0, s2 = 0, s3 = 0; + const sT *tsrc = src + j; + + for( k = 0; k < size.height; k++, tsrc += srcstep ) + { + double a = col_buf[k]; + s0 += a * tsrc[0]; + s1 += a * tsrc[1]; + s2 += a * tsrc[2]; + s3 += a * tsrc[3]; + } + + tdst[j] = (dT)(s0*scale); + tdst[j+1] = (dT)(s1*scale); + tdst[j+2] = (dT)(s2*scale); + tdst[j+3] = (dT)(s3*scale); + } + + for( ; j < size.width; j++ ) + { + double s0 = 0; + const sT *tsrc = src + j; + + for( k = 0; k < size.height; k++, tsrc += srcstep ) + s0 += (double)col_buf[k] * tsrc[0]; + + tdst[j] = (dT)(s0*scale); + } + } + else + for( i = 0; i < size.width; i++, tdst += dststep ) + { + if( !delta_buf ) + for( k = 0; k < size.height; k++ ) + col_buf[k] = src[k*srcstep+i] - delta[k*deltastep+i]; + else + for( k = 0; k < size.height; k++ ) + col_buf[k] = src[k*srcstep+i] - delta_buf[k*deltastep]; + + for( j = i; j <= size.width - 4; j += 4 ) + { + double s0 = 0, s1 = 0, s2 = 0, s3 = 0; + const sT *tsrc = src + j; + const dT *d = delta_buf ? delta_buf : delta + j; + + for( k = 0; k < size.height; k++, tsrc+=srcstep, d+=deltastep ) + { + double a = col_buf[k]; + s0 += a * (tsrc[0] - d[0]); + s1 += a * (tsrc[1] - d[1]); + s2 += a * (tsrc[2] - d[2]); + s3 += a * (tsrc[3] - d[3]); + } + + tdst[j] = (dT)(s0*scale); + tdst[j+1] = (dT)(s1*scale); + tdst[j+2] = (dT)(s2*scale); + tdst[j+3] = (dT)(s3*scale); + } + + for( ; j < size.width; j++ ) + { + double s0 = 0; + const sT *tsrc = src + j; + const dT *d = delta_buf ? 
delta_buf : delta + j; + + for( k = 0; k < size.height; k++, tsrc+=srcstep, d+=deltastep ) + s0 += (double)col_buf[k] * (tsrc[0] - d[0]); + + tdst[j] = (dT)(s0*scale); + } + } +} + + +template static void +MulTransposedL( const Mat& srcmat, Mat& dstmat, const Mat& deltamat, double scale ) +{ + int i, j, k; + const sT* src = (const sT*)srcmat.data; + dT* dst = (dT*)dstmat.data; + const dT* delta = (const dT*)deltamat.data; + size_t srcstep = srcmat.step/sizeof(src[0]); + size_t dststep = dstmat.step/sizeof(dst[0]); + size_t deltastep = deltamat.rows > 1 ? deltamat.step/sizeof(delta[0]) : 0; + int delta_cols = deltamat.cols; + Size size = srcmat.size(); + dT* tdst = dst; + + if( !delta ) + for( i = 0; i < size.height; i++, tdst += dststep ) + for( j = i; j < size.height; j++ ) + { + double s = 0; + const sT *tsrc1 = src + i*srcstep; + const sT *tsrc2 = src + j*srcstep; + + for( k = 0; k <= size.width - 4; k += 4 ) + s += (double)tsrc1[k]*tsrc2[k] + (double)tsrc1[k+1]*tsrc2[k+1] + + (double)tsrc1[k+2]*tsrc2[k+2] + (double)tsrc1[k+3]*tsrc2[k+3]; + for( ; k < size.width; k++ ) + s += (double)tsrc1[k] * tsrc2[k]; + tdst[j] = (dT)(s*scale); + } + else + { + dT delta_buf[4]; + int delta_shift = delta_cols == size.width ? 
4 : 0; + AutoBuffer buf(size.width*sizeof(dT)); + dT* row_buf = (dT*)(uchar*)buf; + + for( i = 0; i < size.height; i++, tdst += dststep ) + { + const sT *tsrc1 = src + i*srcstep; + const dT *tdelta1 = delta + i*deltastep; + + if( delta_cols < size.width ) + for( k = 0; k < size.width; k++ ) + row_buf[k] = tsrc1[k] - tdelta1[0]; + else + for( k = 0; k < size.width; k++ ) + row_buf[k] = tsrc1[k] - tdelta1[k]; + + for( j = i; j < size.height; j++ ) + { + double s = 0; + const sT *tsrc2 = src + j*srcstep; + const dT *tdelta2 = delta + j*deltastep; + if( delta_cols < size.width ) + { + delta_buf[0] = delta_buf[1] = + delta_buf[2] = delta_buf[3] = tdelta2[0]; + tdelta2 = delta_buf; + } + for( k = 0; k <= size.width-4; k += 4, tdelta2 += delta_shift ) + s += (double)row_buf[k]*(tsrc2[k] - tdelta2[0]) + + (double)row_buf[k+1]*(tsrc2[k+1] - tdelta2[1]) + + (double)row_buf[k+2]*(tsrc2[k+2] - tdelta2[2]) + + (double)row_buf[k+3]*(tsrc2[k+3] - tdelta2[3]); + for( ; k < size.width; k++, tdelta2++ ) + s += (double)row_buf[k]*(tsrc2[k] - tdelta2[0]); + tdst[j] = (dT)(s*scale); + } + } + } +} + +typedef void (*MulTransposedFunc)(const Mat& src, Mat& dst, const Mat& delta, double scale); + +} + +void cv::mulTransposed( InputArray _src, OutputArray _dst, bool ata, + InputArray _delta, double scale, int dtype ) +{ + Mat src = _src.getMat(), delta = _delta.getMat(); + const int gemm_level = 100; // boundary above which GEMM is faster. + int stype = src.type(); + dtype = std::max(std::max(CV_MAT_DEPTH(dtype >= 0 ? dtype : stype), delta.depth()), CV_32F); + CV_Assert( src.channels() == 1 ); + + if( delta.data ) + { + CV_Assert( delta.channels() == 1 && + (delta.rows == src.rows || delta.rows == 1) && + (delta.cols == src.cols || delta.cols == 1)); + if( delta.type() != dtype ) + delta.convertTo(delta, dtype); + } + + int dsize = ata ? 
src.cols : src.rows; + _dst.create( dsize, dsize, dtype ); + Mat dst = _dst.getMat(); + + if( src.data == dst.data || (stype == dtype && + (dst.cols >= gemm_level && dst.rows >= gemm_level && + src.cols >= gemm_level && src.rows >= gemm_level))) + { + Mat src2; + const Mat* tsrc = &src; + if( delta.data ) + { + if( delta.size() == src.size() ) + subtract( src, delta, src2 ); + else + { + repeat(delta, src.rows/delta.rows, src.cols/delta.cols, src2); + subtract( src, src2, src2 ); + } + tsrc = &src2; + } + gemm( *tsrc, *tsrc, scale, Mat(), 0, dst, ata ? GEMM_1_T : GEMM_2_T ); + } + else + { + MulTransposedFunc func = 0; + if(stype == CV_8U && dtype == CV_32F) + { + if(ata) + func = MulTransposedR; + else + func = MulTransposedL; + } + else if(stype == CV_8U && dtype == CV_64F) + { + if(ata) + func = MulTransposedR; + else + func = MulTransposedL; + } + else if(stype == CV_16U && dtype == CV_32F) + { + if(ata) + func = MulTransposedR; + else + func = MulTransposedL; + } + else if(stype == CV_16U && dtype == CV_64F) + { + if(ata) + func = MulTransposedR; + else + func = MulTransposedL; + } + else if(stype == CV_16S && dtype == CV_32F) + { + if(ata) + func = MulTransposedR; + else + func = MulTransposedL; + } + else if(stype == CV_16S && dtype == CV_64F) + { + if(ata) + func = MulTransposedR; + else + func = MulTransposedL; + } + else if(stype == CV_32F && dtype == CV_32F) + { + if(ata) + func = MulTransposedR; + else + func = MulTransposedL; + } + else if(stype == CV_32F && dtype == CV_64F) + { + if(ata) + func = MulTransposedR; + else + func = MulTransposedL; + } + else if(stype == CV_64F && dtype == CV_64F) + { + if(ata) + func = MulTransposedR; + else + func = MulTransposedL; + } + if( !func ) + CV_Error( CV_StsUnsupportedFormat, "" ); + + func( src, dst, delta, scale ); + completeSymm( dst, false ); + } +} + +/****************************************************************************************\ +* Dot Product * 
+\****************************************************************************************/ + +namespace cv +{ + +template double +dotProd_(const T* src1, const T* src2, int len) +{ + int i = 0; + double result = 0; + for( ; i <= len - 4; i += 4 ) + result += (double)src1[i]*src2[i] + (double)src1[i+1]*src2[i+1] + + (double)src1[i+2]*src2[i+2] + (double)src1[i+3]*src2[i+3]; + for( ; i < len; i++ ) + result += (double)src1[i]*src2[i]; + + return result; +} + + +static double dotProd_8u(const uchar* src1, const uchar* src2, int len) +{ + double r = 0; +#if ARITHM_USE_IPP + ippiDotProd_8u64f_C1R(src1, (int)(len*sizeof(src1[0])), + src2, (int)(len*sizeof(src2[0])), + ippiSize(len, 1), &r); + return r; +#else + int i = 0; + +#if CV_SSE2 + if( USE_SSE2 ) + { + int j, len0 = len & -4, blockSize0 = (1 << 15), blockSize; + __m128i z = _mm_setzero_si128(); + while( i < len0 ) + { + blockSize = std::min(len0 - i, blockSize0); + __m128i s = _mm_setzero_si128(); + for( j = 0; j <= blockSize - 16; j += 16 ) + { + __m128i b0 = _mm_loadu_si128((const __m128i*)(src1 + j)); + __m128i b1 = _mm_loadu_si128((const __m128i*)(src2 + j)); + __m128i s0, s1, s2, s3; + s0 = _mm_unpacklo_epi8(b0, z); + s2 = _mm_unpackhi_epi8(b0, z); + s1 = _mm_unpacklo_epi8(b1, z); + s3 = _mm_unpackhi_epi8(b1, z); + s0 = _mm_madd_epi16(s0, s1); + s1 = _mm_madd_epi16(s2, s3); + s = _mm_add_epi32(s, s0); + s = _mm_add_epi32(s, s2); + } + + for( ; j < blockSize; j += 4 ) + { + __m128i s0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(const int*)(src1 + j)), z); + __m128i s1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(const int*)(src2 + j)), z); + s0 = _mm_madd_epi16(s0, s1); + s = _mm_add_epi32(s, s0); + } + CV_DECL_ALIGNED(16) int buf[4]; + _mm_store_si128((__m128i*)buf, s); + r += buf[0] + buf[1] + buf[2] + buf[3]; + + src1 += blockSize; + src2 += blockSize; + i += blockSize; + } + } +#endif + return r + dotProd_(src1, src2, len - i); +#endif +} + + +static double dotProd_8s(const schar* src1, const schar* src2, int len) 
+{ + return dotProd_(src1, src2, len); +} + +static double dotProd_16u(const ushort* src1, const ushort* src2, int len) +{ + double r = 0; + IF_IPP(ippiDotProd_16u64f_C1R(src1, (int)(len*sizeof(src1[0])), + src2, (int)(len*sizeof(src2[0])), + ippiSize(len, 1), &r), + r = dotProd_(src1, src2, len)); + return r; +} + +static double dotProd_16s(const short* src1, const short* src2, int len) +{ + double r = 0; + IF_IPP(ippiDotProd_16s64f_C1R(src1, (int)(len*sizeof(src1[0])), + src2, (int)(len*sizeof(src2[0])), + ippiSize(len, 1), &r), + r = dotProd_(src1, src2, len)); + return r; +} + +static double dotProd_32s(const int* src1, const int* src2, int len) +{ + double r = 0; + IF_IPP(ippiDotProd_32s64f_C1R(src1, (int)(len*sizeof(src1[0])), + src2, (int)(len*sizeof(src2[0])), + ippiSize(len, 1), &r), + r = dotProd_(src1, src2, len)); + return r; +} + +static double dotProd_32f(const float* src1, const float* src2, int len) +{ + double r = 0; + IF_IPP(ippsDotProd_32f64f(src1, src2, len, &r), + r = dotProd_(src1, src2, len)); + return r; +} + +static double dotProd_64f(const double* src1, const double* src2, int len) +{ + double r = 0; + IF_IPP(ippsDotProd_64f(src1, src2, len, &r), + r = dotProd_(src1, src2, len)); + return r; +} + + +typedef double (*DotProdFunc)(const uchar* src1, const uchar* src2, int len); + +static DotProdFunc dotProdTab[] = +{ + (DotProdFunc)dotProd_8u, (DotProdFunc)dotProd_8s, (DotProdFunc)dotProd_16u, + (DotProdFunc)dotProd_16s, (DotProdFunc)dotProd_32s, (DotProdFunc)dotProd_32f, + (DotProdFunc)dotProd_64f, 0 +}; + +double Mat::dot(InputArray _mat) const +{ + Mat mat = _mat.getMat(); + int cn = channels(); + DotProdFunc func = dotProdTab[depth()]; + CV_Assert( mat.type() == type() && mat.size == size && func != 0 ); + + if( isContinuous() && mat.isContinuous() ) + { + size_t len = total()*cn; + if( len == (size_t)(int)len ) + return func(data, mat.data, (int)len); + } + + const Mat* arrays[] = {this, &mat, 0}; + uchar* ptrs[2]; + NAryMatIterator 
it(arrays, ptrs); + int len = (int)(it.size*cn); + double r = 0; + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + r += func( ptrs[0], ptrs[1], len ); + + return r; +} + +/****************************************************************************************\ +* PCA * +\****************************************************************************************/ + +PCA::PCA() {} + +PCA::PCA(InputArray data, InputArray mean, int flags, int maxComponents) +{ + operator()(data, mean, flags, maxComponents); +} + +PCA& PCA::operator()(InputArray _data, InputArray __mean, int flags, int maxComponents) +{ + Mat data = _data.getMat(), _mean = __mean.getMat(); + int covar_flags = CV_COVAR_SCALE; + int i, len, in_count; + Size mean_sz; + + CV_Assert( data.channels() == 1 ); + if( flags & CV_PCA_DATA_AS_COL ) + { + len = data.rows; + in_count = data.cols; + covar_flags |= CV_COVAR_COLS; + mean_sz = Size(1, len); + } + else + { + len = data.cols; + in_count = data.rows; + covar_flags |= CV_COVAR_ROWS; + mean_sz = Size(len, 1); + } + + int count = std::min(len, in_count), out_count = count; + if( maxComponents > 0 ) + out_count = std::min(count, maxComponents); + + // "scrambled" way to compute PCA (when cols(A)>rows(A)): + // B = A'A; B*x=b*x; C = AA'; C*y=c*y -> AA'*y=c*y -> A'A*(A'*y)=c*(A'*y) -> c = b, x=A'*y + if( len <= in_count ) + covar_flags |= CV_COVAR_NORMAL; + + int ctype = std::max(CV_32F, data.depth()); + mean.create( mean_sz, ctype ); + + Mat covar( count, count, ctype ); + + if( _mean.data ) + { + CV_Assert( _mean.size() == mean_sz ); + _mean.convertTo(mean, ctype); + } + + calcCovarMatrix( data, covar, mean, covar_flags, ctype ); + eigen( covar, eigenvalues, eigenvectors ); + + if( !(covar_flags & CV_COVAR_NORMAL) ) + { + // CV_PCA_DATA_AS_ROW: cols(A)>rows(A). x=A'*y -> x'=y'*A + // CV_PCA_DATA_AS_COL: rows(A)>cols(A). 
x=A''*y -> x'=y'*A' + Mat tmp_data, tmp_mean = repeat(mean, data.rows/mean.rows, data.cols/mean.cols); + if( data.type() != ctype || tmp_mean.data == mean.data ) + { + data.convertTo( tmp_data, ctype ); + subtract( tmp_data, tmp_mean, tmp_data ); + } + else + { + subtract( data, tmp_mean, tmp_mean ); + tmp_data = tmp_mean; + } + + Mat evects1(count, len, ctype); + gemm( eigenvectors, tmp_data, 1, Mat(), 0, evects1, + (flags & CV_PCA_DATA_AS_COL) ? CV_GEMM_B_T : 0); + eigenvectors = evects1; + + // normalize eigenvectors + for( i = 0; i < out_count; i++ ) + { + Mat vec = eigenvectors.row(i); + normalize(vec, vec); + } + } + + if( count > out_count ) + { + // use clone() to physically copy the data and thus deallocate the original matrices + eigenvalues = eigenvalues.rowRange(0,out_count).clone(); + eigenvectors = eigenvectors.rowRange(0,out_count).clone(); + } + return *this; +} + + +void PCA::project(InputArray _data, OutputArray result) const +{ + Mat data = _data.getMat(); + CV_Assert( mean.data && eigenvectors.data && + ((mean.rows == 1 && mean.cols == data.cols) || (mean.cols == 1 && mean.rows == data.rows))); + Mat tmp_data, tmp_mean = repeat(mean, data.rows/mean.rows, data.cols/mean.cols); + int ctype = mean.type(); + if( data.type() != ctype || tmp_mean.data == mean.data ) + { + data.convertTo( tmp_data, ctype ); + subtract( tmp_data, tmp_mean, tmp_data ); + } + else + { + subtract( data, tmp_mean, tmp_mean ); + tmp_data = tmp_mean; + } + if( mean.rows == 1 ) + gemm( tmp_data, eigenvectors, 1, Mat(), 0, result, GEMM_2_T ); + else + gemm( eigenvectors, tmp_data, 1, Mat(), 0, result, 0 ); +} + +Mat PCA::project(InputArray data) const +{ + Mat result; + project(data, result); + return result; +} + +void PCA::backProject(InputArray _data, OutputArray result) const +{ + Mat data = _data.getMat(); + CV_Assert( mean.data && eigenvectors.data && + ((mean.rows == 1 && eigenvectors.rows == data.cols) || + (mean.cols == 1 && eigenvectors.rows == data.rows))); + + Mat 
tmp_data, tmp_mean; + data.convertTo(tmp_data, mean.type()); + if( mean.rows == 1 ) + { + tmp_mean = repeat(mean, data.rows, 1); + gemm( tmp_data, eigenvectors, 1, tmp_mean, 1, result, 0 ); + } + else + { + tmp_mean = repeat(mean, 1, data.cols); + gemm( eigenvectors, tmp_data, 1, tmp_mean, 1, result, GEMM_1_T ); + } +} + +Mat PCA::backProject(InputArray data) const +{ + Mat result; + backProject(data, result); + return result; +} + +} + +void cv::PCACompute(InputArray data, InputOutputArray mean, + OutputArray eigenvectors, int maxComponents) +{ + PCA pca; + pca(data, mean, 0, maxComponents); + pca.mean.copyTo(mean); + pca.eigenvectors.copyTo(eigenvectors); +} + +void cv::PCAProject(InputArray data, InputArray mean, + InputArray eigenvectors, OutputArray result) +{ + PCA pca; + pca.mean = mean.getMat(); + pca.eigenvectors = eigenvectors.getMat(); + pca.project(data, result); +} + +void cv::PCABackProject(InputArray data, InputArray mean, + InputArray eigenvectors, OutputArray result) +{ + PCA pca; + pca.mean = mean.getMat(); + pca.eigenvectors = eigenvectors.getMat(); + pca.backProject(data, result); +} + + +/****************************************************************************************\ +* Earlier API * +\****************************************************************************************/ + +CV_IMPL void cvGEMM( const CvArr* Aarr, const CvArr* Barr, double alpha, + const CvArr* Carr, double beta, CvArr* Darr, int flags ) +{ + cv::Mat A = cv::cvarrToMat(Aarr), B = cv::cvarrToMat(Barr); + cv::Mat C, D = cv::cvarrToMat(Darr); + + if( Carr ) + C = cv::cvarrToMat(Carr); + + CV_Assert( (D.rows == ((flags & CV_GEMM_A_T) == 0 ? A.rows : A.cols)) && + (D.cols == ((flags & CV_GEMM_B_T) == 0 ? 
B.cols : B.rows)) && + D.type() == A.type() ); + + gemm( A, B, alpha, C, beta, D, flags ); +} + + +CV_IMPL void +cvTransform( const CvArr* srcarr, CvArr* dstarr, + const CvMat* transmat, const CvMat* shiftvec ) +{ + cv::Mat m = cv::cvarrToMat(transmat), src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + + if( shiftvec ) + { + cv::Mat v = cv::cvarrToMat(shiftvec).reshape(1,m.rows), + _m(m.rows, m.cols + 1, m.type()), m1 = _m.colRange(0,m.cols), v1 = _m.col(m.cols); + m.convertTo(m1, m1.type()); + v.convertTo(v1, v1.type()); + m = _m; + } + + CV_Assert( dst.depth() == src.depth() && dst.channels() == m.rows ); + cv::transform( src, dst, m ); +} + + +CV_IMPL void +cvPerspectiveTransform( const CvArr* srcarr, CvArr* dstarr, const CvMat* mat ) +{ + cv::Mat m = cv::cvarrToMat(mat), src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + + CV_Assert( dst.type() == src.type() && dst.channels() == m.rows-1 ); + cv::perspectiveTransform( src, dst, m ); +} + + +CV_IMPL void cvScaleAdd( const CvArr* srcarr1, CvScalar scale, + const CvArr* srcarr2, CvArr* dstarr ) +{ + cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr); + + CV_Assert( src1.size == dst.size && src1.type() == dst.type() ); + cv::scaleAdd( src1, scale.val[0], cv::cvarrToMat(srcarr2), dst ); +} + + +CV_IMPL void +cvCalcCovarMatrix( const CvArr** vecarr, int count, + CvArr* covarr, CvArr* avgarr, int flags ) +{ + cv::Mat cov0 = cv::cvarrToMat(covarr), cov = cov0, mean0, mean; + CV_Assert( vecarr != 0 && count >= 1 ); + + if( avgarr ) + mean = mean0 = cv::cvarrToMat(avgarr); + + if( (flags & CV_COVAR_COLS) != 0 || (flags & CV_COVAR_ROWS) != 0 ) + { + + cv::Mat data = cv::cvarrToMat(vecarr[0]); + cv::calcCovarMatrix( data, cov, mean, flags, cov.type() ); + } + else + { + std::vector data(count); + for( int i = 0; i < count; i++ ) + data[i] = cv::cvarrToMat(vecarr[i]); + cv::calcCovarMatrix( &data[0], count, cov, mean, flags, cov.type() ); + } + + if( mean.data != mean0.data && 
mean0.data ) + mean.convertTo(mean0, mean0.type()); + + if( cov.data != cov0.data ) + cov.convertTo(cov0, cov0.type()); +} + + +CV_IMPL double +cvMahalanobis( const CvArr* srcAarr, const CvArr* srcBarr, const CvArr* matarr ) +{ + return cv::Mahalanobis(cv::cvarrToMat(srcAarr), + cv::cvarrToMat(srcBarr), cv::cvarrToMat(matarr)); +} + +CV_IMPL void +cvMulTransposed( const CvArr* srcarr, CvArr* dstarr, + int order, const CvArr* deltaarr, double scale ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst0 = cv::cvarrToMat(dstarr), dst = dst0, delta; + if( deltaarr ) + delta = cv::cvarrToMat(deltaarr); + cv::mulTransposed( src, dst, order != 0, delta, scale, dst.type()); + if( dst.data != dst0.data ) + dst.convertTo(dst0, dst0.type()); +} + +CV_IMPL double cvDotProduct( const CvArr* srcAarr, const CvArr* srcBarr ) +{ + return cv::cvarrToMat(srcAarr).dot(cv::cvarrToMat(srcBarr)); +} + + +CV_IMPL void +cvCalcPCA( const CvArr* data_arr, CvArr* avg_arr, CvArr* eigenvals, CvArr* eigenvects, int flags ) +{ + cv::Mat data = cv::cvarrToMat(data_arr), mean0 = cv::cvarrToMat(avg_arr); + cv::Mat evals0 = cv::cvarrToMat(eigenvals), evects0 = cv::cvarrToMat(eigenvects); + cv::Mat mean = mean0, evals = evals0, evects = evects0; + + cv::PCA pca; + pca.mean = mean; + pca.eigenvalues = evals; + pca.eigenvectors = evects; + + pca(data, (flags & CV_PCA_USE_AVG) ? mean : cv::Mat(), + flags, evals.data ? 
evals.rows + evals.cols - 1 : 0); + + if( pca.mean.size() == mean.size() ) + pca.mean.convertTo( mean, mean.type() ); + else + { + cv::Mat temp; pca.mean.convertTo( temp, mean.type() ); + transpose( temp, mean ); + } + + evals = pca.eigenvalues; + evects = pca.eigenvectors; + int ecount0 = evals0.cols + evals0.rows - 1; + int ecount = evals.cols + evals.rows - 1; + + CV_Assert( (evals0.cols == 1 || evals0.rows == 1) && + ecount0 <= ecount && + evects0.cols == evects.cols && + evects0.rows == ecount0 ); + + cv::Mat temp = evals0; + if( evals.rows == 1 ) + evals.colRange(0, ecount0).convertTo(temp, evals0.type()); + else + evals.rowRange(0, ecount0).convertTo(temp, evals0.type()); + if( temp.data != evals0.data ) + transpose(temp, evals0); + evects.rowRange(0, ecount0).convertTo( evects0, evects0.type() ); + + // otherwise some datatype's or size's were incorrect, so the output arrays have been reallocated + CV_Assert( mean0.data == mean.data ); +} + + +CV_IMPL void +cvProjectPCA( const CvArr* data_arr, const CvArr* avg_arr, + const CvArr* eigenvects, CvArr* result_arr ) +{ + cv::Mat data = cv::cvarrToMat(data_arr), mean = cv::cvarrToMat(avg_arr); + cv::Mat evects = cv::cvarrToMat(eigenvects), dst0 = cv::cvarrToMat(result_arr), dst = dst0; + + cv::PCA pca; + pca.mean = mean; + int n; + if( mean.rows == 1 ) + { + CV_Assert(dst.cols <= evects.rows && dst.rows == data.rows); + n = dst.cols; + } + else + { + CV_Assert(dst.rows <= evects.rows && dst.cols == data.cols); + n = dst.rows; + } + pca.eigenvectors = evects.rowRange(0, n); + + cv::Mat result = pca.project(data); + if( result.cols != dst.cols ) + result = result.reshape(1, 1); + result.convertTo(dst, dst.type()); + + CV_Assert(dst0.data == dst.data); +} + + +CV_IMPL void +cvBackProjectPCA( const CvArr* proj_arr, const CvArr* avg_arr, + const CvArr* eigenvects, CvArr* result_arr ) +{ + cv::Mat data = cv::cvarrToMat(proj_arr), mean = cv::cvarrToMat(avg_arr); + cv::Mat evects = cv::cvarrToMat(eigenvects), dst0 = 
cv::cvarrToMat(result_arr), dst = dst0; + + cv::PCA pca; + pca.mean = mean; + int n; + if( mean.rows == 1 ) + { + CV_Assert(data.cols <= evects.rows && dst.rows == data.rows); + n = data.cols; + } + else + { + CV_Assert(data.rows <= evects.rows && dst.cols == data.cols); + n = data.rows; + } + pca.eigenvectors = evects.rowRange(0, n); + + cv::Mat result = pca.backProject(data); + result.convertTo(dst, dst.type()); + + CV_Assert(dst0.data == dst.data); +} + +/* End of file. */ diff --git a/opencv/core/matop.cpp b/opencv/core/matop.cpp new file mode 100644 index 0000000..e4ee189 --- /dev/null +++ b/opencv/core/matop.cpp @@ -0,0 +1,1649 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2010, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* //////////////////////////////////////////////////////////////////// +// +// Mat basic operations: Copy, Set +// +// */ + +#include "precomp.hpp" + +namespace cv +{ + +class MatOp_Identity : public MatOp +{ +public: + MatOp_Identity() {} + virtual ~MatOp_Identity() {} + + bool elementWise(const MatExpr& /*expr*/) const { return true; } + void assign(const MatExpr& expr, Mat& m, int type=-1) const; + + static void makeExpr(MatExpr& res, const Mat& m); +}; + +static MatOp_Identity g_MatOp_Identity; + +class MatOp_AddEx : public MatOp +{ +public: + MatOp_AddEx() {} + virtual ~MatOp_AddEx() {} + + bool elementWise(const MatExpr& /*expr*/) const { return true; } + void assign(const MatExpr& expr, Mat& m, int type=-1) const; + + void add(const MatExpr& e1, const Scalar& s, MatExpr& res) const; + void subtract(const Scalar& s, const MatExpr& expr, MatExpr& res) const; + void multiply(const MatExpr& e1, double s, MatExpr& res) const; + void divide(double s, const MatExpr& e, MatExpr& res) const; + + void transpose(const MatExpr& e1, MatExpr& res) const; + void abs(const MatExpr& expr, MatExpr& res) const; + + static void makeExpr(MatExpr& res, const Mat& a, const Mat& b, 
double alpha, double beta, const Scalar& s=Scalar()); +}; + +static MatOp_AddEx g_MatOp_AddEx; + +class MatOp_Bin : public MatOp +{ +public: + MatOp_Bin() {} + virtual ~MatOp_Bin() {} + + bool elementWise(const MatExpr& /*expr*/) const { return true; } + void assign(const MatExpr& expr, Mat& m, int type=-1) const; + + void multiply(const MatExpr& e1, double s, MatExpr& res) const; + void divide(double s, const MatExpr& e, MatExpr& res) const; + + static void makeExpr(MatExpr& res, char op, const Mat& a, const Mat& b, double scale=1); + static void makeExpr(MatExpr& res, char op, const Mat& a, const Scalar& s); +}; + +static MatOp_Bin g_MatOp_Bin; + +class MatOp_Cmp : public MatOp +{ +public: + MatOp_Cmp() {} + virtual ~MatOp_Cmp() {} + + bool elementWise(const MatExpr& /*expr*/) const { return true; } + void assign(const MatExpr& expr, Mat& m, int type=-1) const; + + static void makeExpr(MatExpr& res, int cmpop, const Mat& a, const Mat& b); + static void makeExpr(MatExpr& res, int cmpop, const Mat& a, double alpha); +}; + +static MatOp_Cmp g_MatOp_Cmp; + +class MatOp_GEMM : public MatOp +{ +public: + MatOp_GEMM() {} + virtual ~MatOp_GEMM() {} + + bool elementWise(const MatExpr& /*expr*/) const { return false; } + void assign(const MatExpr& expr, Mat& m, int type=-1) const; + + void add(const MatExpr& e1, const MatExpr& e2, MatExpr& res) const; + void subtract(const MatExpr& e1, const MatExpr& e2, MatExpr& res) const; + void multiply(const MatExpr& e, double s, MatExpr& res) const; + + void transpose(const MatExpr& expr, MatExpr& res) const; + + static void makeExpr(MatExpr& res, int flags, const Mat& a, const Mat& b, + double alpha=1, const Mat& c=Mat(), double beta=1); +}; + +static MatOp_GEMM g_MatOp_GEMM; + +class MatOp_Invert : public MatOp +{ +public: + MatOp_Invert() {} + virtual ~MatOp_Invert() {} + + bool elementWise(const MatExpr& /*expr*/) const { return false; } + void assign(const MatExpr& expr, Mat& m, int type=-1) const; + + void matmul(const MatExpr& 
expr1, const MatExpr& expr2, MatExpr& res) const; + + static void makeExpr(MatExpr& res, int method, const Mat& m); +}; + +static MatOp_Invert g_MatOp_Invert; + +class MatOp_T : public MatOp +{ +public: + MatOp_T() {} + virtual ~MatOp_T() {} + + bool elementWise(const MatExpr& /*expr*/) const { return false; } + void assign(const MatExpr& expr, Mat& m, int type=-1) const; + + void multiply(const MatExpr& e1, double s, MatExpr& res) const; + void transpose(const MatExpr& expr, MatExpr& res) const; + + static void makeExpr(MatExpr& res, const Mat& a, double alpha=1); +}; + +static MatOp_T g_MatOp_T; + +class MatOp_Solve : public MatOp +{ +public: + MatOp_Solve() {} + virtual ~MatOp_Solve() {} + + bool elementWise(const MatExpr& /*expr*/) const { return false; } + void assign(const MatExpr& expr, Mat& m, int type=-1) const; + + static void makeExpr(MatExpr& res, int method, const Mat& a, const Mat& b); +}; + +static MatOp_Solve g_MatOp_Solve; + +class MatOp_Initializer : public MatOp +{ +public: + MatOp_Initializer() {} + virtual ~MatOp_Initializer() {} + + bool elementWise(const MatExpr& /*expr*/) const { return false; } + void assign(const MatExpr& expr, Mat& m, int type=-1) const; + + void multiply(const MatExpr& e, double s, MatExpr& res) const; + + static void makeExpr(MatExpr& res, int method, Size sz, int type, double alpha=1); +}; + +static MatOp_Initializer g_MatOp_Initializer; + +static inline bool isIdentity(const MatExpr& e) { return e.op == &g_MatOp_Identity; } +static inline bool isAddEx(const MatExpr& e) { return e.op == &g_MatOp_AddEx; } +static inline bool isScaled(const MatExpr& e) { return isAddEx(e) && (!e.b.data || e.beta == 0) && e.s == Scalar(); } +static inline bool isBin(const MatExpr& e, char c) { return e.op == &g_MatOp_Bin && e.flags == c; } +static inline bool isCmp(const MatExpr& e) { return e.op == &g_MatOp_Cmp; } +static inline bool isReciprocal(const MatExpr& e) { return isBin(e,'/') && (!e.b.data || e.beta == 0); } +static inline bool 
isT(const MatExpr& e) { return e.op == &g_MatOp_T; } +static inline bool isInv(const MatExpr& e) { return e.op == &g_MatOp_Invert; } +static inline bool isSolve(const MatExpr& e) { return e.op == &g_MatOp_Solve; } +static inline bool isGEMM(const MatExpr& e) { return e.op == &g_MatOp_GEMM; } +static inline bool isMatProd(const MatExpr& e) { return e.op == &g_MatOp_GEMM && (!e.c.data || e.beta == 0); } +static inline bool isInitializer(const MatExpr& e) { return e.op == &g_MatOp_Initializer; } + +///////////////////////////////////////////////////////////////////////////////////////////////////// + +bool MatOp::elementWise(const MatExpr& /*expr*/) const +{ + return false; +} + +void MatOp::roi(const MatExpr& expr, const Range& rowRange, const Range& colRange, MatExpr& e) const +{ + if( elementWise(expr) ) + { + e = MatExpr(expr.op, expr.flags, Mat(), Mat(), Mat(), + expr.alpha, expr.beta, expr.s); + if(expr.a.data) + e.a = expr.a(rowRange, colRange); + if(expr.b.data) + e.b = expr.b(rowRange, colRange); + if(expr.c.data) + e.c = expr.c(rowRange, colRange); + } + else + { + Mat m; + expr.op->assign(expr, m); + e = MatExpr(&g_MatOp_Identity, 0, m(rowRange, colRange), Mat(), Mat()); + } +} + +void MatOp::diag(const MatExpr& expr, int d, MatExpr& e) const +{ + if( elementWise(expr) ) + { + e = MatExpr(expr.op, expr.flags, Mat(), Mat(), Mat(), + expr.alpha, expr.beta, expr.s); + if(expr.a.data) + e.a = expr.a.diag(d); + if(expr.b.data) + e.b = expr.b.diag(d); + if(expr.c.data) + e.c = expr.c.diag(d); + } + else + { + Mat m; + expr.op->assign(expr, m); + e = MatExpr(&g_MatOp_Identity, 0, m.diag(d), Mat(), Mat()); + } +} + + +void MatOp::augAssignAdd(const MatExpr& expr, Mat& m) const +{ + Mat temp; + expr.op->assign(expr, temp); + m += temp; +} + + +void MatOp::augAssignSubtract(const MatExpr& expr, Mat& m) const +{ + Mat temp; + expr.op->assign(expr, temp); + m -= temp; +} + + +void MatOp::augAssignMultiply(const MatExpr& expr, Mat& m) const +{ + Mat temp; + 
expr.op->assign(expr, temp); + m *= temp; +} + + +void MatOp::augAssignDivide(const MatExpr& expr, Mat& m) const +{ + Mat temp; + expr.op->assign(expr, temp); + m /= temp; +} + + +void MatOp::augAssignAnd(const MatExpr& expr, Mat& m) const +{ + Mat temp; + expr.op->assign(expr, temp); + m &= temp; +} + + +void MatOp::augAssignOr(const MatExpr& expr, Mat& m) const +{ + Mat temp; + expr.op->assign(expr, temp); + m |= temp; +} + + +void MatOp::augAssignXor(const MatExpr& expr, Mat& m) const +{ + Mat temp; + expr.op->assign(expr, temp); + m /= temp; +} + + +void MatOp::add(const MatExpr& e1, const MatExpr& e2, MatExpr& res) const +{ + if( this == e2.op ) + { + double alpha = 1, beta = 1; + Scalar s; + Mat m1, m2; + if( isAddEx(e1) && (!e1.b.data || e1.beta == 0) ) + { + m1 = e1.a; + alpha = e1.alpha; + s = e1.s; + } + else + e1.op->assign(e1, m1); + + if( isAddEx(e2) && (!e2.b.data || e2.beta == 0) ) + { + m2 = e2.a; + beta = e2.alpha; + s += e2.s; + } + else + e2.op->assign(e2, m2); + MatOp_AddEx::makeExpr(res, m1, m2, alpha, beta, s); + } + else + e2.op->add(e1, e2, res); +} + + +void MatOp::add(const MatExpr& expr1, const Scalar& s, MatExpr& res) const +{ + Mat m1; + expr1.op->assign(expr1, m1); + MatOp_AddEx::makeExpr(res, m1, Mat(), 1, 0, s); +} + + +void MatOp::subtract(const MatExpr& e1, const MatExpr& e2, MatExpr& res) const +{ + if( this == e2.op ) + { + double alpha = 1, beta = -1; + Scalar s; + Mat m1, m2; + if( isAddEx(e1) && (!e1.b.data || e1.beta == 0) ) + { + m1 = e1.a; + alpha = e1.alpha; + s = e1.s; + } + else + e1.op->assign(e1, m1); + + if( isAddEx(e2) && (!e2.b.data || e2.beta == 0) ) + { + m2 = e2.a; + beta = -e2.alpha; + s -= e2.s; + } + else + e2.op->assign(e2, m2); + MatOp_AddEx::makeExpr(res, m1, m2, alpha, beta, s); + } + else + e2.op->subtract(e1, e2, res); +} + + +void MatOp::subtract(const Scalar& s, const MatExpr& expr, MatExpr& res) const +{ + Mat m; + expr.op->assign(expr, m); + MatOp_AddEx::makeExpr(res, m, Mat(), -1, 0, s); +} + + 
+void MatOp::multiply(const MatExpr& e1, const MatExpr& e2, MatExpr& res, double scale) const +{ + if( this == e2.op ) + { + Mat m1, m2; + + if( isReciprocal(e1) ) + { + if( isScaled(e2) ) + { + scale *= e2.alpha; + m2 = e2.a; + } + else + e2.op->assign(e2, m2); + + MatOp_Bin::makeExpr(res, '/', m2, e1.a, scale/e1.alpha); + } + else + { + char op = '*'; + if( isScaled(e1) ) + { + m1 = e1.a; + scale *= e1.alpha; + } + else + e1.op->assign(e1, m1); + + if( isScaled(e2) ) + { + m2 = e2.a; + scale *= e2.alpha; + } + else if( isReciprocal(e2) ) + { + op = '/'; + m2 = e2.a; + scale /= e2.alpha; + } + else + e2.op->assign(e2, m2); + + MatOp_Bin::makeExpr(res, op, m1, m2, scale); + } + } + else + e2.op->multiply(e1, e2, res, scale); +} + + +void MatOp::multiply(const MatExpr& expr, double s, MatExpr& res) const +{ + Mat m; + expr.op->assign(expr, m); + MatOp_AddEx::makeExpr(res, m, Mat(), s, 0); +} + + +void MatOp::divide(const MatExpr& e1, const MatExpr& e2, MatExpr& res, double scale) const +{ + if( this == e2.op ) + { + if( isReciprocal(e1) && isReciprocal(e2) ) + MatOp_Bin::makeExpr(res, '/', e2.a, e1.a, e1.alpha/e2.alpha); + else + { + Mat m1, m2; + char op = '/'; + + if( isScaled(e1) ) + { + m1 = e1.a; + scale *= e1.alpha; + } + else + e1.op->assign(e1, m1); + + if( isScaled(e2) ) + { + m2 = e2.a; + scale /= e2.alpha; + } + else if( isReciprocal(e2) ) + { + m2 = e2.a; + scale /= e2.alpha; + op = '*'; + } + else + e2.op->assign(e2, m2); + MatOp_Bin::makeExpr(res, op, m1, m2, scale); + } + } + else + e2.op->divide(e1, e2, res, scale); +} + + +void MatOp::divide(double s, const MatExpr& expr, MatExpr& res) const +{ + Mat m; + expr.op->assign(expr, m); + MatOp_Bin::makeExpr(res, '/', m, Mat(), s); +} + + +void MatOp::abs(const MatExpr& expr, MatExpr& res) const +{ + Mat m; + expr.op->assign(expr, m); + MatOp_Bin::makeExpr(res, 'a', m, Mat()); +} + + +void MatOp::transpose(const MatExpr& expr, MatExpr& res) const +{ + Mat m; + expr.op->assign(expr, m); + 
MatOp_T::makeExpr(res, m, 1); +} + + +void MatOp::matmul(const MatExpr& e1, const MatExpr& e2, MatExpr& res) const +{ + if( this == e2.op ) + { + double scale = 1; + int flags = 0; + Mat m1, m2; + + if( isT(e1) ) + { + flags = CV_GEMM_A_T; + scale = e1.alpha; + m1 = e1.a; + } + else if( isScaled(e1) ) + { + scale = e1.alpha; + m1 = e1.a; + } + else + e1.op->assign(e1, m1); + + if( isT(e2) ) + { + flags |= CV_GEMM_B_T; + scale *= e2.alpha; + m2 = e2.a; + } + else if( isScaled(e2) ) + { + scale *= e2.alpha; + m2 = e2.a; + } + else + e2.op->assign(e2, m2); + + MatOp_GEMM::makeExpr(res, flags, m1, m2, scale); + } + else + e2.op->matmul(e1, e2, res); +} + + +void MatOp::invert(const MatExpr& expr, int method, MatExpr& res) const +{ + Mat m; + expr.op->assign(expr, m); + MatOp_Invert::makeExpr(res, method, m); +} + + +Size MatOp::size(const MatExpr& expr) const +{ + return !expr.a.empty() ? expr.a.size() : expr.b.empty() ? expr.b.size() : expr.c.size(); +} + +int MatOp::type(const MatExpr& expr) const +{ + return !expr.a.empty() ? expr.a.type() : expr.b.empty() ? 
expr.b.type() : expr.c.type(); +} + +////////////////////////////////////////////////////////////////////////////////////////////////// + +MatExpr::MatExpr(const Mat& m) : op(&g_MatOp_Identity), flags(0), a(m), b(Mat()), c(Mat()), alpha(1), beta(0), s(Scalar()) +{ +} + +MatExpr MatExpr::row(int y) const +{ + MatExpr e; + op->roi(*this, Range(y, y+1), Range::all(), e); + return e; +} + +MatExpr MatExpr::col(int x) const +{ + MatExpr e; + op->roi(*this, Range::all(), Range(x, x+1), e); + return e; +} + +MatExpr MatExpr::diag(int d) const +{ + MatExpr e; + op->diag(*this, d, e); + return e; +} + +MatExpr MatExpr::operator()( const Range& rowRange, const Range& colRange ) const +{ + MatExpr e; + op->roi(*this, rowRange, colRange, e); + return e; +} + +MatExpr MatExpr::operator()( const Rect& roi ) const +{ + MatExpr e; + op->roi(*this, Range(roi.y, roi.y + roi.height), Range(roi.x, roi.x + roi.width), e); + return e; +} + +Mat MatExpr::cross(const Mat& m) const +{ + return ((Mat)*this).cross(m); +} + +double MatExpr::dot(const Mat& m) const +{ + return ((Mat)*this).dot(m); +} + +MatExpr MatExpr::t() const +{ + MatExpr e; + op->transpose(*this, e); + return e; +} + +MatExpr MatExpr::inv(int method) const +{ + MatExpr e; + op->invert(*this, method, e); + return e; +} + +MatExpr MatExpr::mul(const MatExpr& e, double scale) const +{ + MatExpr en; + op->multiply(*this, e, en, scale); + return en; +} + +MatExpr MatExpr::mul(const Mat& m, double scale) const +{ + MatExpr e; + op->multiply(*this, MatExpr(m), e, scale); + return e; +} + +MatExpr operator + (const Mat& a, const Mat& b) +{ + MatExpr e; + MatOp_AddEx::makeExpr(e, a, b, 1, 1); + return e; +} + +MatExpr operator + (const Mat& a, const Scalar& s) +{ + MatExpr e; + MatOp_AddEx::makeExpr(e, a, Mat(), 1, 0, s); + return e; +} + +MatExpr operator + (const Scalar& s, const Mat& a) +{ + MatExpr e; + MatOp_AddEx::makeExpr(e, a, Mat(), 1, 0, s); + return e; +} + +MatExpr operator + (const MatExpr& e, const Mat& m) +{ + 
MatExpr en; + e.op->add(e, MatExpr(m), en); + return en; +} + +MatExpr operator + (const Mat& m, const MatExpr& e) +{ + MatExpr en; + e.op->add(e, MatExpr(m), en); + return en; +} + +MatExpr operator + (const MatExpr& e, const Scalar& s) +{ + MatExpr en; + e.op->add(e, s, en); + return en; +} + +MatExpr operator + (const Scalar& s, const MatExpr& e) +{ + MatExpr en; + e.op->add(e, s, en); + return en; +} + +MatExpr operator + (const MatExpr& e1, const MatExpr& e2) +{ + MatExpr en; + e1.op->add(e1, e2, en); + return en; +} + +MatExpr operator - (const Mat& a, const Mat& b) +{ + MatExpr e; + MatOp_AddEx::makeExpr(e, a, b, 1, -1); + return e; +} + +MatExpr operator - (const Mat& a, const Scalar& s) +{ + MatExpr e; + MatOp_AddEx::makeExpr(e, a, Mat(), 1, 0, -s); + return e; +} + +MatExpr operator - (const Scalar& s, const Mat& a) +{ + MatExpr e; + MatOp_AddEx::makeExpr(e, a, Mat(), -1, 0, s); + return e; +} + +MatExpr operator - (const MatExpr& e, const Mat& m) +{ + MatExpr en; + e.op->subtract(e, MatExpr(m), en); + return en; +} + +MatExpr operator - (const Mat& m, const MatExpr& e) +{ + MatExpr en; + e.op->subtract(MatExpr(m), e, en); + return en; +} + +MatExpr operator - (const MatExpr& e, const Scalar& s) +{ + MatExpr en; + e.op->add(e, -s, en); + return en; +} + +MatExpr operator - (const Scalar& s, const MatExpr& e) +{ + MatExpr en; + e.op->subtract(s, e, en); + return en; +} + +MatExpr operator - (const MatExpr& e1, const MatExpr& e2) +{ + MatExpr en; + e1.op->subtract(e1, e2, en); + return en; +} + +MatExpr operator - (const Mat& m) +{ + MatExpr e; + MatOp_AddEx::makeExpr(e, m, Mat(), -1, 0); + return e; +} + +MatExpr operator - (const MatExpr& e) +{ + MatExpr en; + e.op->subtract(Scalar(0), e, en); + return en; +} + +MatExpr operator * (const Mat& a, const Mat& b) +{ + MatExpr e; + MatOp_GEMM::makeExpr(e, 0, a, b); + return e; +} + +MatExpr operator * (const Mat& a, double s) +{ + MatExpr e; + MatOp_AddEx::makeExpr(e, a, Mat(), s, 0); + return e; +} + +MatExpr 
operator * (double s, const Mat& a) +{ + MatExpr e; + MatOp_AddEx::makeExpr(e, a, Mat(), s, 0); + return e; +} + +MatExpr operator * (const MatExpr& e, const Mat& m) +{ + MatExpr en; + e.op->matmul(e, MatExpr(m), en); + return en; +} + +MatExpr operator * (const Mat& m, const MatExpr& e) +{ + MatExpr en; + e.op->matmul(MatExpr(m), e, en); + return en; +} + +MatExpr operator * (const MatExpr& e, double s) +{ + MatExpr en; + e.op->multiply(e, s, en); + return en; +} + +MatExpr operator * (double s, const MatExpr& e) +{ + MatExpr en; + e.op->multiply(e, s, en); + return en; +} + +MatExpr operator * (const MatExpr& e1, const MatExpr& e2) +{ + MatExpr en; + e1.op->matmul(e1, e2, en); + return en; +} + +MatExpr operator / (const Mat& a, const Mat& b) +{ + MatExpr e; + MatOp_Bin::makeExpr(e, '/', a, b); + return e; +} + +MatExpr operator / (const Mat& a, double s) +{ + MatExpr e; + MatOp_AddEx::makeExpr(e, a, Mat(), 1./s, 0); + return e; +} + +MatExpr operator / (double s, const Mat& a) +{ + MatExpr e; + MatOp_Bin::makeExpr(e, '/', a, Mat(), s); + return e; +} + +MatExpr operator / (const MatExpr& e, const Mat& m) +{ + MatExpr en; + e.op->divide(e, MatExpr(m), en); + return en; +} + +MatExpr operator / (const Mat& m, const MatExpr& e) +{ + MatExpr en; + e.op->divide(MatExpr(m), e, en); + return en; +} + +MatExpr operator / (const MatExpr& e, double s) +{ + MatExpr en; + e.op->multiply(e, 1./s, en); + return en; +} + +MatExpr operator / (double s, const MatExpr& e) +{ + MatExpr en; + e.op->divide(s, e, en); + return en; +} + +MatExpr operator / (const MatExpr& e1, const MatExpr& e2) +{ + MatExpr en; + e1.op->divide(e1, e2, en); + return en; +} + +MatExpr operator < (const Mat& a, const Mat& b) +{ + MatExpr e; + MatOp_Cmp::makeExpr(e, CV_CMP_LT, a, b); + return e; +} + +MatExpr operator < (const Mat& a, double s) +{ + MatExpr e; + MatOp_Cmp::makeExpr(e, CV_CMP_LT, a, s); + return e; +} + +MatExpr operator < (double s, const Mat& a) +{ + MatExpr e; + MatOp_Cmp::makeExpr(e, 
CV_CMP_GT, a, s); + return e; +} + +MatExpr operator <= (const Mat& a, const Mat& b) +{ + MatExpr e; + MatOp_Cmp::makeExpr(e, CV_CMP_LE, a, b); + return e; +} + +MatExpr operator <= (const Mat& a, double s) +{ + MatExpr e; + MatOp_Cmp::makeExpr(e, CV_CMP_LE, a, s); + return e; +} + +MatExpr operator <= (double s, const Mat& a) +{ + MatExpr e; + MatOp_Cmp::makeExpr(e, CV_CMP_GE, a, s); + return e; +} + +MatExpr operator == (const Mat& a, const Mat& b) +{ + MatExpr e; + MatOp_Cmp::makeExpr(e, CV_CMP_EQ, a, b); + return e; +} + +MatExpr operator == (const Mat& a, double s) +{ + MatExpr e; + MatOp_Cmp::makeExpr(e, CV_CMP_EQ, a, s); + return e; +} + +MatExpr operator == (double s, const Mat& a) +{ + MatExpr e; + MatOp_Cmp::makeExpr(e, CV_CMP_EQ, a, s); + return e; +} + +MatExpr operator != (const Mat& a, const Mat& b) +{ + MatExpr e; + MatOp_Cmp::makeExpr(e, CV_CMP_NE, a, b); + return e; +} + +MatExpr operator != (const Mat& a, double s) +{ + MatExpr e; + MatOp_Cmp::makeExpr(e, CV_CMP_NE, a, s); + return e; +} + +MatExpr operator != (double s, const Mat& a) +{ + MatExpr e; + MatOp_Cmp::makeExpr(e, CV_CMP_NE, a, s); + return e; +} + +MatExpr operator >= (const Mat& a, const Mat& b) +{ + MatExpr e; + MatOp_Cmp::makeExpr(e, CV_CMP_GE, a, b); + return e; +} + +MatExpr operator >= (const Mat& a, double s) +{ + MatExpr e; + MatOp_Cmp::makeExpr(e, CV_CMP_GE, a, s); + return e; +} + +MatExpr operator >= (double s, const Mat& a) +{ + MatExpr e; + MatOp_Cmp::makeExpr(e, CV_CMP_LE, a, s); + return e; +} + +MatExpr operator > (const Mat& a, const Mat& b) +{ + MatExpr e; + MatOp_Cmp::makeExpr(e, CV_CMP_GT, a, b); + return e; +} + +MatExpr operator > (const Mat& a, double s) +{ + MatExpr e; + MatOp_Cmp::makeExpr(e, CV_CMP_GT, a, s); + return e; +} + +MatExpr operator > (double s, const Mat& a) +{ + MatExpr e; + MatOp_Cmp::makeExpr(e, CV_CMP_LT, a, s); + return e; +} + +MatExpr min(const Mat& a, const Mat& b) +{ + MatExpr e; + MatOp_Bin::makeExpr(e, 'm', a, b); + return e; +} + 
+MatExpr min(const Mat& a, double s) +{ + MatExpr e; + MatOp_Bin::makeExpr(e, 'm', a, s); + return e; +} + +MatExpr min(double s, const Mat& a) +{ + MatExpr e; + MatOp_Bin::makeExpr(e, 'm', a, s); + return e; +} + +MatExpr max(const Mat& a, const Mat& b) +{ + MatExpr e; + MatOp_Bin::makeExpr(e, 'M', a, b); + return e; +} + +MatExpr max(const Mat& a, double s) +{ + MatExpr e; + MatOp_Bin::makeExpr(e, 'M', a, s); + return e; +} + +MatExpr max(double s, const Mat& a) +{ + MatExpr e; + MatOp_Bin::makeExpr(e, 'M', a, s); + return e; +} + +MatExpr operator & (const Mat& a, const Mat& b) +{ + MatExpr e; + MatOp_Bin::makeExpr(e, '&', a, b); + return e; +} + +MatExpr operator & (const Mat& a, const Scalar& s) +{ + MatExpr e; + MatOp_Bin::makeExpr(e, '&', a, s); + return e; +} + +MatExpr operator & (const Scalar& s, const Mat& a) +{ + MatExpr e; + MatOp_Bin::makeExpr(e, '&', a, s); + return e; +} + +MatExpr operator | (const Mat& a, const Mat& b) +{ + MatExpr e; + MatOp_Bin::makeExpr(e, '|', a, b); + return e; +} + +MatExpr operator | (const Mat& a, const Scalar& s) +{ + MatExpr e; + MatOp_Bin::makeExpr(e, '|', a, s); + return e; +} + +MatExpr operator | (const Scalar& s, const Mat& a) +{ + MatExpr e; + MatOp_Bin::makeExpr(e, '|', a, s); + return e; +} + +MatExpr operator ^ (const Mat& a, const Mat& b) +{ + MatExpr e; + MatOp_Bin::makeExpr(e, '^', a, b); + return e; +} + +MatExpr operator ^ (const Mat& a, const Scalar& s) +{ + MatExpr e; + MatOp_Bin::makeExpr(e, '^', a, s); + return e; +} + +MatExpr operator ^ (const Scalar& s, const Mat& a) +{ + MatExpr e; + MatOp_Bin::makeExpr(e, '^', a, s); + return e; +} + +MatExpr operator ~(const Mat& a) +{ + MatExpr e; + MatOp_Bin::makeExpr(e, '~', a, Scalar()); + return e; +} + +MatExpr abs(const Mat& a) +{ + MatExpr e; + MatOp_Bin::makeExpr(e, 'a', a, Scalar()); + return e; +} + +MatExpr abs(const MatExpr& e) +{ + MatExpr en; + e.op->abs(e, en); + return en; +} + + +Size MatExpr::size() const +{ + if( isT(*this) || isInv(*this) ) + 
return Size(a.rows, a.cols); + if( isGEMM(*this) ) + return Size(b.cols, a.rows); + if( isSolve(*this) ) + return Size(b.cols, a.cols); + if( isInitializer(*this) ) + return a.size(); + return op ? op->size(*this) : Size(); +} + + +int MatExpr::type() const +{ + if( isInitializer(*this) ) + return a.type(); + if( isCmp(*this) ) + return CV_8U; + return op ? op->type(*this) : -1; +} + + +///////////////////////////////////////////////////////////////////////////////////////////////////// + +void MatOp_Identity::assign(const MatExpr& e, Mat& m, int type) const +{ + if( type == -1 || type == e.a.type() ) + m = e.a; + else + { + CV_Assert( CV_MAT_CN(type) == e.a.channels() ); + e.a.convertTo(m, type); + } +} + +inline void MatOp_Identity::makeExpr(MatExpr& res, const Mat& m) +{ + res = MatExpr(&g_MatOp_Identity, 0, m, Mat(), Mat(), 1, 0); +} + +///////////////////////////////////////////////////////////////////////////////////////////////////// + +void MatOp_AddEx::assign(const MatExpr& e, Mat& m, int type) const +{ + Mat temp, &dst = type == -1 || e.a.type() == type ? 
m : temp; + if( e.b.data ) + { + if( e.s == Scalar() || !e.s.isReal() ) + { + if( e.alpha == 1 ) + { + if( e.beta == 1 ) + cv::add(e.a, e.b, dst); + else if( e.beta == -1 ) + cv::subtract(e.a, e.b, dst); + else + cv::scaleAdd(e.b, e.beta, e.a, dst); + } + else if( e.beta == 1 ) + { + if( e.alpha == -1 ) + cv::subtract(e.b, e.a, dst); + else + cv::scaleAdd(e.a, e.alpha, e.b, dst); + } + else + cv::addWeighted(e.a, e.alpha, e.b, e.beta, 0, dst); + + if( !e.s.isReal() ) + cv::add(dst, e.s, dst); + } + else + cv::addWeighted(e.a, e.alpha, e.b, e.beta, e.s[0], dst); + } + else if( e.s.isReal() && (dst.data != m.data || fabs(e.alpha) != 1)) + { + e.a.convertTo(m, type, e.alpha, e.s[0]); + return; + } + else if( e.alpha == 1 ) + cv::add(e.a, e.s, dst); + else if( e.alpha == -1 ) + cv::subtract(e.s, e.a, dst); + else + { + e.a.convertTo(dst, e.a.type(), e.alpha); + cv::add(dst, e.s, dst); + } + + if( dst.data != m.data ) + dst.convertTo(m, m.type()); +} + + +void MatOp_AddEx::add(const MatExpr& e, const Scalar& s, MatExpr& res) const +{ + res = e; + res.s += s; +} + + +void MatOp_AddEx::subtract(const Scalar& s, const MatExpr& e, MatExpr& res) const +{ + res = e; + res.alpha = -res.alpha; + res.beta = -res.beta; + res.s = s - res.s; +} + +void MatOp_AddEx::multiply(const MatExpr& e, double s, MatExpr& res) const +{ + res = e; + res.alpha *= s; + res.beta *= s; + res.s *= s; +} + +void MatOp_AddEx::divide(double s, const MatExpr& e, MatExpr& res) const +{ + if( isScaled(e) ) + MatOp_Bin::makeExpr(res, '/', e.a, Mat(), s/e.alpha); + else + MatOp::divide(s, e, res); +} + + +void MatOp_AddEx::transpose(const MatExpr& e, MatExpr& res) const +{ + if( isScaled(e) ) + MatOp_T::makeExpr(res, e.a, e.alpha); + else + MatOp::transpose(e, res); +} + +void MatOp_AddEx::abs(const MatExpr& e, MatExpr& res) const +{ + if( (!e.b.data || e.beta == 0) && fabs(e.alpha) == 1 ) + MatOp_Bin::makeExpr(res, 'a', e.a, -e.s*e.alpha); + else if( e.b.data && e.alpha + e.beta == 0 && e.alpha*e.beta == 
-1 ) + MatOp_Bin::makeExpr(res, 'a', e.a, e.b); + else + MatOp::abs(e, res); +} + +inline void MatOp_AddEx::makeExpr(MatExpr& res, const Mat& a, const Mat& b, double alpha, double beta, const Scalar& s) +{ + res = MatExpr(&g_MatOp_AddEx, 0, a, b, Mat(), alpha, beta, s); +} + +////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + +void MatOp_Bin::assign(const MatExpr& e, Mat& m, int type) const +{ + Mat temp, &dst = type == -1 || e.a.type() == type ? m : temp; + + if( e.flags == '*' ) + cv::multiply(e.a, e.b, dst, e.alpha); + else if( e.flags == '/' && e.b.data ) + cv::divide(e.a, e.b, dst, e.alpha); + else if( e.flags == '/' && !e.b.data ) + cv::divide(e.alpha, e.a, dst ); + else if( e.flags == '&' && e.b.data ) + bitwise_and(e.a, e.b, dst); + else if( e.flags == '&' && !e.b.data ) + bitwise_and(e.a, e.s, dst); + else if( e.flags == '|' && e.b.data ) + bitwise_or(e.a, e.b, dst); + else if( e.flags == '|' && !e.b.data ) + bitwise_or(e.a, e.s, dst); + else if( e.flags == '^' && e.b.data ) + bitwise_xor(e.a, e.b, dst); + else if( e.flags == '^' && !e.b.data ) + bitwise_xor(e.a, e.s, dst); + else if( e.flags == '~' && !e.b.data ) + bitwise_not(e.a, dst); + else if( e.flags == 'm' && e.b.data ) + cv::min(e.a, e.b, dst); + else if( e.flags == 'm' && !e.b.data ) + cv::min(e.a, e.s[0], dst); + else if( e.flags == 'M' && e.b.data ) + cv::max(e.a, e.b, dst); + else if( e.flags == 'M' && !e.b.data ) + cv::max(e.a, e.s[0], dst); + else if( e.flags == 'a' && e.b.data ) + cv::absdiff(e.a, e.b, dst); + else if( e.flags == 'a' && !e.b.data ) + cv::absdiff(e.a, e.s, dst); + else + CV_Error(CV_StsError, "Unknown operation"); + + if( dst.data != m.data ) + dst.convertTo(m, type); +} + +void MatOp_Bin::multiply(const MatExpr& e, double s, MatExpr& res) const +{ + if( e.flags == '*' || e.flags == '/' ) + { + res = e; + res.alpha *= s; + } + else + MatOp::multiply(e, s, res); +} + +void MatOp_Bin::divide(double s, const 
MatExpr& e, MatExpr& res) const +{ + if( e.flags == '/' && (!e.b.data || e.beta == 0) ) + MatOp_AddEx::makeExpr(res, e.a, Mat(), s/e.alpha, 0); + else + MatOp::divide(s, e, res); +} + +inline void MatOp_Bin::makeExpr(MatExpr& res, char op, const Mat& a, const Mat& b, double scale) +{ + res = MatExpr(&g_MatOp_Bin, op, a, b, Mat(), scale, b.data ? 1 : 0); +} + +inline void MatOp_Bin::makeExpr(MatExpr& res, char op, const Mat& a, const Scalar& s) +{ + res = MatExpr(&g_MatOp_Bin, op, a, Mat(), Mat(), 1, 0, s); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////////// + +void MatOp_Cmp::assign(const MatExpr& e, Mat& m, int type) const +{ + Mat temp, &dst = type == -1 || type == CV_8U ? m : temp; + + if( e.b.data ) + cv::compare(e.a, e.b, dst, e.flags); + else + cv::compare(e.a, e.alpha, dst, e.flags); + + if( dst.data != m.data ) + dst.convertTo(m, type); +} + +inline void MatOp_Cmp::makeExpr(MatExpr& res, int cmpop, const Mat& a, const Mat& b) +{ + res = MatExpr(&g_MatOp_Cmp, cmpop, a, b, Mat(), 1, 1); +} + +inline void MatOp_Cmp::makeExpr(MatExpr& res, int cmpop, const Mat& a, double alpha) +{ + res = MatExpr(&g_MatOp_Cmp, cmpop, a, Mat(), Mat(), alpha, 1); +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////// + +void MatOp_T::assign(const MatExpr& e, Mat& m, int type) const +{ + Mat temp, &dst = type == -1 || type == e.a.type() ? 
m : temp; + + cv::transpose(e.a, dst); + + if( dst.data != m.data || e.alpha != 1 ) + dst.convertTo(m, type, e.alpha); +} + +void MatOp_T::multiply(const MatExpr& e, double s, MatExpr& res) const +{ + res = e; + res.alpha *= s; +} + +void MatOp_T::transpose(const MatExpr& e, MatExpr& res) const +{ + if( e.alpha == 1 ) + MatOp_Identity::makeExpr(res, e.a); + else + MatOp_AddEx::makeExpr(res, e.a, Mat(), e.alpha, 0); +} + +inline void MatOp_T::makeExpr(MatExpr& res, const Mat& a, double alpha) +{ + res = MatExpr(&g_MatOp_T, 0, a, Mat(), Mat(), alpha, 0); +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////// + +void MatOp_GEMM::assign(const MatExpr& e, Mat& m, int type) const +{ + Mat temp, &dst = type == -1 || type == e.a.type() ? m : temp; + + cv::gemm(e.a, e.b, e.alpha, e.c, e.beta, dst, e.flags); + if( dst.data != m.data ) + dst.convertTo(m, type); +} + +void MatOp_GEMM::add(const MatExpr& e1, const MatExpr& e2, MatExpr& res) const +{ + bool i1 = isIdentity(e1), i2 = isIdentity(e2); + double alpha1 = i1 ? 1 : e1.alpha, alpha2 = i2 ? 1 : e2.alpha; + + if( isMatProd(e1) && (i2 || isScaled(e2) || isT(e2)) ) + MatOp_GEMM::makeExpr(res, (e1.flags & ~CV_GEMM_C_T)|(isT(e2) ? CV_GEMM_C_T : 0), + e1.a, e1.b, alpha1, e2.a, alpha2); + else if( isMatProd(e2) && (i1 || isScaled(e1) || isT(e1)) ) + MatOp_GEMM::makeExpr(res, (e2.flags & ~CV_GEMM_C_T)|(isT(e1) ? CV_GEMM_C_T : 0), + e2.a, e2.b, alpha2, e1.a, alpha1); + else if( this == e2.op ) + MatOp::add(e1, e2, res); + else + e2.op->add(e1, e2, res); +} + +void MatOp_GEMM::subtract(const MatExpr& e1, const MatExpr& e2, MatExpr& res) const +{ + bool i1 = isIdentity(e1), i2 = isIdentity(e2); + double alpha1 = i1 ? 1 : e1.alpha, alpha2 = i2 ? 1 : e2.alpha; + + if( isMatProd(e1) && (i2 || isScaled(e2) || isT(e2)) ) + MatOp_GEMM::makeExpr(res, (e1.flags & ~CV_GEMM_C_T)|(isT(e2) ? 
CV_GEMM_C_T : 0), + e1.a, e1.b, alpha1, e2.a, -alpha2); + else if( isMatProd(e2) && (i1 || isScaled(e1) || isT(e1)) ) + MatOp_GEMM::makeExpr(res, (e2.flags & ~CV_GEMM_C_T)|(isT(e1) ? CV_GEMM_C_T : 0), + e2.a, e2.b, -alpha2, e1.a, alpha1); + else if( this == e2.op ) + MatOp::subtract(e1, e2, res); + else + e2.op->subtract(e1, e2, res); +} + +void MatOp_GEMM::multiply(const MatExpr& e, double s, MatExpr& res) const +{ + res = e; + res.alpha *= s; + res.beta *= s; +} + +void MatOp_GEMM::transpose(const MatExpr& e, MatExpr& res) const +{ + res = e; + res.flags ^= CV_GEMM_A_T | CV_GEMM_B_T | CV_GEMM_C_T; + swap(res.a, res.b); +} + +inline void MatOp_GEMM::makeExpr(MatExpr& res, int flags, const Mat& a, const Mat& b, + double alpha, const Mat& c, double beta) +{ + res = MatExpr(&g_MatOp_GEMM, flags, a, b, c, alpha, beta); +} + +/////////////////////////////////////////////////////////////////////////////////////////////////////// + +void MatOp_Invert::assign(const MatExpr& e, Mat& m, int type) const +{ + Mat temp, &dst = type == -1 || type == e.a.type() ? m : temp; + + cv::invert(e.a, dst, e.flags); + if( dst.data != m.data ) + dst.convertTo(m, type); +} + +void MatOp_Invert::matmul(const MatExpr& e1, const MatExpr& e2, MatExpr& res) const +{ + if( isInv(e1) && isIdentity(e2) ) + MatOp_Solve::makeExpr(res, e1.flags, e1.a, e2.a); + else if( this == e2.op ) + MatOp::matmul(e1, e2, res); + else + e2.op->matmul(e1, e2, res); +} + +inline void MatOp_Invert::makeExpr(MatExpr& res, int method, const Mat& m) +{ + res = MatExpr(&g_MatOp_Invert, method, m, Mat(), Mat(), 1, 0); +} + +///////////////////////////////////////////////////////////////////////////////////////////////////////// + +void MatOp_Solve::assign(const MatExpr& e, Mat& m, int type) const +{ + Mat temp, &dst = type == -1 || type == e.a.type() ? 
m : temp; + + cv::solve(e.a, e.b, dst, e.flags); + if( dst.data != m.data ) + dst.convertTo(m, type); +} + +inline void MatOp_Solve::makeExpr(MatExpr& res, int method, const Mat& a, const Mat& b) +{ + res = MatExpr(&g_MatOp_Solve, method, a, b, Mat(), 1, 1); +} + +////////////////////////////////////////////////////////////////////////////////////////////////////////// + +void MatOp_Initializer::assign(const MatExpr& e, Mat& m, int type) const +{ + if( type == -1 ) + type = e.a.type(); + m.create(e.a.size(), type); + if( e.flags == 'I' ) + setIdentity(m, Scalar(e.alpha)); + else if( e.flags == '0' ) + m = Scalar(); + else if( e.flags == '1' ) + m = Scalar(e.alpha); + else + CV_Error(CV_StsError, "Invalid matrix initializer type"); +} + +void MatOp_Initializer::multiply(const MatExpr& e, double s, MatExpr& res) const +{ + res = e; + res.alpha *= s; +} + +inline void MatOp_Initializer::makeExpr(MatExpr& res, int method, Size sz, int type, double alpha) +{ + res = MatExpr(&g_MatOp_Initializer, method, Mat(sz, type, (void*)0), Mat(), Mat(), alpha, 0); +} + + +/////////////////////////////////////////////////////////////////////////////////////////////////////////// + +MatExpr Mat::t() const +{ + MatExpr e; + MatOp_T::makeExpr(e, *this); + return e; +} + +MatExpr Mat::inv(int method) const +{ + MatExpr e; + MatOp_Invert::makeExpr(e, method, *this); + return e; +} + + +MatExpr Mat::mul(InputArray m, double scale) const +{ + MatExpr e; + if(m.kind() == _InputArray::EXPR) + { + const MatExpr& me = *(const MatExpr*)m.obj; + me.op->multiply(MatExpr(*this), me, e, scale); + } + else + MatOp_Bin::makeExpr(e, '*', *this, m.getMat(), scale); + return e; +} + +MatExpr Mat::zeros(int rows, int cols, int type) +{ + MatExpr e; + MatOp_Initializer::makeExpr(e, '0', Size(cols, rows), type); + return e; +} + +MatExpr Mat::zeros(Size size, int type) +{ + MatExpr e; + MatOp_Initializer::makeExpr(e, '0', size, type); + return e; +} + +MatExpr Mat::ones(int rows, int cols, int type) +{ + 
MatExpr e; + MatOp_Initializer::makeExpr(e, '1', Size(cols, rows), type); + return e; +} + +MatExpr Mat::ones(Size size, int type) +{ + MatExpr e; + MatOp_Initializer::makeExpr(e, '1', size, type); + return e; +} + +MatExpr Mat::eye(int rows, int cols, int type) +{ + MatExpr e; + MatOp_Initializer::makeExpr(e, 'I', Size(cols, rows), type); + return e; +} + +MatExpr Mat::eye(Size size, int type) +{ + MatExpr e; + MatOp_Initializer::makeExpr(e, 'I', size, type); + return e; +} + +} + +/* End of file. */ diff --git a/opencv/core/matrix.cpp b/opencv/core/matrix.cpp new file mode 100644 index 0000000..95b0c4e --- /dev/null +++ b/opencv/core/matrix.cpp @@ -0,0 +1,3777 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" + +/****************************************************************************************\ +* [scaled] Identity matrix initialization * +\****************************************************************************************/ + +namespace cv { + +void swap( Mat& a, Mat& b ) +{ + std::swap(a.flags, b.flags); + std::swap(a.dims, b.dims); + std::swap(a.rows, b.rows); + std::swap(a.cols, b.cols); + std::swap(a.data, b.data); + std::swap(a.refcount, b.refcount); + std::swap(a.datastart, b.datastart); + std::swap(a.dataend, b.dataend); + std::swap(a.datalimit, b.datalimit); + std::swap(a.allocator, b.allocator); + + std::swap(a.size.p, b.size.p); + std::swap(a.step.p, b.step.p); + std::swap(a.step.buf[0], b.step.buf[0]); + std::swap(a.step.buf[1], b.step.buf[1]); + + if( a.step.p == b.step.buf ) + { + a.step.p = a.step.buf; + a.size.p = &a.rows; + } + + if( b.step.p == a.step.buf ) + { + b.step.p = b.step.buf; + b.size.p = &b.rows; + } +} + + +static inline void setSize( Mat& m, int _dims, const int* _sz, + const size_t* _steps, bool autoSteps=false ) +{ + CV_Assert( 0 <= _dims && _dims <= CV_MAX_DIM ); + if( m.dims != _dims ) + { + if( 
m.step.p != m.step.buf ) + { + fastFree(m.step.p); + m.step.p = m.step.buf; + m.size.p = &m.rows; + } + if( _dims > 2 ) + { + m.step.p = (size_t*)fastMalloc(_dims*sizeof(m.step.p[0]) + (_dims+1)*sizeof(m.size.p[0])); + m.size.p = (int*)(m.step.p + _dims) + 1; + m.size.p[-1] = _dims; + m.rows = m.cols = -1; + } + } + + m.dims = _dims; + if( !_sz ) + return; + + size_t esz = CV_ELEM_SIZE(m.flags), total = esz; + int i; + for( i = _dims-1; i >= 0; i-- ) + { + int s = _sz[i]; + CV_Assert( s >= 0 ); + m.size.p[i] = s; + + if( _steps ) + m.step.p[i] = i < _dims-1 ? _steps[i] : esz; + else if( autoSteps ) + { + m.step.p[i] = total; + int64 total1 = (int64)total*s; + if( (uint64)total1 != (size_t)total1 ) + CV_Error( CV_StsOutOfRange, "The total matrix size does not fit to \"size_t\" type" ); + total = (size_t)total1; + } + } + + if( _dims == 1 ) + { + m.dims = 2; + m.cols = 1; + m.step[1] = esz; + } +} + +static void updateContinuityFlag(Mat& m) +{ + int i, j; + for( i = 0; i < m.dims; i++ ) + { + if( m.size[i] > 1 ) + break; + } + + for( j = m.dims-1; j > i; j-- ) + { + if( m.step[j]*m.size[j] < m.step[j-1] ) + break; + } + + int64 t = (int64)m.step[0]*m.size[0]; + if( j <= i && t == (int)t ) + m.flags |= Mat::CONTINUOUS_FLAG; + else + m.flags &= ~Mat::CONTINUOUS_FLAG; +} + +static void finalizeHdr(Mat& m) +{ + updateContinuityFlag(m); + int d = m.dims; + if( d > 2 ) + m.rows = m.cols = -1; + if( m.data ) + { + m.datalimit = m.datastart + m.size[0]*m.step[0]; + if( m.size[0] > 0 ) + { + m.dataend = m.data + m.size[d-1]*m.step[d-1]; + for( int i = 0; i < d-1; i++ ) + m.dataend += (m.size[i] - 1)*m.step[i]; + } + else + m.dataend = m.datalimit; + } + else + m.dataend = m.datalimit = 0; +} + + +void Mat::create(int d, const int* _sizes, int _type) +{ + int i; + CV_Assert(0 <= d && _sizes && d <= CV_MAX_DIM && _sizes); + _type = CV_MAT_TYPE(_type); + + if( data && (d == dims || (d == 1 && dims <= 2)) && _type == type() ) + { + if( d == 2 && rows == _sizes[0] && cols == 
_sizes[1] ) + return; + for( i = 0; i < d; i++ ) + if( size[i] != _sizes[i] ) + break; + if( i == d && (d > 1 || size[1] == 1)) + return; + } + + release(); + if( d == 0 ) + return; + flags = (_type & CV_MAT_TYPE_MASK) | MAGIC_VAL; + setSize(*this, d, _sizes, 0, allocator == 0); + + if( total() > 0 ) + { + if( !allocator ) + { + size_t total = alignSize(step.p[0]*size.p[0], (int)sizeof(*refcount)); + data = datastart = (uchar*)fastMalloc(total + (int)sizeof(*refcount)); + refcount = (int*)(data + total); + *refcount = 1; + } + else + { + allocator->allocate(dims, size, _type, refcount, datastart, data, step.p); + CV_Assert( step[dims-1] == (size_t)CV_ELEM_SIZE(flags) ); + } + } + + finalizeHdr(*this); +} + +void Mat::copySize(const Mat& m) +{ + setSize(*this, m.dims, 0, 0); + for( int i = 0; i < dims; i++ ) + { + size[i] = m.size[i]; + step[i] = m.step[i]; + } +} + +void Mat::deallocate() +{ + if( allocator ) + allocator->deallocate(refcount, datastart, data); + else + { + CV_DbgAssert(refcount != 0); + fastFree(datastart); + } +} + + +Mat::Mat(const Mat& m, const Range& rowRange, const Range& colRange) + : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows) +{ + CV_Assert( m.dims >= 2 ); + if( m.dims > 2 ) + { + AutoBuffer rs(m.dims); + rs[0] = rowRange; + rs[1] = colRange; + for( int i = 2; i < m.dims; i++ ) + rs[i] = Range::all(); + *this = m(rs); + return; + } + + *this = m; + if( rowRange != Range::all() && rowRange != Range(0,rows) ) + { + CV_Assert( 0 <= rowRange.start && rowRange.start <= rowRange.end && rowRange.end <= m.rows ); + rows = rowRange.size(); + data += step*rowRange.start; + flags |= SUBMATRIX_FLAG; + } + + if( colRange != Range::all() && colRange != Range(0,cols) ) + { + CV_Assert( 0 <= colRange.start && colRange.start <= colRange.end && colRange.end <= m.cols ); + cols = colRange.size(); + data += colRange.start*elemSize(); + flags &= cols < m.cols ? 
~CONTINUOUS_FLAG : -1; + flags |= SUBMATRIX_FLAG; + } + + if( rows == 1 ) + flags |= CONTINUOUS_FLAG; + + if( rows <= 0 || cols <= 0 ) + { + release(); + rows = cols = 0; + } +} + + +Mat::Mat(const Mat& m, const Rect& roi) + : flags(m.flags), dims(2), rows(roi.height), cols(roi.width), + data(m.data + roi.y*m.step[0]), refcount(m.refcount), + datastart(m.datastart), dataend(m.dataend), datalimit(m.datalimit), + allocator(m.allocator), size(&rows) +{ + CV_Assert( m.dims <= 2 ); + flags &= roi.width < m.cols ? ~CONTINUOUS_FLAG : -1; + flags |= roi.height == 1 ? CONTINUOUS_FLAG : 0; + + size_t esz = CV_ELEM_SIZE(flags); + data += roi.x*esz; + CV_Assert( 0 <= roi.x && 0 <= roi.width && roi.x + roi.width <= m.cols && + 0 <= roi.y && 0 <= roi.height && roi.y + roi.height <= m.rows ); + if( refcount ) + CV_XADD(refcount, 1); + if( roi.width < m.cols || roi.height < m.rows ) + flags |= SUBMATRIX_FLAG; + + step[0] = m.step[0]; step[1] = esz; + + if( rows <= 0 || cols <= 0 ) + { + release(); + rows = cols = 0; + } +} + + +Mat::Mat(int _dims, const int* _sizes, int _type, void* _data, const size_t* _steps) + : flags(MAGIC_VAL|CV_MAT_TYPE(_type)), dims(0), + rows(0), cols(0), data((uchar*)_data), refcount(0), + datastart((uchar*)_data), dataend((uchar*)_data), datalimit((uchar*)_data), + allocator(0), size(&rows) +{ + setSize(*this, _dims, _sizes, _steps, true); + finalizeHdr(*this); +} + + +Mat::Mat(const Mat& m, const Range* ranges) + : flags(m.flags), dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows) +{ + int i, d = m.dims; + + CV_Assert(ranges); + for( i = 0; i < d; i++ ) + { + Range r = ranges[i]; + CV_Assert( r == Range::all() || (0 <= r.start && r.start < r.end && r.end <= m.size[i]) ); + } + *this = m; + for( i = 0; i < d; i++ ) + { + Range r = ranges[i]; + if( r != Range::all() && r != Range(0, size.p[i])) + { + size.p[i] = r.end - r.start; + data += r.start*step.p[i]; + flags |= SUBMATRIX_FLAG; + } + 
} + updateContinuityFlag(*this); +} + + +Mat::Mat(const CvMatND* m, bool copyData) + : flags(MAGIC_VAL|CV_MAT_TYPE(m->type)), dims(0), rows(0), cols(0), + data((uchar*)m->data.ptr), refcount(0), + datastart((uchar*)m->data.ptr), allocator(0), + size(&rows) +{ + int _sizes[CV_MAX_DIM]; + size_t _steps[CV_MAX_DIM]; + + int i, d = m->dims; + for( i = 0; i < d; i++ ) + { + _sizes[i] = m->dim[i].size; + _steps[i] = m->dim[i].step; + } + + setSize(*this, d, _sizes, _steps); + finalizeHdr(*this); + + if( copyData ) + { + Mat temp(*this); + temp.copyTo(*this); + } +} + + +Mat Mat::diag(int d) const +{ + CV_Assert( dims <= 2 ); + Mat m = *this; + size_t esz = elemSize(); + int len; + + if( d >= 0 ) + { + len = std::min(cols - d, rows); + m.data += esz*d; + } + else + { + len = std::min(rows + d, cols); + m.data -= step[0]*d; + } + CV_DbgAssert( len > 0 ); + + m.size[0] = m.rows = len; + m.size[1] = m.cols = 1; + m.step[0] += (len > 1 ? esz : 0); + + if( m.rows > 1 ) + m.flags &= ~CONTINUOUS_FLAG; + else + m.flags |= CONTINUOUS_FLAG; + + if( size() != Size(1,1) ) + m.flags |= SUBMATRIX_FLAG; + + return m; +} + + +Mat::Mat(const IplImage* img, bool copyData) + : flags(MAGIC_VAL), dims(2), rows(0), cols(0), + data(0), refcount(0), datastart(0), dataend(0), allocator(0), size(&rows) +{ + CV_DbgAssert(CV_IS_IMAGE(img) && img->imageData != 0); + + int depth = IPL2CV_DEPTH(img->depth); + size_t esz; + step[0] = img->widthStep; + + if(!img->roi) + { + CV_Assert(img->dataOrder == IPL_DATA_ORDER_PIXEL); + flags = MAGIC_VAL + CV_MAKETYPE(depth, img->nChannels); + rows = img->height; cols = img->width; + datastart = data = (uchar*)img->imageData; + esz = CV_ELEM_SIZE(flags); + } + else + { + CV_Assert(img->dataOrder == IPL_DATA_ORDER_PIXEL || img->roi->coi != 0); + bool selectedPlane = img->roi->coi && img->dataOrder == IPL_DATA_ORDER_PLANE; + flags = MAGIC_VAL + CV_MAKETYPE(depth, selectedPlane ? 
1 : img->nChannels); + rows = img->roi->height; cols = img->roi->width; + esz = CV_ELEM_SIZE(flags); + data = datastart = (uchar*)img->imageData + + (selectedPlane ? (img->roi->coi - 1)*step*img->height : 0) + + img->roi->yOffset*step[0] + img->roi->xOffset*esz; + } + datalimit = datastart + step.p[0]*rows; + dataend = datastart + step.p[0]*(rows-1) + esz*cols; + flags |= (cols*esz == step.p[0] || rows == 1 ? CONTINUOUS_FLAG : 0); + step[1] = esz; + + if( copyData ) + { + Mat m = *this; + release(); + if( !img->roi || !img->roi->coi || + img->dataOrder == IPL_DATA_ORDER_PLANE) + m.copyTo(*this); + else + { + int ch[] = {img->roi->coi - 1, 0}; + create(m.rows, m.cols, m.type()); + mixChannels(&m, 1, this, 1, ch, 1); + } + } +} + + +Mat::operator IplImage() const +{ + CV_Assert( dims <= 2 ); + IplImage img; + cvInitImageHeader(&img, size(), cvIplDepth(flags), channels()); + cvSetData(&img, data, (int)step[0]); + return img; +} + + +void Mat::pop_back(size_t nelems) +{ + CV_Assert( nelems <= (size_t)size.p[0] ); + + if( isSubmatrix() ) + *this = rowRange(0, size.p[0] - (int)nelems); + else + { + size.p[0] -= (int)nelems; + dataend -= nelems*step.p[0]; + /*if( size.p[0] <= 1 ) + { + if( dims <= 2 ) + flags |= CONTINUOUS_FLAG; + else + updateContinuityFlag(*this); + }*/ + } +} + + +void Mat::push_back_(const void* elem) +{ + int r = size.p[0]; + if( isSubmatrix() || dataend + step.p[0] > datalimit ) + reserve( std::max(r + 1, (r*3+1)/2) ); + + size_t esz = elemSize(); + memcpy(data + r*step.p[0], elem, esz); + size.p[0] = r + 1; + dataend += step.p[0]; + if( esz < step.p[0] ) + flags &= ~CONTINUOUS_FLAG; +} + +void Mat::reserve(size_t nelems) +{ + const size_t MIN_SIZE = 64; + + CV_Assert( (int)nelems >= 0 ); + if( !isSubmatrix() && data + step.p[0]*nelems <= datalimit ) + return; + + int r = size.p[0]; + + if( (size_t)r >= nelems ) + return; + + size.p[0] = std::max((int)nelems, 1); + size_t newsize = total()*elemSize(); + + if( newsize < MIN_SIZE ) + size.p[0] = 
(int)((MIN_SIZE + newsize - 1)*nelems/newsize); + + Mat m(dims, size.p, type()); + size.p[0] = r; + if( r > 0 ) + { + Mat mpart = m.rowRange(0, r); + copyTo(mpart); + } + + *this = m; + size.p[0] = r; + dataend = data + step.p[0]*r; +} + + +void Mat::resize(size_t nelems) +{ + int saveRows = size.p[0]; + if( saveRows == (int)nelems ) + return; + CV_Assert( (int)nelems >= 0 ); + + if( isSubmatrix() || data + step.p[0]*nelems > datalimit ) + reserve(nelems); + + size.p[0] = (int)nelems; + dataend += (size.p[0] - saveRows)*step.p[0]; + + //updateContinuityFlag(*this); +} + + +void Mat::resize(size_t nelems, const Scalar& s) +{ + int saveRows = size.p[0]; + resize(nelems); + + if( size.p[0] > saveRows ) + { + Mat part = rowRange(saveRows, size.p[0]); + part = s; + } +} + +void Mat::push_back(const Mat& elems) +{ + int r = size.p[0], delta = elems.size.p[0]; + if( delta == 0 ) + return; + if( this == &elems ) + { + Mat tmp = elems; + push_back(tmp); + return; + } + if( !data ) + { + *this = elems.clone(); + return; + } + + size.p[0] = elems.size.p[0]; + bool eq = size == elems.size; + size.p[0] = r; + if( !eq ) + CV_Error(CV_StsUnmatchedSizes, ""); + if( type() != elems.type() ) + CV_Error(CV_StsUnmatchedFormats, ""); + + if( isSubmatrix() || dataend + step.p[0]*delta > datalimit ) + reserve( std::max(r + delta, (r*3+1)/2) ); + + size.p[0] += delta; + dataend += step.p[0]*delta; + + //updateContinuityFlag(*this); + + if( isContinuous() && elems.isContinuous() ) + memcpy(data + r*step.p[0], elems.data, elems.total()*elems.elemSize()); + else + { + Mat part = rowRange(r, r + delta); + elems.copyTo(part); + } +} + + +Mat cvarrToMat(const CvArr* arr, bool copyData, + bool /*allowND*/, int coiMode) +{ + if( !arr ) + return Mat(); + if( CV_IS_MAT(arr) ) + return Mat((const CvMat*)arr, copyData ); + if( CV_IS_MATND(arr) ) + return Mat((const CvMatND*)arr, copyData ); + if( CV_IS_IMAGE(arr) ) + { + const IplImage* iplimg = (const IplImage*)arr; + if( coiMode == 0 && iplimg->roi 
&& iplimg->roi->coi > 0 ) + CV_Error(CV_BadCOI, "COI is not supported by the function"); + return Mat(iplimg, copyData); + } + if( CV_IS_SEQ(arr) ) + { + CvSeq* seq = (CvSeq*)arr; + CV_Assert(seq->total > 0 && CV_ELEM_SIZE(seq->flags) == seq->elem_size); + if(!copyData && seq->first->next == seq->first) + return Mat(seq->total, 1, CV_MAT_TYPE(seq->flags), seq->first->data); + Mat buf(seq->total, 1, CV_MAT_TYPE(seq->flags)); + cvCvtSeqToArray(seq, buf.data, CV_WHOLE_SEQ); + return buf; + } + CV_Error(CV_StsBadArg, "Unknown array type"); + return Mat(); +} + +void Mat::locateROI( Size& wholeSize, Point& ofs ) const +{ + CV_Assert( dims <= 2 && step[0] > 0 ); + size_t esz = elemSize(), minstep; + ptrdiff_t delta1 = data - datastart, delta2 = dataend - datastart; + + if( delta1 == 0 ) + ofs.x = ofs.y = 0; + else + { + ofs.y = (int)(delta1/step[0]); + ofs.x = (int)((delta1 - step[0]*ofs.y)/esz); + CV_DbgAssert( data == datastart + ofs.y*step[0] + ofs.x*esz ); + } + minstep = (ofs.x + cols)*esz; + wholeSize.height = (int)((delta2 - minstep)/step[0] + 1); + wholeSize.height = std::max(wholeSize.height, ofs.y + rows); + wholeSize.width = (int)((delta2 - step*(wholeSize.height-1))/esz); + wholeSize.width = std::max(wholeSize.width, ofs.x + cols); +} + +Mat& Mat::adjustROI( int dtop, int dbottom, int dleft, int dright ) +{ + CV_Assert( dims <= 2 && step[0] > 0 ); + Size wholeSize; Point ofs; + size_t esz = elemSize(); + locateROI( wholeSize, ofs ); + int row1 = std::max(ofs.y - dtop, 0), row2 = std::min(ofs.y + rows + dbottom, wholeSize.height); + int col1 = std::max(ofs.x - dleft, 0), col2 = std::min(ofs.x + cols + dright, wholeSize.width); + data += (row1 - ofs.y)*step + (col1 - ofs.x)*esz; + rows = row2 - row1; cols = col2 - col1; + size.p[0] = rows; size.p[1] = cols; + if( esz*cols == step[0] || rows == 1 ) + flags |= CONTINUOUS_FLAG; + else + flags &= ~CONTINUOUS_FLAG; + return *this; +} + +} + +void cv::extractImageCOI(const CvArr* arr, OutputArray _ch, int coi) +{ + 
Mat mat = cvarrToMat(arr, false, true, 1); + _ch.create(mat.dims, mat.size, mat.depth()); + Mat ch = _ch.getMat(); + if(coi < 0) + { + CV_Assert( CV_IS_IMAGE(arr) ); + coi = cvGetImageCOI((const IplImage*)arr)-1; + } + CV_Assert(0 <= coi && coi < mat.channels()); + int _pairs[] = { coi, 0 }; + mixChannels( &mat, 1, &ch, 1, _pairs, 1 ); +} + +void cv::insertImageCOI(InputArray _ch, CvArr* arr, int coi) +{ + Mat ch = _ch.getMat(), mat = cvarrToMat(arr, false, true, 1); + if(coi < 0) + { + CV_Assert( CV_IS_IMAGE(arr) ); + coi = cvGetImageCOI((const IplImage*)arr)-1; + } + CV_Assert(ch.size == mat.size && ch.depth() == mat.depth() && 0 <= coi && coi < mat.channels()); + int _pairs[] = { 0, coi }; + mixChannels( &ch, 1, &mat, 1, _pairs, 1 ); +} + +namespace cv +{ + +Mat Mat::reshape(int new_cn, int new_rows) const +{ + int cn = channels(); + Mat hdr = *this; + + if( dims > 2 && new_rows == 0 && new_cn != 0 && size[dims-1]*cn % new_cn == 0 ) + { + hdr.flags = (hdr.flags & ~CV_MAT_CN_MASK) | ((new_cn-1) << CV_CN_SHIFT); + hdr.step[dims-1] = CV_ELEM_SIZE(hdr.flags); + hdr.size[dims-1] = hdr.size[dims-1]*cn / new_cn; + return hdr; + } + + CV_Assert( dims <= 2 ); + + if( new_cn == 0 ) + new_cn = cn; + + int total_width = cols * cn; + + if( (new_cn > total_width || total_width % new_cn != 0) && new_rows == 0 ) + new_rows = rows * total_width / new_cn; + + if( new_rows != 0 && new_rows != rows ) + { + int total_size = total_width * rows; + if( !isContinuous() ) + CV_Error( CV_BadStep, + "The matrix is not continuous, thus its number of rows can not be changed" ); + + if( (unsigned)new_rows > (unsigned)total_size ) + CV_Error( CV_StsOutOfRange, "Bad new number of rows" ); + + total_width = total_size / new_rows; + + if( total_width * new_rows != total_size ) + CV_Error( CV_StsBadArg, "The total number of matrix elements " + "is not divisible by the new number of rows" ); + + hdr.rows = new_rows; + hdr.step[0] = total_width * elemSize1(); + } + + int new_width = total_width / 
new_cn; + + if( new_width * new_cn != total_width ) + CV_Error( CV_BadNumChannels, + "The total width is not divisible by the new number of channels" ); + + hdr.cols = new_width; + hdr.flags = (hdr.flags & ~CV_MAT_CN_MASK) | ((new_cn-1) << CV_CN_SHIFT); + hdr.step[1] = CV_ELEM_SIZE(hdr.flags); + return hdr; +} + + +int Mat::checkVector(int _elemChannels, int _depth, bool _requireContinuous) const +{ + return (depth() == _depth || _depth <= 0) && + (isContinuous() || !_requireContinuous) && + ((dims == 2 && (((rows == 1 || cols == 1) && channels() == _elemChannels) || (cols == _elemChannels))) || + (dims == 3 && channels() == 1 && size.p[2] == _elemChannels && (size.p[0] == 1 || size.p[1] == 1) && + (isContinuous() || step.p[1] == step.p[2]*size.p[2]))) + ? (int)(total()*channels()/_elemChannels) : -1; +} + + +void scalarToRawData(const Scalar& s, void* _buf, int type, int unroll_to) +{ + int i, depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type); + CV_Assert(cn <= 4); + switch(depth) + { + case CV_8U: + { + uchar* buf = (uchar*)_buf; + for(i = 0; i < cn; i++) + buf[i] = saturate_cast(s.val[i]); + for(; i < unroll_to; i++) + buf[i] = buf[i-cn]; + } + break; + case CV_8S: + { + schar* buf = (schar*)_buf; + for(i = 0; i < cn; i++) + buf[i] = saturate_cast(s.val[i]); + for(; i < unroll_to; i++) + buf[i] = buf[i-cn]; + } + break; + case CV_16U: + { + ushort* buf = (ushort*)_buf; + for(i = 0; i < cn; i++) + buf[i] = saturate_cast(s.val[i]); + for(; i < unroll_to; i++) + buf[i] = buf[i-cn]; + } + break; + case CV_16S: + { + short* buf = (short*)_buf; + for(i = 0; i < cn; i++) + buf[i] = saturate_cast(s.val[i]); + for(; i < unroll_to; i++) + buf[i] = buf[i-cn]; + } + break; + case CV_32S: + { + int* buf = (int*)_buf; + for(i = 0; i < cn; i++) + buf[i] = saturate_cast(s.val[i]); + for(; i < unroll_to; i++) + buf[i] = buf[i-cn]; + } + break; + case CV_32F: + { + float* buf = (float*)_buf; + for(i = 0; i < cn; i++) + buf[i] = saturate_cast(s.val[i]); + for(; i < unroll_to; i++) 
+ buf[i] = buf[i-cn]; + } + break; + case CV_64F: + { + double* buf = (double*)_buf; + for(i = 0; i < cn; i++) + buf[i] = saturate_cast(s.val[i]); + for(; i < unroll_to; i++) + buf[i] = buf[i-cn]; + break; + } + default: + CV_Error(CV_StsUnsupportedFormat,""); + } +} + + +/*************************************************************************************************\ + Input/Output Array +\*************************************************************************************************/ + +_InputArray::_InputArray() : flags(0), obj(0) {} +_InputArray::_InputArray(const Mat& m) : flags(MAT), obj((void*)&m) {} +_InputArray::_InputArray(const vector& vec) : flags(STD_VECTOR_MAT), obj((void*)&vec) {} +_InputArray::_InputArray(const double& val) : flags(MATX+CV_64F), obj((void*)&val), sz(Size(1,1)) {} +_InputArray::_InputArray(const MatExpr& expr) : flags(EXPR), obj((void*)&expr) {} + +Mat _InputArray::getMat(int i) const +{ + int k = kind(); + + if( k == MAT ) + { + CV_Assert( i < 0 ); + return *(const Mat*)obj; + } + + if( k == EXPR ) + { + CV_Assert( i < 0 ); + return (Mat)*((const MatExpr*)obj); + } + + if( k == MATX ) + { + CV_Assert( i < 0 ); + return Mat(sz, flags, obj); + } + + if( k == STD_VECTOR ) + { + CV_Assert( i < 0 ); + int t = CV_MAT_TYPE(flags); + const vector& v = *(const vector*)obj; + + return !v.empty() ? Mat(size(), t, (void*)&v[0]) : Mat(); + } + + if( k == NONE ) + return Mat(); + + if( k == STD_VECTOR_VECTOR ) + { + int t = type(i); + const vector >& vv = *(const vector >*)obj; + CV_Assert( 0 <= i && i < (int)vv.size() ); + const vector& v = vv[i]; + + return !v.empty() ? 
Mat(size(i), t, (void*)&v[0]) : Mat(); + } + + CV_Assert( k == STD_VECTOR_MAT ); + //if( k == STD_VECTOR_MAT ) + { + const vector& v = *(const vector*)obj; + CV_Assert( 0 <= i && i < (int)v.size() ); + + return v[i]; + } +} + + +void _InputArray::getMatVector(vector& mv) const +{ + int k = kind(); + + if( k == MAT ) + { + const Mat& m = *(const Mat*)obj; + int i, n = (int)m.size[0]; + mv.resize(n); + + for( i = 0; i < n; i++ ) + mv[i] = m.dims == 2 ? Mat(1, m.cols, m.type(), (void*)m.ptr(i)) : + Mat(m.dims-1, &m.size[1], m.type(), (void*)m.ptr(i), &m.step[1]); + return; + } + + if( k == EXPR ) + { + Mat m = *(const MatExpr*)obj; + int i, n = m.size[0]; + mv.resize(n); + + for( i = 0; i < n; i++ ) + mv[i] = m.row(i); + return; + } + + if( k == MATX ) + { + size_t i, n = sz.height, esz = CV_ELEM_SIZE(flags); + mv.resize(n); + + for( i = 0; i < n; i++ ) + mv[i] = Mat(1, sz.width, CV_MAT_TYPE(flags), (uchar*)obj + esz*sz.width*i); + return; + } + + if( k == STD_VECTOR ) + { + const vector& v = *(const vector*)obj; + + size_t i, n = v.size(), esz = CV_ELEM_SIZE(flags); + int t = CV_MAT_DEPTH(flags), cn = CV_MAT_CN(flags); + mv.resize(n); + + for( i = 0; i < n; i++ ) + mv[i] = Mat(1, cn, t, (void*)(&v[0] + esz*i)); + return; + } + + if( k == NONE ) + { + mv.clear(); + return; + } + + if( k == STD_VECTOR_VECTOR ) + { + const vector >& vv = *(const vector >*)obj; + int i, n = (int)vv.size(); + int t = CV_MAT_TYPE(flags); + mv.resize(n); + + for( i = 0; i < n; i++ ) + { + const vector& v = vv[i]; + mv[i] = Mat(size(i), t, (void*)&v[0]); + } + return; + } + + CV_Assert( k == STD_VECTOR_MAT ); + //if( k == STD_VECTOR_MAT ) + { + const vector& v = *(const vector*)obj; + mv.resize(v.size()); + std::copy(v.begin(), v.end(), mv.begin()); + return; + } +} + +int _InputArray::kind() const +{ + return flags & -(1 << KIND_SHIFT); +} + +Size _InputArray::size(int i) const +{ + int k = kind(); + + if( k == MAT ) + { + CV_Assert( i < 0 ); + return ((const Mat*)obj)->size(); + } + + if( 
k == EXPR ) + { + CV_Assert( i < 0 ); + return ((const MatExpr*)obj)->size(); + } + + if( k == MATX ) + { + CV_Assert( i < 0 ); + return sz; + } + + if( k == STD_VECTOR ) + { + CV_Assert( i < 0 ); + const vector& v = *(const vector*)obj; + const vector& iv = *(const vector*)obj; + size_t szb = v.size(), szi = iv.size(); + return szb == szi ? Size((int)szb, 1) : Size((int)(szb/CV_ELEM_SIZE(flags)), 1); + } + + if( k == NONE ) + return Size(); + + if( k == STD_VECTOR_VECTOR ) + { + const vector >& vv = *(const vector >*)obj; + if( i < 0 ) + return vv.empty() ? Size() : Size((int)vv.size(), 1); + CV_Assert( i < (int)vv.size() ); + const vector >& ivv = *(const vector >*)obj; + + size_t szb = vv[i].size(), szi = ivv[i].size(); + return szb == szi ? Size((int)szb, 1) : Size((int)(szb/CV_ELEM_SIZE(flags)), 1); + } + + CV_Assert( k == STD_VECTOR_MAT ); + //if( k == STD_VECTOR_MAT ) + { + const vector& vv = *(const vector*)obj; + if( i < 0 ) + return vv.empty() ? Size() : Size((int)vv.size(), 1); + CV_Assert( i < (int)vv.size() ); + + return vv[i].size(); + } +} + +size_t _InputArray::total(int i) const +{ + return size(i).area(); +} + +int _InputArray::type(int i) const +{ + int k = kind(); + + if( k == MAT ) + return ((const Mat*)obj)->type(); + + if( k == EXPR ) + return ((const MatExpr*)obj)->type(); + + if( k == MATX || k == STD_VECTOR || k == STD_VECTOR_VECTOR ) + return CV_MAT_TYPE(flags); + + if( k == NONE ) + return -1; + + CV_Assert( k == STD_VECTOR_MAT ); + //if( k == STD_VECTOR_MAT ) + { + const vector& vv = *(const vector*)obj; + CV_Assert( i < (int)vv.size() ); + + return vv[i >= 0 ? 
i : 0].type(); + } +} + +int _InputArray::depth(int i) const +{ + return CV_MAT_DEPTH(type(i)); +} + +int _InputArray::channels(int i) const +{ + return CV_MAT_CN(type(i)); +} + +bool _InputArray::empty() const +{ + int k = kind(); + + if( k == MAT ) + return ((const Mat*)obj)->empty(); + + if( k == EXPR ) + return false; + + if( k == MATX ) + return false; + + if( k == STD_VECTOR ) + { + const vector& v = *(const vector*)obj; + return v.empty(); + } + + if( k == NONE ) + return true; + + if( k == STD_VECTOR_VECTOR ) + { + const vector >& vv = *(const vector >*)obj; + return vv.empty(); + } + + CV_Assert( k == STD_VECTOR_MAT ); + //if( k == STD_VECTOR_MAT ) + { + const vector& vv = *(const vector*)obj; + return vv.empty(); + } +} + + +_OutputArray::_OutputArray() {} +_OutputArray::_OutputArray(Mat& m) : _InputArray(m) {} +_OutputArray::_OutputArray(vector& vec) : _InputArray(vec) {} + +bool _OutputArray::fixedSize() const +{ + int k = kind(); + return k == MATX; +} + +bool _OutputArray::fixedType() const +{ + int k = kind(); + return k != MAT && k != STD_VECTOR_MAT; +} + +void _OutputArray::create(Size _sz, int type, int i, bool allowTransposed, int fixedDepthMask) const +{ + int k = kind(); + if( k == MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 ) + { + ((Mat*)obj)->create(_sz, type); + return; + } + int sz[] = {_sz.height, _sz.width}; + create(2, sz, type, i, allowTransposed, fixedDepthMask); +} + +void _OutputArray::create(int rows, int cols, int type, int i, bool allowTransposed, int fixedDepthMask) const +{ + int k = kind(); + if( k == MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 ) + { + ((Mat*)obj)->create(rows, cols, type); + return; + } + int sz[] = {rows, cols}; + create(2, sz, type, i, allowTransposed, fixedDepthMask); +} + +void _OutputArray::create(int dims, const int* size, int type, int i, bool allocateVector, int fixedDepthMask) const +{ + int k = kind(); + type = CV_MAT_TYPE(type); + + if( k == MAT ) + { + CV_Assert( i < 0 ); + 
Mat& m = *(Mat*)obj; + if( allocateVector ) + { + if( !m.isContinuous() ) + m.release(); + + if( dims == 2 && m.dims == 2 && m.data && + m.type() == type && m.rows == size[1] && m.cols == size[0] ) + return; + } + m.create(dims, size, type); + return; + } + + if( k == MATX ) + { + CV_Assert( i < 0 ); + int type0 = CV_MAT_TYPE(flags); + CV_Assert( type == type0 || (CV_MAT_CN(type) == 1 && ((1 << type0) & fixedDepthMask) != 0) ); + CV_Assert( dims == 2 && ((size[0] == sz.height && size[1] == sz.width) || + (allocateVector && size[0] == sz.width && size[1] == sz.height))); + return; + } + + if( k == STD_VECTOR || k == STD_VECTOR_VECTOR ) + { + CV_Assert( dims == 2 && (size[0] == 1 || size[1] == 1 || size[0]*size[1] == 0) ); + size_t len = size[0]*size[1] > 0 ? size[0] + size[1] - 1 : 0; + vector* v = (vector*)obj; + + if( k == STD_VECTOR_VECTOR ) + { + vector >& vv = *(vector >*)obj; + if( i < 0 ) + { + vv.resize(len); + return; + } + CV_Assert( i < (int)vv.size() ); + v = &vv[i]; + } + else + CV_Assert( i < 0 ); + + int type0 = CV_MAT_TYPE(flags); + CV_Assert( type == type0 || (CV_MAT_CN(type) == CV_MAT_CN(type0) && ((1 << type0) & fixedDepthMask) != 0) ); + + int esz = CV_ELEM_SIZE(type0); + switch( esz ) + { + case 1: + ((vector*)v)->resize(len); + break; + case 2: + ((vector*)v)->resize(len); + break; + case 3: + ((vector*)v)->resize(len); + break; + case 4: + ((vector*)v)->resize(len); + break; + case 6: + ((vector*)v)->resize(len); + break; + case 8: + ((vector*)v)->resize(len); + break; + case 12: + ((vector*)v)->resize(len); + break; + case 16: + ((vector*)v)->resize(len); + break; + case 24: + ((vector*)v)->resize(len); + break; + case 32: + ((vector*)v)->resize(len); + break; + case 36: + ((vector >*)v)->resize(len); + break; + case 48: + ((vector >*)v)->resize(len); + break; + case 64: + ((vector >*)v)->resize(len); + break; + case 128: + ((vector >*)v)->resize(len); + break; + case 256: + ((vector >*)v)->resize(len); + break; + case 512: + ((vector 
>*)v)->resize(len); + break; + default: + CV_Error_(CV_StsBadArg, ("Vectors with element size %d are not supported. Please, modify OutputArray::create()\n", esz)); + } + return; + } + + if( k == NONE ) + { + CV_Error(CV_StsNullPtr, "create() called for the missing output array" ); + return; + } + + CV_Assert( k == STD_VECTOR_MAT ); + //if( k == STD_VECTOR_MAT ) + { + vector& v = *(vector*)obj; + + if( i < 0 ) + { + CV_Assert( dims == 2 && (size[0] == 1 || size[1] == 1 || size[0]*size[1] == 0) ); + size_t len = size[0]*size[1] > 0 ? size[0] + size[1] - 1 : 0; + + v.resize(len); + return; + } + + CV_Assert( i < (int)v.size() ); + Mat& m = v[i]; + + if( allocateVector ) + { + if( !m.isContinuous() ) + m.release(); + + if( dims == 2 && m.dims == 2 && m.data && + m.type() == type && m.rows == size[1] && m.cols == size[0] ) + return; + } + m.create(dims, size, type); + } +} + +void _OutputArray::release() const +{ + int k = kind(); + + if( k == MAT ) + { + ((Mat*)obj)->release(); + return; + } + + if( k == NONE ) + return; + + if( k == STD_VECTOR ) + { + create(Size(), CV_MAT_TYPE(flags)); + return; + } + + if( k == STD_VECTOR_VECTOR ) + { + ((vector >*)obj)->clear(); + return; + } + + CV_Assert( k == STD_VECTOR_MAT ); + //if( k == STD_VECTOR_MAT ) + { + ((vector*)obj)->clear(); + } +} + +void _OutputArray::clear() const +{ + int k = kind(); + + if( k == MAT ) + { + ((Mat*)obj)->resize(0); + return; + } + + release(); +} + +bool _OutputArray::needed() const +{ + return kind() != NONE; +} + +Mat& _OutputArray::getMatRef(int i) const +{ + int k = kind(); + if( i < 0 ) + { + CV_Assert( k == MAT ); + return *(Mat*)obj; + } + else + { + CV_Assert( k == STD_VECTOR_MAT ); + vector& v = *(vector*)obj; + CV_Assert( i < (int)v.size() ); + return v[i]; + } +} + +static _OutputArray _none; +OutputArray noArray() { return _none; } + +} + +/*************************************************************************************************\ + Matrix Operations 
+\*************************************************************************************************/ + +void cv::hconcat(const Mat* src, size_t nsrc, OutputArray _dst) +{ + if( nsrc == 0 || !src ) + { + _dst.release(); + return; + } + + int totalCols = 0, cols = 0; + size_t i; + for( i = 0; i < nsrc; i++ ) + { + CV_Assert( !src[i].empty() && src[i].dims <= 2 && + src[i].rows == src[0].rows && + src[i].type() == src[0].type()); + totalCols += src[i].cols; + } + _dst.create( src[0].rows, totalCols, src[0].type()); + Mat dst = _dst.getMat(); + for( i = 0; i < nsrc; i++ ) + { + Mat dpart = dst(Rect(cols, 0, src[i].cols, src[i].rows)); + src[i].copyTo(dpart); + cols += src[i].cols; + } +} + +void cv::hconcat(InputArray src1, InputArray src2, OutputArray dst) +{ + Mat src[] = {src1.getMat(), src2.getMat()}; + hconcat(src, 2, dst); +} + +void cv::hconcat(InputArray _src, OutputArray dst) +{ + vector src; + _src.getMatVector(src); + hconcat(!src.empty() ? &src[0] : 0, src.size(), dst); +} + +void cv::vconcat(const Mat* src, size_t nsrc, OutputArray _dst) +{ + if( nsrc == 0 || !src ) + { + _dst.release(); + return; + } + + int totalRows = 0, rows = 0; + size_t i; + for( i = 0; i < nsrc; i++ ) + { + CV_Assert( !src[i].empty() && src[i].dims <= 2 && + src[i].cols == src[0].cols && + src[i].type() == src[0].type()); + totalRows += src[i].rows; + } + _dst.create( totalRows, src[0].cols, src[0].type()); + Mat dst = _dst.getMat(); + for( i = 0; i < nsrc; i++ ) + { + Mat dpart(dst, Rect(0, rows, src[i].cols, src[i].rows)); + src[i].copyTo(dpart); + rows += src[i].rows; + } +} + +void cv::vconcat(InputArray src1, InputArray src2, OutputArray dst) +{ + Mat src[] = {src1.getMat(), src2.getMat()}; + vconcat(src, 2, dst); +} + +void cv::vconcat(InputArray _src, OutputArray dst) +{ + vector src; + _src.getMatVector(src); + vconcat(!src.empty() ? 
&src[0] : 0, src.size(), dst); +} + +//////////////////////////////////////// set identity //////////////////////////////////////////// +void cv::setIdentity( InputOutputArray _m, const Scalar& s ) +{ + Mat m = _m.getMat(); + CV_Assert( m.dims <= 2 ); + int i, j, rows = m.rows, cols = m.cols, type = m.type(); + + if( type == CV_32FC1 ) + { + float* data = (float*)m.data; + float val = (float)s[0]; + size_t step = m.step/sizeof(data[0]); + + for( i = 0; i < rows; i++, data += step ) + { + for( j = 0; j < cols; j++ ) + data[j] = 0; + if( i < cols ) + data[i] = val; + } + } + else if( type == CV_64FC1 ) + { + double* data = (double*)m.data; + double val = s[0]; + size_t step = m.step/sizeof(data[0]); + + for( i = 0; i < rows; i++, data += step ) + { + for( j = 0; j < cols; j++ ) + data[j] = j == i ? val : 0; + } + } + else + { + m = Scalar(0); + m.diag() = s; + } +} + +//////////////////////////////////////////// trace /////////////////////////////////////////// + +cv::Scalar cv::trace( InputArray _m ) +{ + Mat m = _m.getMat(); + CV_Assert( m.dims <= 2 ); + int i, type = m.type(); + int nm = std::min(m.rows, m.cols); + + if( type == CV_32FC1 ) + { + const float* ptr = (const float*)m.data; + size_t step = m.step/sizeof(ptr[0]) + 1; + double _s = 0; + for( i = 0; i < nm; i++ ) + _s += ptr[i*step]; + return _s; + } + + if( type == CV_64FC1 ) + { + const double* ptr = (const double*)m.data; + size_t step = m.step/sizeof(ptr[0]) + 1; + double _s = 0; + for( i = 0; i < nm; i++ ) + _s += ptr[i*step]; + return _s; + } + + return cv::sum(m.diag()); +} + +////////////////////////////////////// transpose ///////////////////////////////////////// + +namespace cv +{ + +template static void +transpose_( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size sz ) +{ + int i, j, m = sz.width, n = sz.height; + + for( i = 0; i <= m - 4; i += 4 ) + { + T* d0 = (T*)(dst + dstep*i); + T* d1 = (T*)(dst + dstep*(i+1)); + T* d2 = (T*)(dst + dstep*(i+2)); + T* d3 = (T*)(dst + 
dstep*(i+3)); + + for( j = 0; j <= n - 4; j += 4 ) + { + const T* s0 = (const T*)(src + i*sizeof(T) + sstep*j); + const T* s1 = (const T*)(src + i*sizeof(T) + sstep*(j+1)); + const T* s2 = (const T*)(src + i*sizeof(T) + sstep*(j+2)); + const T* s3 = (const T*)(src + i*sizeof(T) + sstep*(j+3)); + + d0[j] = s0[0]; d0[j+1] = s1[0]; d0[j+2] = s2[0]; d0[j+3] = s3[0]; + d1[j] = s0[1]; d1[j+1] = s1[1]; d1[j+2] = s2[1]; d1[j+3] = s3[1]; + d2[j] = s0[2]; d2[j+1] = s1[2]; d2[j+2] = s2[2]; d2[j+3] = s3[2]; + d3[j] = s0[3]; d3[j+1] = s1[3]; d3[j+2] = s2[3]; d3[j+3] = s3[3]; + } + + for( ; j < n; j++ ) + { + const T* s0 = (const T*)(src + i*sizeof(T) + j*sstep); + d0[j] = s0[0]; d1[j] = s0[1]; d2[j] = s0[2]; d3[j] = s0[3]; + } + } + + for( ; i < m; i++ ) + { + T* d0 = (T*)(dst + dstep*i); + + for( j = 0; j <= n - 4; j += 4 ) + { + const T* s0 = (const T*)(src + i*sizeof(T) + sstep*j); + const T* s1 = (const T*)(src + i*sizeof(T) + sstep*(j+1)); + const T* s2 = (const T*)(src + i*sizeof(T) + sstep*(j+2)); + const T* s3 = (const T*)(src + i*sizeof(T) + sstep*(j+3)); + + d0[j] = s0[0]; d0[j+1] = s1[0]; d0[j+2] = s2[0]; d0[j+3] = s3[0]; + } + + for( ; j < n; j++ ) + { + const T* s0 = (const T*)(src + i*sizeof(T) + j*sstep); + d0[j] = s0[0]; + } + } +} + +template static void +transposeI_( uchar* data, size_t step, int n ) +{ + int i, j; + for( i = 0; i < n; i++ ) + { + T* row = (T*)(data + step*i); + uchar* data1 = data + i*sizeof(T); + for( j = i+1; j < n; j++ ) + std::swap( row[j], *(T*)(data1 + step*j) ); + } +} + +typedef void (*TransposeFunc)( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size sz ); +typedef void (*TransposeInplaceFunc)( uchar* data, size_t step, int n ); + +#define DEF_TRANSPOSE_FUNC(suffix, type) \ +static void transpose_##suffix( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size sz ) \ +{ transpose_(src, sstep, dst, dstep, sz); } \ +\ +static void transposeI_##suffix( uchar* data, size_t step, int n ) \ +{ transposeI_(data, step, 
n); } + +DEF_TRANSPOSE_FUNC(8u, uchar) +DEF_TRANSPOSE_FUNC(16u, ushort) +DEF_TRANSPOSE_FUNC(8uC3, Vec3b) +DEF_TRANSPOSE_FUNC(32s, int) +DEF_TRANSPOSE_FUNC(16uC3, Vec3s) +DEF_TRANSPOSE_FUNC(32sC2, Vec2i) +DEF_TRANSPOSE_FUNC(32sC3, Vec3i) +DEF_TRANSPOSE_FUNC(32sC4, Vec4i) +DEF_TRANSPOSE_FUNC(32sC6, Vec6i) +DEF_TRANSPOSE_FUNC(32sC8, Vec8i) + +static TransposeFunc transposeTab[] = +{ + 0, transpose_8u, transpose_16u, transpose_8uC3, transpose_32s, 0, transpose_16uC3, 0, + transpose_32sC2, 0, 0, 0, transpose_32sC3, 0, 0, 0, transpose_32sC4, + 0, 0, 0, 0, 0, 0, 0, transpose_32sC6, 0, 0, 0, 0, 0, 0, 0, transpose_32sC8 +}; + +static TransposeInplaceFunc transposeInplaceTab[] = +{ + 0, transposeI_8u, transposeI_16u, transposeI_8uC3, transposeI_32s, 0, transposeI_16uC3, 0, + transposeI_32sC2, 0, 0, 0, transposeI_32sC3, 0, 0, 0, transposeI_32sC4, + 0, 0, 0, 0, 0, 0, 0, transposeI_32sC6, 0, 0, 0, 0, 0, 0, 0, transposeI_32sC8 +}; + +} + +void cv::transpose( InputArray _src, OutputArray _dst ) +{ + Mat src = _src.getMat(); + size_t esz = src.elemSize(); + CV_Assert( src.dims <= 2 && esz <= (size_t)32 ); + + _dst.create(src.cols, src.rows, src.type()); + Mat dst = _dst.getMat(); + + if( dst.data == src.data ) + { + TransposeInplaceFunc func = transposeInplaceTab[esz]; + CV_Assert( func != 0 ); + func( dst.data, dst.step, dst.rows ); + } + else + { + TransposeFunc func = transposeTab[esz]; + CV_Assert( func != 0 ); + func( src.data, src.step, dst.data, dst.step, src.size() ); + } +} + + +void cv::completeSymm( InputOutputArray _m, bool LtoR ) +{ + Mat m = _m.getMat(); + CV_Assert( m.dims <= 2 ); + + int i, j, nrows = m.rows, type = m.type(); + int j0 = 0, j1 = nrows; + CV_Assert( m.rows == m.cols ); + + if( type == CV_32FC1 || type == CV_32SC1 ) + { + int* data = (int*)m.data; + size_t step = m.step/sizeof(data[0]); + for( i = 0; i < nrows; i++ ) + { + if( !LtoR ) j1 = i; else j0 = i+1; + for( j = j0; j < j1; j++ ) + data[i*step + j] = data[j*step + i]; + } + } + else if( type == 
CV_64FC1 ) + { + double* data = (double*)m.data; + size_t step = m.step/sizeof(data[0]); + for( i = 0; i < nrows; i++ ) + { + if( !LtoR ) j1 = i; else j0 = i+1; + for( j = j0; j < j1; j++ ) + data[i*step + j] = data[j*step + i]; + } + } + else + CV_Error( CV_StsUnsupportedFormat, "" ); +} + + +cv::Mat cv::Mat::cross(InputArray _m) const +{ + Mat m = _m.getMat(); + int t = type(), d = CV_MAT_DEPTH(t); + CV_Assert( dims <= 2 && m.dims <= 2 && size() == m.size() && t == m.type() && + ((rows == 3 && cols == 1) || (cols*channels() == 3 && rows == 1))); + Mat result(rows, cols, t); + + if( d == CV_32F ) + { + const float *a = (const float*)data, *b = (const float*)m.data; + float* c = (float*)result.data; + size_t lda = rows > 1 ? step/sizeof(a[0]) : 1; + size_t ldb = rows > 1 ? m.step/sizeof(b[0]) : 1; + + c[0] = a[lda] * b[ldb*2] - a[lda*2] * b[ldb]; + c[1] = a[lda*2] * b[0] - a[0] * b[ldb*2]; + c[2] = a[0] * b[ldb] - a[lda] * b[0]; + } + else if( d == CV_64F ) + { + const double *a = (const double*)data, *b = (const double*)m.data; + double* c = (double*)result.data; + size_t lda = rows > 1 ? step/sizeof(a[0]) : 1; + size_t ldb = rows > 1 ? 
m.step/sizeof(b[0]) : 1; + + c[0] = a[lda] * b[ldb*2] - a[lda*2] * b[ldb]; + c[1] = a[lda*2] * b[0] - a[0] * b[ldb*2]; + c[2] = a[0] * b[ldb] - a[lda] * b[0]; + } + + return result; +} + + +////////////////////////////////////////// reduce //////////////////////////////////////////// + +namespace cv +{ + +template static void +reduceR_( const Mat& srcmat, Mat& dstmat ) +{ + typedef typename Op::rtype WT; + Size size = srcmat.size(); + size.width *= srcmat.channels(); + AutoBuffer buffer(size.width); + WT* buf = buffer; + ST* dst = (ST*)dstmat.data; + const T* src = (const T*)srcmat.data; + size_t srcstep = srcmat.step/sizeof(src[0]); + int i; + Op op; + + for( i = 0; i < size.width; i++ ) + buf[i] = src[i]; + + for( ; --size.height; ) + { + src += srcstep; + for( i = 0; i <= size.width - 4; i += 4 ) + { + WT s0, s1; + s0 = op(buf[i], (WT)src[i]); + s1 = op(buf[i+1], (WT)src[i+1]); + buf[i] = s0; buf[i+1] = s1; + + s0 = op(buf[i+2], (WT)src[i+2]); + s1 = op(buf[i+3], (WT)src[i+3]); + buf[i+2] = s0; buf[i+3] = s1; + } + + for( ; i < size.width; i++ ) + buf[i] = op(buf[i], (WT)src[i]); + } + + for( i = 0; i < size.width; i++ ) + dst[i] = (ST)buf[i]; +} + + +template static void +reduceC_( const Mat& srcmat, Mat& dstmat ) +{ + typedef typename Op::rtype WT; + Size size = srcmat.size(); + int i, k, cn = srcmat.channels(); + size.width *= cn; + Op op; + + for( int y = 0; y < size.height; y++ ) + { + const T* src = (const T*)(srcmat.data + srcmat.step*y); + ST* dst = (ST*)(dstmat.data + dstmat.step*y); + if( size.width == cn ) + for( k = 0; k < cn; k++ ) + dst[k] = src[k]; + else + { + for( k = 0; k < cn; k++ ) + { + WT a0 = src[k], a1 = src[k+cn]; + for( i = 2*cn; i <= size.width - 4*cn; i += 4*cn ) + { + a0 = op(a0, (WT)src[i+k]); + a1 = op(a1, (WT)src[i+k+cn]); + a0 = op(a0, (WT)src[i+k+cn*2]); + a1 = op(a1, (WT)src[i+k+cn*3]); + } + + for( ; i < size.width; i += cn ) + { + a0 = op(a0, (WT)src[i]); + } + a0 = op(a0, a1); + dst[k] = (ST)a0; + } + } + } +} + +typedef 
void (*ReduceFunc)( const Mat& src, Mat& dst ); + +} + +void cv::reduce(InputArray _src, OutputArray _dst, int dim, int op, int dtype) +{ + Mat src = _src.getMat(); + CV_Assert( src.dims <= 2 ); + int op0 = op; + int stype = src.type(), sdepth = src.depth(), cn = src.channels(); + if( dtype < 0 ) + dtype = _dst.fixedType() ? _dst.type() : stype; + int ddepth = CV_MAT_DEPTH(dtype); + + _dst.create(dim == 0 ? 1 : src.rows, dim == 0 ? src.cols : 1, + CV_MAKETYPE(dtype >= 0 ? dtype : stype, cn)); + Mat dst = _dst.getMat(), temp = dst; + + CV_Assert( op == CV_REDUCE_SUM || op == CV_REDUCE_MAX || + op == CV_REDUCE_MIN || op == CV_REDUCE_AVG ); + CV_Assert( src.channels() == dst.channels() ); + + if( op == CV_REDUCE_AVG ) + { + op = CV_REDUCE_SUM; + if( sdepth < CV_32S && ddepth < CV_32S ) + { + temp.create(dst.rows, dst.cols, CV_32SC(cn)); + ddepth = CV_32S; + } + } + + ReduceFunc func = 0; + if( dim == 0 ) + { + if( op == CV_REDUCE_SUM ) + { + if(sdepth == CV_8U && ddepth == CV_32S) + func = reduceR_ >; + else if(sdepth == CV_8U && ddepth == CV_32F) + func = reduceR_ >; + else if(sdepth == CV_8U && ddepth == CV_64F) + func = reduceR_ >; + else if(sdepth == CV_16U && ddepth == CV_32F) + func = reduceR_ >; + else if(sdepth == CV_16U && ddepth == CV_64F) + func = reduceR_ >; + else if(sdepth == CV_16S && ddepth == CV_32F) + func = reduceR_ >; + else if(sdepth == CV_16S && ddepth == CV_64F) + func = reduceR_ >; + else if(sdepth == CV_32F && ddepth == CV_32F) + func = reduceR_ >; + else if(sdepth == CV_32F && ddepth == CV_64F) + func = reduceR_ >; + else if(sdepth == CV_64F && ddepth == CV_64F) + func = reduceR_ >; + } + else if(op == CV_REDUCE_MAX) + { + if(sdepth == CV_8U && ddepth == CV_8U) + func = reduceR_ >; + else if(sdepth == CV_16U && ddepth == CV_16U) + func = reduceR_ >; + else if(sdepth == CV_16S && ddepth == CV_16S) + func = reduceR_ >; + else if(sdepth == CV_32F && ddepth == CV_32F) + func = reduceR_ >; + else if(sdepth == CV_64F && ddepth == CV_64F) + func = 
reduceR_ >; + } + else if(op == CV_REDUCE_MIN) + { + if(sdepth == CV_8U && ddepth == CV_8U) + func = reduceR_ >; + else if(sdepth == CV_16U && ddepth == CV_16U) + func = reduceR_ >; + else if(sdepth == CV_16S && ddepth == CV_16S) + func = reduceR_ >; + else if(sdepth == CV_32F && ddepth == CV_32F) + func = reduceR_ >; + else if(sdepth == CV_64F && ddepth == CV_64F) + func = reduceR_ >; + } + } + else + { + if(op == CV_REDUCE_SUM) + { + if(sdepth == CV_8U && ddepth == CV_32S) + func = reduceC_ >; + else if(sdepth == CV_8U && ddepth == CV_32F) + func = reduceC_ >; + else if(sdepth == CV_8U && ddepth == CV_64F) + func = reduceC_ >; + else if(sdepth == CV_16U && ddepth == CV_32F) + func = reduceC_ >; + else if(sdepth == CV_16U && ddepth == CV_64F) + func = reduceC_ >; + else if(sdepth == CV_16S && ddepth == CV_32F) + func = reduceC_ >; + else if(sdepth == CV_16S && ddepth == CV_64F) + func = reduceC_ >; + else if(sdepth == CV_32F && ddepth == CV_32F) + func = reduceC_ >; + else if(sdepth == CV_32F && ddepth == CV_64F) + func = reduceC_ >; + else if(sdepth == CV_64F && ddepth == CV_64F) + func = reduceC_ >; + } + else if(op == CV_REDUCE_MAX) + { + if(sdepth == CV_8U && ddepth == CV_8U) + func = reduceC_ >; + else if(sdepth == CV_16U && ddepth == CV_16U) + func = reduceC_ >; + else if(sdepth == CV_16S && ddepth == CV_16S) + func = reduceC_ >; + else if(sdepth == CV_32F && ddepth == CV_32F) + func = reduceC_ >; + else if(sdepth == CV_64F && ddepth == CV_64F) + func = reduceC_ >; + } + else if(op == CV_REDUCE_MIN) + { + if(sdepth == CV_8U && ddepth == CV_8U) + func = reduceC_ >; + else if(sdepth == CV_16U && ddepth == CV_16U) + func = reduceC_ >; + else if(sdepth == CV_16S && ddepth == CV_16S) + func = reduceC_ >; + else if(sdepth == CV_32F && ddepth == CV_32F) + func = reduceC_ >; + else if(sdepth == CV_64F && ddepth == CV_64F) + func = reduceC_ >; + } + } + + if( !func ) + CV_Error( CV_StsUnsupportedFormat, + "Unsupported combination of input and output array formats" ); 
+ + func( src, temp ); + + if( op0 == CV_REDUCE_AVG ) + temp.convertTo(dst, dst.type(), 1./(dim == 0 ? src.rows : src.cols)); +} + + +//////////////////////////////////////// sort /////////////////////////////////////////// + +namespace cv +{ + +template static void sort_( const Mat& src, Mat& dst, int flags ) +{ + AutoBuffer buf; + T* bptr; + int i, j, n, len; + bool sortRows = (flags & 1) == CV_SORT_EVERY_ROW; + bool inplace = src.data == dst.data; + bool sortDescending = (flags & CV_SORT_DESCENDING) != 0; + + if( sortRows ) + n = src.rows, len = src.cols; + else + { + n = src.cols, len = src.rows; + buf.allocate(len); + } + bptr = (T*)buf; + + for( i = 0; i < n; i++ ) + { + T* ptr = bptr; + if( sortRows ) + { + T* dptr = (T*)(dst.data + dst.step*i); + if( !inplace ) + { + const T* sptr = (const T*)(src.data + src.step*i); + for( j = 0; j < len; j++ ) + dptr[j] = sptr[j]; + } + ptr = dptr; + } + else + { + for( j = 0; j < len; j++ ) + ptr[j] = ((const T*)(src.data + src.step*j))[i]; + } + std::sort( ptr, ptr + len, LessThan() ); + if( sortDescending ) + for( j = 0; j < len/2; j++ ) + std::swap(ptr[j], ptr[len-1-j]); + if( !sortRows ) + for( j = 0; j < len; j++ ) + ((T*)(dst.data + dst.step*j))[i] = ptr[j]; + } +} + + +template static void sortIdx_( const Mat& src, Mat& dst, int flags ) +{ + AutoBuffer buf; + AutoBuffer ibuf; + T* bptr; + int* _iptr; + int i, j, n, len; + bool sortRows = (flags & 1) == CV_SORT_EVERY_ROW; + bool sortDescending = (flags & CV_SORT_DESCENDING) != 0; + + CV_Assert( src.data != dst.data ); + + if( sortRows ) + n = src.rows, len = src.cols; + else + { + n = src.cols, len = src.rows; + buf.allocate(len); + ibuf.allocate(len); + } + bptr = (T*)buf; + _iptr = (int*)ibuf; + + for( i = 0; i < n; i++ ) + { + T* ptr = bptr; + int* iptr = _iptr; + + if( sortRows ) + { + ptr = (T*)(src.data + src.step*i); + iptr = (int*)(dst.data + dst.step*i); + } + else + { + for( j = 0; j < len; j++ ) + ptr[j] = ((const T*)(src.data + src.step*j))[i]; + } + 
for( j = 0; j < len; j++ ) + iptr[j] = j; + std::sort( iptr, iptr + len, LessThanIdx(ptr) ); + if( sortDescending ) + for( j = 0; j < len/2; j++ ) + std::swap(iptr[j], iptr[len-1-j]); + if( !sortRows ) + for( j = 0; j < len; j++ ) + ((int*)(dst.data + dst.step*j))[i] = iptr[j]; + } +} + +typedef void (*SortFunc)(const Mat& src, Mat& dst, int flags); + +} + +void cv::sort( InputArray _src, OutputArray _dst, int flags ) +{ + static SortFunc tab[] = + { + sort_, sort_, sort_, sort_, + sort_, sort_, sort_, 0 + }; + Mat src = _src.getMat(); + SortFunc func = tab[src.depth()]; + CV_Assert( src.dims <= 2 && src.channels() == 1 && func != 0 ); + _dst.create( src.size(), src.type() ); + Mat dst = _dst.getMat(); + func( src, dst, flags ); +} + +void cv::sortIdx( InputArray _src, OutputArray _dst, int flags ) +{ + static SortFunc tab[] = + { + sortIdx_, sortIdx_, sortIdx_, sortIdx_, + sortIdx_, sortIdx_, sortIdx_, 0 + }; + Mat src = _src.getMat(); + SortFunc func = tab[src.depth()]; + CV_Assert( src.dims <= 2 && src.channels() == 1 && func != 0 ); + + Mat dst = _dst.getMat(); + if( dst.data == src.data ) + _dst.release(); + _dst.create( src.size(), CV_32S ); + dst = _dst.getMat(); + func( src, dst, flags ); +} + + +////////////////////////////////////////// kmeans //////////////////////////////////////////// + +namespace cv +{ + +static void generateRandomCenter(const vector& box, float* center, RNG& rng) +{ + size_t j, dims = box.size(); + float margin = 1.f/dims; + for( j = 0; j < dims; j++ ) + center[j] = ((float)rng*(1.f+margin*2.f)-margin)*(box[j][1] - box[j][0]) + box[j][0]; +} + + +static inline float distance(const float* a, const float* b, int n) +{ + int j = 0; float d = 0.f; +#if CV_SSE + if( USE_SSE2 ) + { + float CV_DECL_ALIGNED(16) buf[4]; + __m128 d0 = _mm_setzero_ps(), d1 = _mm_setzero_ps(); + + for( ; j <= n - 8; j += 8 ) + { + __m128 t0 = _mm_sub_ps(_mm_loadu_ps(a + j), _mm_loadu_ps(b + j)); + __m128 t1 = _mm_sub_ps(_mm_loadu_ps(a + j + 4), _mm_loadu_ps(b + 
j + 4)); + d0 = _mm_add_ps(d0, _mm_mul_ps(t0, t0)); + d1 = _mm_add_ps(d1, _mm_mul_ps(t1, t1)); + } + _mm_store_ps(buf, _mm_add_ps(d0, d1)); + d = buf[0] + buf[1] + buf[2] + buf[3]; + } + else +#endif + { + for( ; j <= n - 4; j += 4 ) + { + float t0 = a[j] - b[j], t1 = a[j+1] - b[j+1], t2 = a[j+2] - b[j+2], t3 = a[j+3] - b[j+3]; + d += t0*t0 + t1*t1 + t2*t2 + t3*t3; + } + } + + for( ; j < n; j++ ) + { + float t = a[j] - b[j]; + d += t*t; + } + return d; +} + +/* +k-means center initialization using the following algorithm: +Arthur & Vassilvitskii (2007) k-means++: The Advantages of Careful Seeding +*/ +static void generateCentersPP(const Mat& _data, Mat& _out_centers, + int K, RNG& rng, int trials) +{ + int i, j, k, dims = _data.cols, N = _data.rows; + const float* data = _data.ptr(0); + size_t step = _data.step/sizeof(data[0]); + vector _centers(K); + int* centers = &_centers[0]; + vector _dist(N*3); + float* dist = &_dist[0], *tdist = dist + N, *tdist2 = tdist + N; + double sum0 = 0; + + centers[0] = (unsigned)rng % N; + + for( i = 0; i < N; i++ ) + { + dist[i] = distance(data + step*i, data + step*centers[0], dims); + sum0 += dist[i]; + } + + for( k = 1; k < K; k++ ) + { + double bestSum = DBL_MAX; + int bestCenter = -1; + + for( j = 0; j < trials; j++ ) + { + double p = (double)rng*sum0, s = 0; + for( i = 0; i < N-1; i++ ) + if( (p -= dist[i]) <= 0 ) + break; + int ci = i; + for( i = 0; i < N; i++ ) + { + tdist2[i] = std::min(distance(data + step*i, data + step*ci, dims), dist[i]); + s += tdist2[i]; + } + + if( s < bestSum ) + { + bestSum = s; + bestCenter = ci; + std::swap(tdist, tdist2); + } + } + centers[k] = bestCenter; + sum0 = bestSum; + std::swap(dist, tdist); + } + + for( k = 0; k < K; k++ ) + { + const float* src = data + step*centers[k]; + float* dst = _out_centers.ptr(k); + for( j = 0; j < dims; j++ ) + dst[j] = src[j]; + } +} + +} + +double cv::kmeans( InputArray _data, int K, + InputOutputArray _bestLabels, + TermCriteria criteria, int attempts, + 
int flags, OutputArray _centers ) +{ + const int SPP_TRIALS = 3; + Mat data = _data.getMat(); + int N = data.rows > 1 ? data.rows : data.cols; + int dims = (data.rows > 1 ? data.cols : 1)*data.channels(); + int type = data.depth(); + + attempts = std::max(attempts, 1); + CV_Assert( data.dims <= 2 && type == CV_32F && K > 0 ); + + _bestLabels.create(N, 1, CV_32S, -1, true); + + Mat _labels, best_labels = _bestLabels.getMat(); + if( flags & CV_KMEANS_USE_INITIAL_LABELS ) + { + CV_Assert( (best_labels.cols == 1 || best_labels.rows == 1) && + best_labels.cols*best_labels.rows == N && + best_labels.type() == CV_32S && + best_labels.isContinuous()); + best_labels.copyTo(_labels); + } + else + { + if( !((best_labels.cols == 1 || best_labels.rows == 1) && + best_labels.cols*best_labels.rows == N && + best_labels.type() == CV_32S && + best_labels.isContinuous())) + best_labels.create(N, 1, CV_32S); + _labels.create(best_labels.size(), best_labels.type()); + } + int* labels = _labels.ptr(); + + Mat centers(K, dims, type), old_centers(K, dims, type); + vector counters(K); + vector _box(dims); + Vec2f* box = &_box[0]; + + double best_compactness = DBL_MAX, compactness = 0; + RNG& rng = theRNG(); + int a, iter, i, j, k; + + if( criteria.type & TermCriteria::EPS ) + criteria.epsilon = std::max(criteria.epsilon, 0.); + else + criteria.epsilon = FLT_EPSILON; + criteria.epsilon *= criteria.epsilon; + + if( criteria.type & TermCriteria::COUNT ) + criteria.maxCount = std::min(std::max(criteria.maxCount, 2), 100); + else + criteria.maxCount = 100; + + if( K == 1 ) + { + attempts = 1; + criteria.maxCount = 2; + } + + const float* sample = data.ptr(0); + for( j = 0; j < dims; j++ ) + box[j] = Vec2f(sample[j], sample[j]); + + for( i = 1; i < N; i++ ) + { + sample = data.ptr(i); + for( j = 0; j < dims; j++ ) + { + float v = sample[j]; + box[j][0] = std::min(box[j][0], v); + box[j][1] = std::max(box[j][1], v); + } + } + + for( a = 0; a < attempts; a++ ) + { + double max_center_shift = 
DBL_MAX; + for( iter = 0; iter < criteria.maxCount && max_center_shift > criteria.epsilon; iter++ ) + { + swap(centers, old_centers); + + if( iter == 0 && (a > 0 || !(flags & KMEANS_USE_INITIAL_LABELS)) ) + { + if( flags & KMEANS_PP_CENTERS ) + generateCentersPP(data, centers, K, rng, SPP_TRIALS); + else + { + for( k = 0; k < K; k++ ) + generateRandomCenter(_box, centers.ptr(k), rng); + } + } + else + { + if( iter == 0 && a == 0 && (flags & KMEANS_USE_INITIAL_LABELS) ) + { + for( i = 0; i < N; i++ ) + CV_Assert( (unsigned)labels[i] < (unsigned)K ); + } + + // compute centers + centers = Scalar(0); + for( k = 0; k < K; k++ ) + counters[k] = 0; + + for( i = 0; i < N; i++ ) + { + sample = data.ptr(i); + k = labels[i]; + float* center = centers.ptr(k); + for( j = 0; j <= dims - 4; j += 4 ) + { + float t0 = center[j] + sample[j]; + float t1 = center[j+1] + sample[j+1]; + + center[j] = t0; + center[j+1] = t1; + + t0 = center[j+2] + sample[j+2]; + t1 = center[j+3] + sample[j+3]; + + center[j+2] = t0; + center[j+3] = t1; + } + for( ; j < dims; j++ ) + center[j] += sample[j]; + counters[k]++; + } + + if( iter > 0 ) + max_center_shift = 0; + + for( k = 0; k < K; k++ ) + { + float* center = centers.ptr(k); + if( counters[k] != 0 ) + { + float scale = 1.f/counters[k]; + for( j = 0; j < dims; j++ ) + center[j] *= scale; + } + else + generateRandomCenter(_box, center, rng); + + if( iter > 0 ) + { + double dist = 0; + const float* old_center = old_centers.ptr(k); + for( j = 0; j < dims; j++ ) + { + double t = center[j] - old_center[j]; + dist += t*t; + } + max_center_shift = std::max(max_center_shift, dist); + } + } + } + + // assign labels + compactness = 0; + for( i = 0; i < N; i++ ) + { + sample = data.ptr(i); + int k_best = 0; + double min_dist = DBL_MAX; + + for( k = 0; k < K; k++ ) + { + const float* center = centers.ptr(k); + double dist = distance(sample, center, dims); + + if( min_dist > dist ) + { + min_dist = dist; + k_best = k; + } + } + + compactness += min_dist; + 
labels[i] = k_best; + } + } + + if( compactness < best_compactness ) + { + best_compactness = compactness; + if( _centers.needed() ) + centers.copyTo(_centers); + _labels.copyTo(best_labels); + } + } + + return best_compactness; +} + + +CV_IMPL void cvSetIdentity( CvArr* arr, CvScalar value ) +{ + cv::Mat m = cv::cvarrToMat(arr); + cv::setIdentity(m, value); +} + + +CV_IMPL CvScalar cvTrace( const CvArr* arr ) +{ + return cv::trace(cv::cvarrToMat(arr)); +} + + +CV_IMPL void cvTranspose( const CvArr* srcarr, CvArr* dstarr ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + + CV_Assert( src.rows == dst.cols && src.cols == dst.rows && src.type() == dst.type() ); + transpose( src, dst ); +} + + +CV_IMPL void cvCompleteSymm( CvMat* matrix, int LtoR ) +{ + cv::Mat m(matrix); + cv::completeSymm( m, LtoR != 0 ); +} + + +CV_IMPL void cvCrossProduct( const CvArr* srcAarr, const CvArr* srcBarr, CvArr* dstarr ) +{ + cv::Mat srcA = cv::cvarrToMat(srcAarr), dst = cv::cvarrToMat(dstarr); + + CV_Assert( srcA.size() == dst.size() && srcA.type() == dst.type() ); + srcA.cross(cv::cvarrToMat(srcBarr)).copyTo(dst); +} + + +CV_IMPL void +cvReduce( const CvArr* srcarr, CvArr* dstarr, int dim, int op ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + + if( dim < 0 ) + dim = src.rows > dst.rows ? 0 : src.cols > dst.cols ? 
1 : dst.cols == 1; + + if( dim > 1 ) + CV_Error( CV_StsOutOfRange, "The reduced dimensionality index is out of range" ); + + if( (dim == 0 && (dst.cols != src.cols || dst.rows != 1)) || + (dim == 1 && (dst.rows != src.rows || dst.cols != 1)) ) + CV_Error( CV_StsBadSize, "The output array size is incorrect" ); + + if( src.channels() != dst.channels() ) + CV_Error( CV_StsUnmatchedFormats, "Input and output arrays must have the same number of channels" ); + + cv::reduce(src, dst, dim, op, dst.type()); +} + + +CV_IMPL CvArr* +cvRange( CvArr* arr, double start, double end ) +{ + int ok = 0; + + CvMat stub, *mat = (CvMat*)arr; + double delta; + int type, step; + double val = start; + int i, j; + int rows, cols; + + if( !CV_IS_MAT(mat) ) + mat = cvGetMat( mat, &stub); + + rows = mat->rows; + cols = mat->cols; + type = CV_MAT_TYPE(mat->type); + delta = (end-start)/(rows*cols); + + if( CV_IS_MAT_CONT(mat->type) ) + { + cols *= rows; + rows = 1; + step = 1; + } + else + step = mat->step / CV_ELEM_SIZE(type); + + if( type == CV_32SC1 ) + { + int* idata = mat->data.i; + int ival = cvRound(val), idelta = cvRound(delta); + + if( fabs(val - ival) < DBL_EPSILON && + fabs(delta - idelta) < DBL_EPSILON ) + { + for( i = 0; i < rows; i++, idata += step ) + for( j = 0; j < cols; j++, ival += idelta ) + idata[j] = ival; + } + else + { + for( i = 0; i < rows; i++, idata += step ) + for( j = 0; j < cols; j++, val += delta ) + idata[j] = cvRound(val); + } + } + else if( type == CV_32FC1 ) + { + float* fdata = mat->data.fl; + for( i = 0; i < rows; i++, fdata += step ) + for( j = 0; j < cols; j++, val += delta ) + fdata[j] = (float)val; + } + else + CV_Error( CV_StsUnsupportedFormat, "The function only supports 32sC1 and 32fC1 datatypes" ); + + ok = 1; + return ok ? 
arr : 0; +} + + +CV_IMPL void +cvSort( const CvArr* _src, CvArr* _dst, CvArr* _idx, int flags ) +{ + cv::Mat src = cv::cvarrToMat(_src), dst, idx; + + if( _idx ) + { + cv::Mat idx0 = cv::cvarrToMat(_idx), idx = idx0; + CV_Assert( src.size() == idx.size() && idx.type() == CV_32S && src.data != idx.data ); + cv::sortIdx( src, idx, flags ); + CV_Assert( idx0.data == idx.data ); + } + + if( _dst ) + { + cv::Mat dst0 = cv::cvarrToMat(_dst), dst = dst0; + CV_Assert( src.size() == dst.size() && src.type() == dst.type() ); + cv::sort( src, dst, flags ); + CV_Assert( dst0.data == dst.data ); + } +} + + +CV_IMPL int +cvKMeans2( const CvArr* _samples, int cluster_count, CvArr* _labels, + CvTermCriteria termcrit, int attempts, CvRNG*, + int flags, CvArr* _centers, double* _compactness ) +{ + cv::Mat data = cv::cvarrToMat(_samples), labels = cv::cvarrToMat(_labels), centers; + if( _centers ) + { + centers = cv::cvarrToMat(_centers); + centers = centers.reshape(1); + } + CV_Assert( labels.isContinuous() && labels.type() == CV_32S && + (labels.cols == 1 || labels.rows == 1) && + labels.cols + labels.rows - 1 == data.rows ); + + double compactness = cv::kmeans(data, cluster_count, labels, termcrit, attempts, + flags, _centers ? 
cv::_OutputArray(centers) : cv::_OutputArray() ); + if( _compactness ) + *_compactness = compactness; + return 1; +} + +///////////////////////////// n-dimensional matrices //////////////////////////// + +namespace cv +{ + +Mat Mat::reshape(int, int, const int*) const +{ + CV_Error(CV_StsNotImplemented, ""); + // TBD + return Mat(); +} + +Mat::operator CvMatND() const +{ + CvMatND mat; + cvInitMatNDHeader( &mat, dims, size, type(), data ); + int i, d = dims; + for( i = 0; i < d; i++ ) + mat.dim[i].step = (int)step[i]; + mat.type |= flags & CONTINUOUS_FLAG; + return mat; +} + +NAryMatIterator::NAryMatIterator() + : arrays(0), planes(0), ptrs(0), narrays(0), nplanes(0), size(0), iterdepth(0), idx(0) +{ +} + +NAryMatIterator::NAryMatIterator(const Mat** _arrays, Mat* _planes, int _narrays) +: arrays(0), planes(0), ptrs(0), narrays(0), nplanes(0), size(0), iterdepth(0), idx(0) +{ + init(_arrays, _planes, 0, _narrays); +} + +NAryMatIterator::NAryMatIterator(const Mat** _arrays, uchar** _ptrs, int _narrays) + : arrays(0), planes(0), ptrs(0), narrays(0), nplanes(0), size(0), iterdepth(0), idx(0) +{ + init(_arrays, 0, _ptrs, _narrays); +} + +void NAryMatIterator::init(const Mat** _arrays, Mat* _planes, uchar** _ptrs, int _narrays) +{ + CV_Assert( _arrays && (_ptrs || _planes) ); + int i, j, d1=0, i0 = -1, d = -1; + + arrays = _arrays; + ptrs = _ptrs; + planes = _planes; + narrays = _narrays; + nplanes = 0; + size = 0; + + if( narrays < 0 ) + { + for( i = 0; _arrays[i] != 0; i++ ) + ; + narrays = i; + CV_Assert(narrays <= 1000); + } + + iterdepth = 0; + + for( i = 0; i < narrays; i++ ) + { + CV_Assert(arrays[i] != 0); + const Mat& A = *arrays[i]; + if( ptrs ) + ptrs[i] = A.data; + + if( !A.data ) + continue; + + if( i0 < 0 ) + { + i0 = i; + d = A.dims; + + // find the first dimensionality which is different from 1; + // in any of the arrays the first "d1" step do not affect the continuity + for( d1 = 0; d1 < d; d1++ ) + if( A.size[d1] > 1 ) + break; + } + else + CV_Assert( 
A.size == arrays[i0]->size ); + + if( !A.isContinuous() ) + { + CV_Assert( A.step[d-1] == A.elemSize() ); + for( j = d-1; j > d1; j-- ) + if( A.step[j]*A.size[j] < A.step[j-1] ) + break; + iterdepth = std::max(iterdepth, j); + } + } + + if( i0 >= 0 ) + { + size = arrays[i0]->size[d-1]; + for( j = d-1; j > iterdepth; j-- ) + { + int64 total1 = (int64)size*arrays[i0]->size[j-1]; + if( total1 != (int)total1 ) + break; + size = (int)total1; + } + + iterdepth = j; + if( iterdepth == d1 ) + iterdepth = 0; + + nplanes = 1; + for( j = iterdepth-1; j >= 0; j-- ) + nplanes *= arrays[i0]->size[j]; + } + else + iterdepth = 0; + + idx = 0; + + if( !planes ) + return; + + for( i = 0; i < narrays; i++ ) + { + CV_Assert(arrays[i] != 0); + const Mat& A = *arrays[i]; + + if( !A.data ) + { + planes[i] = Mat(); + continue; + } + + planes[i] = Mat(1, (int)size, A.type(), A.data); + } +} + + +NAryMatIterator& NAryMatIterator::operator ++() +{ + if( idx >= nplanes-1 ) + return *this; + ++idx; + + if( iterdepth == 1 ) + { + if( ptrs ) + { + for( int i = 0; i < narrays; i++ ) + { + if( !ptrs[i] ) + continue; + ptrs[i] = arrays[i]->data + arrays[i]->step[0]*idx; + } + } + if( planes ) + { + for( int i = 0; i < narrays; i++ ) + { + if( !planes[i].data ) + continue; + planes[i].data = arrays[i]->data + arrays[i]->step[0]*idx; + } + } + } + else + { + for( int i = 0; i < narrays; i++ ) + { + const Mat& A = *arrays[i]; + if( !A.data ) + continue; + int _idx = (int)idx; + uchar* data = A.data; + for( int j = iterdepth-1; j >= 0 && _idx > 0; j-- ) + { + int szi = A.size[j], t = _idx/szi; + data += (_idx - t * szi)*A.step[j]; + _idx = t; + } + if( ptrs ) + ptrs[i] = data; + if( planes ) + planes[i].data = data; + } + } + + return *this; +} + +NAryMatIterator NAryMatIterator::operator ++(int) +{ + NAryMatIterator it = *this; + ++*this; + return it; +} + +/////////////////////////////////////////////////////////////////////////// +// MatConstIterator // 
+/////////////////////////////////////////////////////////////////////////// + +Point MatConstIterator::pos() const +{ + if( !m ) + return Point(); + CV_DbgAssert(m->dims <= 2); + + ptrdiff_t ofs = ptr - m->data; + int y = (int)(ofs/m->step[0]); + return Point((int)((ofs - y*m->step[0])/elemSize), y); +} + +void MatConstIterator::pos(int* _idx) const +{ + CV_Assert(m != 0 && _idx); + ptrdiff_t ofs = ptr - m->data; + for( int i = 0; i < m->dims; i++ ) + { + size_t s = m->step[i], v = ofs/s; + ofs -= v*s; + _idx[i] = (int)v; + } +} + +ptrdiff_t MatConstIterator::lpos() const +{ + if(!m) + return 0; + if( m->isContinuous() ) + return (ptr - sliceStart)/elemSize; + ptrdiff_t ofs = ptr - m->data; + int i, d = m->dims; + if( d == 2 ) + { + ptrdiff_t y = ofs/m->step[0]; + return y*m->cols + (ofs - y*m->step[0])/elemSize; + } + ptrdiff_t result = 0; + for( i = 0; i < d; i++ ) + { + size_t s = m->step[i], v = ofs/s; + ofs -= v*s; + result = result*m->size[i] + v; + } + return result; +} + +void MatConstIterator::seek(ptrdiff_t ofs, bool relative) +{ + if( m->isContinuous() ) + { + ptr = (relative ? ptr : sliceStart) + ofs*elemSize; + if( ptr < sliceStart ) + ptr = sliceStart; + else if( ptr > sliceEnd ) + ptr = sliceEnd; + return; + } + + int d = m->dims; + if( d == 2 ) + { + ptrdiff_t ofs0, y; + if( relative ) + { + ofs0 = ptr - m->data; + y = ofs0/m->step[0]; + ofs += y*m->cols + (ofs0 - y*m->step[0])/elemSize; + } + y = ofs/m->cols; + int y1 = std::min(std::max((int)y, 0), m->rows-1); + sliceStart = m->data + y1*m->step[0]; + sliceEnd = sliceStart + m->cols*elemSize; + ptr = y < 0 ? sliceStart : y >= m->rows ? 
sliceEnd : + sliceStart + (ofs - y*m->cols)*elemSize; + return; + } + + if( relative ) + ofs += lpos(); + + if( ofs < 0 ) + ofs = 0; + + int szi = m->size[d-1]; + ptrdiff_t t = ofs/szi; + int v = (int)(ofs - t*szi); + ofs = t; + ptr = m->data + v*elemSize; + sliceStart = m->data; + + for( int i = d-2; i >= 0; i-- ) + { + szi = m->size[i]; + t = ofs/szi; + v = (int)(ofs - t*szi); + ofs = t; + sliceStart += v*m->step[i]; + } + + sliceEnd = sliceStart + m->size[d-1]*elemSize; + if( ofs > 0 ) + ptr = sliceEnd; + else + ptr = sliceStart + (ptr - m->data); +} + +void MatConstIterator::seek(const int* _idx, bool relative) +{ + int i, d = m->dims; + ptrdiff_t ofs = 0; + if( !_idx ) + ; + else if( d == 2 ) + ofs = _idx[0]*m->size[1] + _idx[1]; + else + { + for( i = 0; i < d; i++ ) + ofs = ofs*m->size[i] + _idx[i]; + } + seek(ofs, relative); +} + +ptrdiff_t operator - (const MatConstIterator& b, const MatConstIterator& a) +{ + if( a.m != b.m ) + return INT_MAX; + if( a.sliceEnd == b.sliceEnd ) + return (b.ptr - a.ptr)/b.elemSize; + + return b.lpos() - a.lpos(); +} + +//////////////////////////////// SparseMat //////////////////////////////// + +template void +convertData_(const void* _from, void* _to, int cn) +{ + const T1* from = (const T1*)_from; + T2* to = (T2*)_to; + if( cn == 1 ) + *to = saturate_cast(*from); + else + for( int i = 0; i < cn; i++ ) + to[i] = saturate_cast(from[i]); +} + +template void +convertScaleData_(const void* _from, void* _to, int cn, double alpha, double beta) +{ + const T1* from = (const T1*)_from; + T2* to = (T2*)_to; + if( cn == 1 ) + *to = saturate_cast(*from*alpha + beta); + else + for( int i = 0; i < cn; i++ ) + to[i] = saturate_cast(from[i]*alpha + beta); +} + +ConvertData getConvertData(int fromType, int toType) +{ + static ConvertData tab[][8] = + {{ convertData_, convertData_, + convertData_, convertData_, + convertData_, convertData_, + convertData_, 0 }, + + { convertData_, convertData_, + convertData_, convertData_, + convertData_, 
convertData_, + convertData_, 0 }, + + { convertData_, convertData_, + convertData_, convertData_, + convertData_, convertData_, + convertData_, 0 }, + + { convertData_, convertData_, + convertData_, convertData_, + convertData_, convertData_, + convertData_, 0 }, + + { convertData_, convertData_, + convertData_, convertData_, + convertData_, convertData_, + convertData_, 0 }, + + { convertData_, convertData_, + convertData_, convertData_, + convertData_, convertData_, + convertData_, 0 }, + + { convertData_, convertData_, + convertData_, convertData_, + convertData_, convertData_, + convertData_, 0 }, + + { 0, 0, 0, 0, 0, 0, 0, 0 }}; + + ConvertData func = tab[CV_MAT_DEPTH(fromType)][CV_MAT_DEPTH(toType)]; + CV_Assert( func != 0 ); + return func; +} + +ConvertScaleData getConvertScaleData(int fromType, int toType) +{ + static ConvertScaleData tab[][8] = + {{ convertScaleData_, convertScaleData_, + convertScaleData_, convertScaleData_, + convertScaleData_, convertScaleData_, + convertScaleData_, 0 }, + + { convertScaleData_, convertScaleData_, + convertScaleData_, convertScaleData_, + convertScaleData_, convertScaleData_, + convertScaleData_, 0 }, + + { convertScaleData_, convertScaleData_, + convertScaleData_, convertScaleData_, + convertScaleData_, convertScaleData_, + convertScaleData_, 0 }, + + { convertScaleData_, convertScaleData_, + convertScaleData_, convertScaleData_, + convertScaleData_, convertScaleData_, + convertScaleData_, 0 }, + + { convertScaleData_, convertScaleData_, + convertScaleData_, convertScaleData_, + convertScaleData_, convertScaleData_, + convertScaleData_, 0 }, + + { convertScaleData_, convertScaleData_, + convertScaleData_, convertScaleData_, + convertScaleData_, convertScaleData_, + convertScaleData_, 0 }, + + { convertScaleData_, convertScaleData_, + convertScaleData_, convertScaleData_, + convertScaleData_, convertScaleData_, + convertScaleData_, 0 }, + + { 0, 0, 0, 0, 0, 0, 0, 0 }}; + + ConvertScaleData func = 
tab[CV_MAT_DEPTH(fromType)][CV_MAT_DEPTH(toType)]; + CV_Assert( func != 0 ); + return func; +} + +enum { HASH_SIZE0 = 8 }; + +static inline void copyElem(const uchar* from, uchar* to, size_t elemSize) +{ + size_t i; + for( i = 0; (int)i <= (int)(elemSize - sizeof(int)); i += sizeof(int) ) + *(int*)(to + i) = *(const int*)(from + i); + for( ; i < elemSize; i++ ) + to[i] = from[i]; +} + +static inline bool isZeroElem(const uchar* data, size_t elemSize) +{ + size_t i; + for( i = 0; i <= elemSize - sizeof(int); i += sizeof(int) ) + if( *(int*)(data + i) != 0 ) + return false; + for( ; i < elemSize; i++ ) + if( data[i] != 0 ) + return false; + return true; +} + +SparseMat::Hdr::Hdr( int _dims, const int* _sizes, int _type ) +{ + refcount = 1; + + dims = _dims; + valueOffset = (int)alignSize(sizeof(SparseMat::Node) + + sizeof(int)*std::max(dims - CV_MAX_DIM, 0), CV_ELEM_SIZE1(_type)); + nodeSize = alignSize(valueOffset + + CV_ELEM_SIZE(_type), (int)sizeof(size_t)); + + int i; + for( i = 0; i < dims; i++ ) + size[i] = _sizes[i]; + for( ; i < CV_MAX_DIM; i++ ) + size[i] = 0; + clear(); +} + +void SparseMat::Hdr::clear() +{ + hashtab.clear(); + hashtab.resize(HASH_SIZE0); + pool.clear(); + pool.resize(nodeSize); + nodeCount = freeList = 0; +} + + +SparseMat::SparseMat(const Mat& m) +: flags(MAGIC_VAL), hdr(0) +{ + create( m.dims, m.size, m.type() ); + + int i, idx[CV_MAX_DIM] = {0}, d = m.dims, lastSize = m.size[d - 1]; + size_t esz = m.elemSize(); + uchar* ptr = m.data; + + for(;;) + { + for( i = 0; i < lastSize; i++, ptr += esz ) + { + if( isZeroElem(ptr, esz) ) + continue; + idx[d-1] = i; + uchar* to = newNode(idx, hash(idx)); + copyElem( ptr, to, esz ); + } + + for( i = d - 2; i >= 0; i-- ) + { + ptr += m.step[i] - m.size[i+1]*m.step[i+1]; + if( ++idx[i] < m.size[i] ) + break; + idx[i] = 0; + } + if( i < 0 ) + break; + } +} + +SparseMat::SparseMat(const CvSparseMat* m) +: flags(MAGIC_VAL), hdr(0) +{ + CV_Assert(m); + create( m->dims, &m->size[0], m->type ); + + 
CvSparseMatIterator it; + CvSparseNode* n = cvInitSparseMatIterator(m, &it); + size_t esz = elemSize(); + + for( ; n != 0; n = cvGetNextSparseNode(&it) ) + { + const int* idx = CV_NODE_IDX(m, n); + uchar* to = newNode(idx, hash(idx)); + copyElem((const uchar*)CV_NODE_VAL(m, n), to, esz); + } +} + +void SparseMat::create(int d, const int* _sizes, int _type) +{ + int i; + CV_Assert( _sizes && 0 < d && d <= CV_MAX_DIM ); + for( i = 0; i < d; i++ ) + CV_Assert( _sizes[i] > 0 ); + _type = CV_MAT_TYPE(_type); + if( hdr && _type == type() && hdr->dims == d && hdr->refcount == 1 ) + { + for( i = 0; i < d; i++ ) + if( _sizes[i] != hdr->size[i] ) + break; + if( i == d ) + { + clear(); + return; + } + } + release(); + flags = MAGIC_VAL | _type; + hdr = new Hdr(d, _sizes, _type); +} + +void SparseMat::copyTo( SparseMat& m ) const +{ + if( hdr == m.hdr ) + return; + if( !hdr ) + { + m.release(); + return; + } + m.create( hdr->dims, hdr->size, type() ); + SparseMatConstIterator from = begin(); + size_t i, N = nzcount(), esz = elemSize(); + + for( i = 0; i < N; i++, ++from ) + { + const Node* n = from.node(); + uchar* to = m.newNode(n->idx, n->hashval); + copyElem( from.ptr, to, esz ); + } +} + +void SparseMat::copyTo( Mat& m ) const +{ + CV_Assert( hdr ); + m.create( dims(), hdr->size, type() ); + m = Scalar(0); + + SparseMatConstIterator from = begin(); + size_t i, N = nzcount(), esz = elemSize(); + + for( i = 0; i < N; i++, ++from ) + { + const Node* n = from.node(); + copyElem( from.ptr, m.ptr(n->idx), esz); + } +} + + +void SparseMat::convertTo( SparseMat& m, int rtype, double alpha ) const +{ + int cn = channels(); + if( rtype < 0 ) + rtype = type(); + rtype = CV_MAKETYPE(rtype, cn); + if( hdr == m.hdr && rtype != type() ) + { + SparseMat temp; + convertTo(temp, rtype, alpha); + m = temp; + return; + } + + CV_Assert(hdr != 0); + if( hdr != m.hdr ) + m.create( hdr->dims, hdr->size, rtype ); + + SparseMatConstIterator from = begin(); + size_t i, N = nzcount(); + + if( alpha 
== 1 ) + { + ConvertData cvtfunc = getConvertData(type(), rtype); + for( i = 0; i < N; i++, ++from ) + { + const Node* n = from.node(); + uchar* to = hdr == m.hdr ? from.ptr : m.newNode(n->idx, n->hashval); + cvtfunc( from.ptr, to, cn ); + } + } + else + { + ConvertScaleData cvtfunc = getConvertScaleData(type(), rtype); + for( i = 0; i < N; i++, ++from ) + { + const Node* n = from.node(); + uchar* to = hdr == m.hdr ? from.ptr : m.newNode(n->idx, n->hashval); + cvtfunc( from.ptr, to, cn, alpha, 0 ); + } + } +} + + +void SparseMat::convertTo( Mat& m, int rtype, double alpha, double beta ) const +{ + int cn = channels(); + if( rtype < 0 ) + rtype = type(); + rtype = CV_MAKETYPE(rtype, cn); + + CV_Assert( hdr ); + m.create( dims(), hdr->size, rtype ); + m = Scalar(beta); + + SparseMatConstIterator from = begin(); + size_t i, N = nzcount(); + + if( alpha == 1 && beta == 0 ) + { + ConvertData cvtfunc = getConvertData(type(), rtype); + for( i = 0; i < N; i++, ++from ) + { + const Node* n = from.node(); + uchar* to = m.ptr(n->idx); + cvtfunc( from.ptr, to, cn ); + } + } + else + { + ConvertScaleData cvtfunc = getConvertScaleData(type(), rtype); + for( i = 0; i < N; i++, ++from ) + { + const Node* n = from.node(); + uchar* to = m.ptr(n->idx); + cvtfunc( from.ptr, to, cn, alpha, beta ); + } + } +} + +void SparseMat::clear() +{ + if( hdr ) + hdr->clear(); +} + +SparseMat::operator CvSparseMat*() const +{ + if( !hdr ) + return 0; + CvSparseMat* m = cvCreateSparseMat(hdr->dims, hdr->size, type()); + + SparseMatConstIterator from = begin(); + size_t i, N = nzcount(), esz = elemSize(); + + for( i = 0; i < N; i++, ++from ) + { + const Node* n = from.node(); + uchar* to = cvPtrND(m, n->idx, 0, -2, 0); + copyElem(from.ptr, to, esz); + } + return m; +} + +uchar* SparseMat::ptr(int i0, int i1, bool createMissing, size_t* hashval) +{ + CV_Assert( hdr && hdr->dims == 2 ); + size_t h = hashval ? 
*hashval : hash(i0, i1); + size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx]; + uchar* pool = &hdr->pool[0]; + while( nidx != 0 ) + { + Node* elem = (Node*)(pool + nidx); + if( elem->hashval == h && elem->idx[0] == i0 && elem->idx[1] == i1 ) + return &value(elem); + nidx = elem->next; + } + + if( createMissing ) + { + int idx[] = { i0, i1 }; + return newNode( idx, h ); + } + return 0; +} + +uchar* SparseMat::ptr(int i0, int i1, int i2, bool createMissing, size_t* hashval) +{ + CV_Assert( hdr && hdr->dims == 3 ); + size_t h = hashval ? *hashval : hash(i0, i1, i2); + size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx]; + uchar* pool = &hdr->pool[0]; + while( nidx != 0 ) + { + Node* elem = (Node*)(pool + nidx); + if( elem->hashval == h && elem->idx[0] == i0 && + elem->idx[1] == i1 && elem->idx[2] == i2 ) + return &value(elem); + nidx = elem->next; + } + + if( createMissing ) + { + int idx[] = { i0, i1, i2 }; + return newNode( idx, h ); + } + return 0; +} + +uchar* SparseMat::ptr(const int* idx, bool createMissing, size_t* hashval) +{ + CV_Assert( hdr ); + int i, d = hdr->dims; + size_t h = hashval ? *hashval : hash(idx); + size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx]; + uchar* pool = &hdr->pool[0]; + while( nidx != 0 ) + { + Node* elem = (Node*)(pool + nidx); + if( elem->hashval == h ) + { + for( i = 0; i < d; i++ ) + if( elem->idx[i] != idx[i] ) + break; + if( i == d ) + return &value(elem); + } + nidx = elem->next; + } + + return createMissing ? newNode(idx, h) : 0; +} + +void SparseMat::erase(int i0, int i1, size_t* hashval) +{ + CV_Assert( hdr && hdr->dims == 2 ); + size_t h = hashval ? 
*hashval : hash(i0, i1); + size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx], previdx=0; + uchar* pool = &hdr->pool[0]; + while( nidx != 0 ) + { + Node* elem = (Node*)(pool + nidx); + if( elem->hashval == h && elem->idx[0] == i0 && elem->idx[1] == i1 ) + break; + previdx = nidx; + nidx = elem->next; + } + + if( nidx ) + removeNode(hidx, nidx, previdx); +} + +void SparseMat::erase(int i0, int i1, int i2, size_t* hashval) +{ + CV_Assert( hdr && hdr->dims == 3 ); + size_t h = hashval ? *hashval : hash(i0, i1, i2); + size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx], previdx=0; + uchar* pool = &hdr->pool[0]; + while( nidx != 0 ) + { + Node* elem = (Node*)(pool + nidx); + if( elem->hashval == h && elem->idx[0] == i0 && + elem->idx[1] == i1 && elem->idx[2] == i2 ) + break; + previdx = nidx; + nidx = elem->next; + } + + if( nidx ) + removeNode(hidx, nidx, previdx); +} + +void SparseMat::erase(const int* idx, size_t* hashval) +{ + CV_Assert( hdr ); + int i, d = hdr->dims; + size_t h = hashval ? 
*hashval : hash(idx); + size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx], previdx=0; + uchar* pool = &hdr->pool[0]; + while( nidx != 0 ) + { + Node* elem = (Node*)(pool + nidx); + if( elem->hashval == h ) + { + for( i = 0; i < d; i++ ) + if( elem->idx[i] != idx[i] ) + break; + if( i == d ) + break; + } + previdx = nidx; + nidx = elem->next; + } + + if( nidx ) + removeNode(hidx, nidx, previdx); +} + +void SparseMat::resizeHashTab(size_t newsize) +{ + newsize = std::max(newsize, (size_t)8); + if((newsize & (newsize-1)) != 0) + newsize = (size_t)1 << cvCeil(std::log((double)newsize)/CV_LOG2); + + size_t i, hsize = hdr->hashtab.size(); + vector _newh(newsize); + size_t* newh = &_newh[0]; + for( i = 0; i < newsize; i++ ) + newh[i] = 0; + uchar* pool = &hdr->pool[0]; + for( i = 0; i < hsize; i++ ) + { + size_t nidx = hdr->hashtab[i]; + while( nidx ) + { + Node* elem = (Node*)(pool + nidx); + size_t next = elem->next; + size_t newhidx = elem->hashval & (newsize - 1); + elem->next = newh[newhidx]; + newh[newhidx] = nidx; + nidx = next; + } + } + hdr->hashtab = _newh; +} + +uchar* SparseMat::newNode(const int* idx, size_t hashval) +{ + const int HASH_MAX_FILL_FACTOR=3; + assert(hdr); + size_t hsize = hdr->hashtab.size(); + if( ++hdr->nodeCount > hsize*HASH_MAX_FILL_FACTOR ) + { + resizeHashTab(std::max(hsize*2, (size_t)8)); + hsize = hdr->hashtab.size(); + } + + if( !hdr->freeList ) + { + size_t i, nsz = hdr->nodeSize, psize = hdr->pool.size(), + newpsize = std::max(psize*2, 8*nsz); + hdr->pool.resize(newpsize); + uchar* pool = &hdr->pool[0]; + hdr->freeList = std::max(psize, nsz); + for( i = hdr->freeList; i < newpsize - nsz; i += nsz ) + ((Node*)(pool + i))->next = i + nsz; + ((Node*)(pool + i))->next = 0; + } + size_t nidx = hdr->freeList; + Node* elem = (Node*)&hdr->pool[nidx]; + hdr->freeList = elem->next; + elem->hashval = hashval; + size_t hidx = hashval & (hsize - 1); + elem->next = hdr->hashtab[hidx]; + hdr->hashtab[hidx] = nidx; + + int i, d 
= hdr->dims; + for( i = 0; i < d; i++ ) + elem->idx[i] = idx[i]; + size_t esz = elemSize(); + uchar* p = &value(elem); + if( esz == sizeof(float) ) + *((float*)p) = 0.f; + else if( esz == sizeof(double) ) + *((double*)p) = 0.; + else + memset(p, 0, esz); + + return p; +} + + +void SparseMat::removeNode(size_t hidx, size_t nidx, size_t previdx) +{ + Node* n = node(nidx); + if( previdx ) + { + Node* prev = node(previdx); + prev->next = n->next; + } + else + hdr->hashtab[hidx] = n->next; + n->next = hdr->freeList; + hdr->freeList = nidx; + --hdr->nodeCount; +} + + +SparseMatConstIterator::SparseMatConstIterator(const SparseMat* _m) +: m((SparseMat*)_m), hashidx(0), ptr(0) +{ + if(!_m || !_m->hdr) + return; + SparseMat::Hdr& hdr = *m->hdr; + const vector& htab = hdr.hashtab; + size_t i, hsize = htab.size(); + for( i = 0; i < hsize; i++ ) + { + size_t nidx = htab[i]; + if( nidx ) + { + hashidx = i; + ptr = &hdr.pool[nidx] + hdr.valueOffset; + return; + } + } +} + +SparseMatConstIterator& SparseMatConstIterator::operator ++() +{ + if( !ptr || !m || !m->hdr ) + return *this; + SparseMat::Hdr& hdr = *m->hdr; + size_t next = ((const SparseMat::Node*)(ptr - hdr.valueOffset))->next; + if( next ) + { + ptr = &hdr.pool[next] + hdr.valueOffset; + return *this; + } + size_t i = hashidx + 1, sz = hdr.hashtab.size(); + for( ; i < sz; i++ ) + { + size_t nidx = hdr.hashtab[i]; + if( nidx ) + { + hashidx = i; + ptr = &hdr.pool[nidx] + hdr.valueOffset; + return *this; + } + } + hashidx = sz; + ptr = 0; + return *this; +} + + +double norm( const SparseMat& src, int normType ) +{ + SparseMatConstIterator it = src.begin(); + + size_t i, N = src.nzcount(); + normType &= NORM_TYPE_MASK; + int type = src.type(); + double result = 0; + + CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 ); + + if( type == CV_32F ) + { + if( normType == NORM_INF ) + for( i = 0; i < N; i++, ++it ) + result = std::max(result, std::abs((double)*(const float*)it.ptr)); + else if( 
normType == NORM_L1 ) + for( i = 0; i < N; i++, ++it ) + result += std::abs(*(const float*)it.ptr); + else + for( i = 0; i < N; i++, ++it ) + { + double v = *(const float*)it.ptr; + result += v*v; + } + } + else if( type == CV_64F ) + { + if( normType == NORM_INF ) + for( i = 0; i < N; i++, ++it ) + result = std::max(result, std::abs(*(const double*)it.ptr)); + else if( normType == NORM_L1 ) + for( i = 0; i < N; i++, ++it ) + result += std::abs(*(const double*)it.ptr); + else + for( i = 0; i < N; i++, ++it ) + { + double v = *(const double*)it.ptr; + result += v*v; + } + } + else + CV_Error( CV_StsUnsupportedFormat, "Only 32f and 64f are supported" ); + + if( normType == NORM_L2 ) + result = std::sqrt(result); + return result; +} + +void minMaxLoc( const SparseMat& src, double* _minval, double* _maxval, int* _minidx, int* _maxidx ) +{ + SparseMatConstIterator it = src.begin(); + size_t i, N = src.nzcount(), d = src.hdr ? src.hdr->dims : 0; + int type = src.type(); + const int *minidx = 0, *maxidx = 0; + + if( type == CV_32F ) + { + float minval = FLT_MAX, maxval = -FLT_MAX; + for( i = 0; i < N; i++, ++it ) + { + float v = *(const float*)it.ptr; + if( v < minval ) + { + minval = v; + minidx = it.node()->idx; + } + if( v > maxval ) + { + maxval = v; + maxidx = it.node()->idx; + } + } + if( _minval ) + *_minval = minval; + if( _maxval ) + *_maxval = maxval; + } + else if( type == CV_64F ) + { + double minval = DBL_MAX, maxval = -DBL_MAX; + for( i = 0; i < N; i++, ++it ) + { + double v = *(const double*)it.ptr; + if( v < minval ) + { + minval = v; + minidx = it.node()->idx; + } + if( v > maxval ) + { + maxval = v; + maxidx = it.node()->idx; + } + } + if( _minval ) + *_minval = minval; + if( _maxval ) + *_maxval = maxval; + } + else + CV_Error( CV_StsUnsupportedFormat, "Only 32f and 64f are supported" ); + + if( _minidx ) + for( i = 0; i < d; i++ ) + _minidx[i] = minidx[i]; + if( _maxidx ) + for( i = 0; i < d; i++ ) + _maxidx[i] = maxidx[i]; +} + + +void normalize( 
const SparseMat& src, SparseMat& dst, double a, int norm_type ) +{ + double scale = 1; + if( norm_type == CV_L2 || norm_type == CV_L1 || norm_type == CV_C ) + { + scale = norm( src, norm_type ); + scale = scale > DBL_EPSILON ? a/scale : 0.; + } + else + CV_Error( CV_StsBadArg, "Unknown/unsupported norm type" ); + + src.convertTo( dst, -1, scale ); +} + +////////////////////// RotatedRect ////////////////////// + +void RotatedRect::points(Point2f pt[]) const +{ + double _angle = angle*CV_PI/180.; + float b = (float)cos(_angle)*0.5f; + float a = (float)sin(_angle)*0.5f; + + pt[0].x = center.x - a*size.height - b*size.width; + pt[0].y = center.y + b*size.height - a*size.width; + pt[1].x = center.x + a*size.height - b*size.width; + pt[1].y = center.y - b*size.height - a*size.width; + pt[2].x = 2*center.x - pt[0].x; + pt[2].y = 2*center.y - pt[0].y; + pt[3].x = 2*center.x - pt[1].x; + pt[3].y = 2*center.y - pt[1].y; +} + +Rect RotatedRect::boundingRect() const +{ + Point2f pt[4]; + points(pt); + Rect r(cvFloor(min(min(min(pt[0].x, pt[1].x), pt[2].x), pt[3].x)), + cvFloor(min(min(min(pt[0].y, pt[1].y), pt[2].y), pt[3].y)), + cvCeil(max(max(max(pt[0].x, pt[1].x), pt[2].x), pt[3].x)), + cvCeil(max(max(max(pt[0].y, pt[1].y), pt[2].y), pt[3].y))); + r.width -= r.x - 1; + r.height -= r.y - 1; + return r; +} + +} + +/* End of file. */ diff --git a/opencv/core/opencv2/core/core.hpp b/opencv/core/opencv2/core/core.hpp new file mode 100644 index 0000000..7308ee6 --- /dev/null +++ b/opencv/core/opencv2/core/core.hpp @@ -0,0 +1,4344 @@ +/*! \file core.hpp + \brief The Core Functionality + */ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_CORE_HPP__ +#define __OPENCV_CORE_HPP__ + +#include "types_c.h" +#include "version.hpp" + +#ifdef __cplusplus + +#ifndef SKIP_INCLUDES +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif // SKIP_INCLUDES + +/*! \namespace cv + Namespace where all the C++ OpenCV functionality resides +*/ +namespace cv { + +#undef abs +#undef min +#undef max +#undef Complex + +using std::vector; +using std::string; +using std::ptrdiff_t; + +template class CV_EXPORTS Size_; +template class CV_EXPORTS Point_; +template class CV_EXPORTS Rect_; +template class CV_EXPORTS Vec; +template class CV_EXPORTS Matx; + +typedef std::string String; +typedef std::basic_string WString; + +class Mat; +class SparseMat; +typedef Mat MatND; + +class CV_EXPORTS MatExpr; +class CV_EXPORTS MatOp_Base; +class CV_EXPORTS MatArg; +class CV_EXPORTS MatConstIterator; + +template class CV_EXPORTS Mat_; +template class CV_EXPORTS MatIterator_; +template class CV_EXPORTS MatConstIterator_; +template class CV_EXPORTS MatCommaInitializer_; + +CV_EXPORTS string fromUtf16(const WString& str); +CV_EXPORTS WString toUtf16(const string& str); + +CV_EXPORTS string format( const char* fmt, ... ); +CV_EXPORTS string tempfile( const char* suffix CV_DEFAULT(0)); + +// matrix decomposition types +enum { DECOMP_LU=0, DECOMP_SVD=1, DECOMP_EIG=2, DECOMP_CHOLESKY=3, DECOMP_QR=4, DECOMP_NORMAL=16 }; +enum { NORM_INF=1, NORM_L1=2, NORM_L2=4, NORM_TYPE_MASK=7, NORM_RELATIVE=8, NORM_MINMAX=32}; +enum { CMP_EQ=0, CMP_GT=1, CMP_GE=2, CMP_LT=3, CMP_LE=4, CMP_NE=5 }; +enum { GEMM_1_T=1, GEMM_2_T=2, GEMM_3_T=4 }; +enum { DFT_INVERSE=1, DFT_SCALE=2, DFT_ROWS=4, DFT_COMPLEX_OUTPUT=16, DFT_REAL_OUTPUT=32, + DCT_INVERSE = DFT_INVERSE, DCT_ROWS=DFT_ROWS }; + + +/*! + The standard OpenCV exception class. + Instances of the class are thrown by various functions and methods in the case of critical errors. 
+ */ +class CV_EXPORTS Exception : public std::exception +{ +public: + /*! + Default constructor + */ + Exception(); + /*! + Full constructor. Normally the constuctor is not called explicitly. + Instead, the macros CV_Error(), CV_Error_() and CV_Assert() are used. + */ + Exception(int _code, const string& _err, const string& _func, const string& _file, int _line); + virtual ~Exception() throw(); + + /*! + \return the error description and the context as a text string. + */ + virtual const char *what() const throw(); + void formatMessage(); + + string msg; ///< the formatted error message + + int code; ///< error code @see CVStatus + string err; ///< error description + string func; ///< function name. Available only when the compiler supports __func__ macro + string file; ///< source file name where the error has occured + int line; ///< line number in the source file where the error has occured +}; + + +//! Signals an error and raises the exception. + +/*! + By default the function prints information about the error to stderr, + then it either stops if setBreakOnError() had been called before or raises the exception. + It is possible to alternate error processing by using redirectError(). + + \param exc the exception raisen. + */ +CV_EXPORTS void error( const Exception& exc ); + +//! Sets/resets the break-on-error mode. + +/*! + When the break-on-error mode is set, the default error handler + issues a hardware exception, which can make debugging more convenient. + + \return the previous state + */ +CV_EXPORTS bool setBreakOnError(bool flag); + +typedef int (CV_CDECL *ErrorCallback)( int status, const char* func_name, + const char* err_msg, const char* file_name, + int line, void* userdata ); + +//! Sets the new error handler and the optional user data. + +/*! + The function sets the new error handler, called from cv::error(). + + \param errCallback the new error handler. If NULL, the default error handler is used. 
+ \param userdata the optional user data pointer, passed to the callback. + \param prevUserdata the optional output parameter where the previous user data pointer is stored + + \return the previous error handler +*/ +CV_EXPORTS ErrorCallback redirectError( ErrorCallback errCallback, + void* userdata=0, void** prevUserdata=0); + +#ifdef __GNUC__ +#define CV_Error( code, msg ) cv::error( cv::Exception(code, msg, __func__, __FILE__, __LINE__) ) +#define CV_Error_( code, args ) cv::error( cv::Exception(code, cv::format args, __func__, __FILE__, __LINE__) ) +#define CV_Assert( expr ) if((expr)) ; else cv::error( cv::Exception(CV_StsAssert, #expr, __func__, __FILE__, __LINE__) ) +#else +#define CV_Error( code, msg ) cv::error( cv::Exception(code, msg, "", __FILE__, __LINE__) ) +#define CV_Error_( code, args ) cv::error( cv::Exception(code, cv::format args, "", __FILE__, __LINE__) ) +#define CV_Assert( expr ) if((expr)) ; else cv::error( cv::Exception(CV_StsAssert, #expr, "", __FILE__, __LINE__) ) +#endif + +#ifdef _DEBUG +#define CV_DbgAssert(expr) CV_Assert(expr) +#else +#define CV_DbgAssert(expr) +#endif + +CV_EXPORTS void setNumThreads(int nthreads); +CV_EXPORTS int getNumThreads(); +CV_EXPORTS int getThreadNum(); + +//! Returns the number of ticks. + +/*! + The function returns the number of ticks since the certain event (e.g. when the machine was turned on). + It can be used to initialize cv::RNG or to measure a function execution time by reading the tick count + before and after the function call. The granularity of ticks depends on the hardware and OS used. Use + cv::getTickFrequency() to convert ticks to seconds. +*/ +CV_EXPORTS_W int64 getTickCount(); + +/*! + Returns the number of ticks per seconds. + + The function returns the number of ticks (as returned by cv::getTickCount()) per second. + The following code computes the execution time in milliseconds: + + \code + double exec_time = (double)getTickCount(); + // do something ... 
+ exec_time = ((double)getTickCount() - exec_time)*1000./getTickFrequency(); + \endcode +*/ +CV_EXPORTS_W double getTickFrequency(); + +/*! + Returns the number of CPU ticks. + + On platforms where the feature is available, the function returns the number of CPU ticks + since the certain event (normally, the system power-on moment). Using this function + one can accurately measure the execution time of very small code fragments, + for which cv::getTickCount() granularity is not enough. +*/ +CV_EXPORTS_W int64 getCPUTickCount(); + +/*! + Returns SSE etc. support status + + The function returns true if certain hardware features are available. + Currently, the following features are recognized: + - CV_CPU_MMX - MMX + - CV_CPU_SSE - SSE + - CV_CPU_SSE2 - SSE 2 + - CV_CPU_SSE3 - SSE 3 + - CV_CPU_SSSE3 - SSSE 3 + - CV_CPU_SSE4_1 - SSE 4.1 + - CV_CPU_SSE4_2 - SSE 4.2 + - CV_CPU_POPCNT - POPCOUNT + - CV_CPU_AVX - AVX + + \note {Note that the function output is not static. Once you called cv::useOptimized(false), + most of the hardware acceleration is disabled and thus the function will returns false, + until you call cv::useOptimized(true)} +*/ +CV_EXPORTS_W bool checkHardwareSupport(int feature); + +//! returns the number of CPUs (including hyper-threading) +CV_EXPORTS_W int getNumberOfCPUs(); + +/*! + Allocates memory buffer + + This is specialized OpenCV memory allocation function that returns properly aligned memory buffers. + The usage is identical to malloc(). The allocated buffers must be freed with cv::fastFree(). + If there is not enough memory, the function calls cv::error(), which raises an exception. + + \param bufSize buffer size in bytes + \return the allocated memory buffer. +*/ +CV_EXPORTS void* fastMalloc(size_t bufSize); + +/*! + Frees the memory allocated with cv::fastMalloc + + This is the corresponding deallocation function for cv::fastMalloc(). + When ptr==NULL, the function has no effect. 
+*/ +CV_EXPORTS void fastFree(void* ptr); + +template static inline _Tp* allocate(size_t n) +{ + return new _Tp[n]; +} + +template static inline void deallocate(_Tp* ptr, size_t) +{ + delete[] ptr; +} + +/*! + Aligns pointer by the certain number of bytes + + This small inline function aligns the pointer by the certian number of bytes by shifting + it forward by 0 or a positive offset. +*/ +template static inline _Tp* alignPtr(_Tp* ptr, int n=(int)sizeof(_Tp)) +{ + return (_Tp*)(((size_t)ptr + n-1) & -n); +} + +/*! + Aligns buffer size by the certain number of bytes + + This small inline function aligns a buffer size by the certian number of bytes by enlarging it. +*/ +static inline size_t alignSize(size_t sz, int n) +{ + return (sz + n-1) & -n; +} + +/*! + Turns on/off available optimization + + The function turns on or off the optimized code in OpenCV. Some optimization can not be enabled + or disabled, but, for example, most of SSE code in OpenCV can be temporarily turned on or off this way. + + \note{Since optimization may imply using special data structures, it may be unsafe + to call this function anywhere in the code. Instead, call it somewhere at the top level.} +*/ +CV_EXPORTS_W void setUseOptimized(bool onoff); + +/*! + Returns the current optimization status + + The function returns the current optimization status, which is controlled by cv::setUseOptimized(). +*/ +CV_EXPORTS_W bool useOptimized(); + +/*! 
+ The STL-compilant memory Allocator based on cv::fastMalloc() and cv::fastFree() +*/ +template class CV_EXPORTS Allocator +{ +public: + typedef _Tp value_type; + typedef value_type* pointer; + typedef const value_type* const_pointer; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef size_t size_type; + typedef ptrdiff_t difference_type; + template class rebind { typedef Allocator other; }; + + explicit Allocator() {} + ~Allocator() {} + explicit Allocator(Allocator const&) {} + template + explicit Allocator(Allocator const&) {} + + // address + pointer address(reference r) { return &r; } + const_pointer address(const_reference r) { return &r; } + + pointer allocate(size_type count, const void* =0) + { return reinterpret_cast(fastMalloc(count * sizeof (_Tp))); } + + void deallocate(pointer p, size_type) {fastFree(p); } + + size_type max_size() const + { return max(static_cast<_Tp>(-1)/sizeof(_Tp), 1); } + + void construct(pointer p, const _Tp& v) { new(static_cast(p)) _Tp(v); } + void destroy(pointer p) { p->~_Tp(); } +}; + +/////////////////////// Vec (used as element of multi-channel images ///////////////////// + +/*! + A helper class for cv::DataType + + The class is specialized for each fundamental numerical data type supported by OpenCV. + It provides DataDepth::value constant. 
+*/ +template class CV_EXPORTS DataDepth {}; + +template<> class DataDepth { public: enum { value = CV_8U, fmt=(int)'u' }; }; +template<> class DataDepth { public: enum { value = CV_8U, fmt=(int)'u' }; }; +template<> class DataDepth { public: enum { value = CV_8S, fmt=(int)'c' }; }; +template<> class DataDepth { public: enum { value = CV_8S, fmt=(int)'c' }; }; +template<> class DataDepth { public: enum { value = CV_16U, fmt=(int)'w' }; }; +template<> class DataDepth { public: enum { value = CV_16S, fmt=(int)'s' }; }; +template<> class DataDepth { public: enum { value = CV_32S, fmt=(int)'i' }; }; +// this is temporary solution to support 32-bit unsigned integers +template<> class DataDepth { public: enum { value = CV_32S, fmt=(int)'i' }; }; +template<> class DataDepth { public: enum { value = CV_32F, fmt=(int)'f' }; }; +template<> class DataDepth { public: enum { value = CV_64F, fmt=(int)'d' }; }; +template class DataDepth<_Tp*> { public: enum { value = CV_USRTYPE1, fmt=(int)'r' }; }; + + +////////////////////////////// Small Matrix /////////////////////////// + +/*! + A short numerical vector. + + This template class represents short numerical vectors (of 1, 2, 3, 4 ... elements) + on which you can perform basic arithmetical operations, access individual elements using [] operator etc. + The vectors are allocated on stack, as opposite to std::valarray, std::vector, cv::Mat etc., + which elements are dynamically allocated in the heap. + + The template takes 2 parameters: + -# _Tp element type + -# cn the number of elements + + In addition to the universal notation like Vec, you can use shorter aliases + for the most popular specialized variants of Vec, e.g. Vec3f ~ Vec. 
+ */ + +struct CV_EXPORTS Matx_AddOp {}; +struct CV_EXPORTS Matx_SubOp {}; +struct CV_EXPORTS Matx_ScaleOp {}; +struct CV_EXPORTS Matx_MulOp {}; +struct CV_EXPORTS Matx_MatMulOp {}; +struct CV_EXPORTS Matx_TOp {}; + +template class CV_EXPORTS Matx +{ +public: + typedef _Tp value_type; + typedef Matx<_Tp, MIN(m, n), 1> diag_type; + typedef Matx<_Tp, m, n> mat_type; + enum { depth = DataDepth<_Tp>::value, rows = m, cols = n, channels = rows*cols, + type = CV_MAKETYPE(depth, channels) }; + + //! default constructor + Matx(); + + Matx(_Tp v0); //!< 1x1 matrix + Matx(_Tp v0, _Tp v1); //!< 1x2 or 2x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2); //!< 1x3 or 3x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3); //!< 1x4, 2x2 or 4x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4); //!< 1x5 or 5x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5); //!< 1x6, 2x3, 3x2 or 6x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6); //!< 1x7 or 7x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7); //!< 1x8, 2x4, 4x2 or 8x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8); //!< 1x9, 3x3 or 9x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9); //!< 1x10, 2x5 or 5x2 or 10x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11); //!< 1x12, 2x6, 3x4, 4x3, 6x2 or 12x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11, + _Tp v12, _Tp v13, _Tp v14, _Tp v15); //!< 1x16, 4x4 or 16x1 matrix + explicit Matx(const _Tp* vals); //!< initialize from a plain array + + static Matx all(_Tp alpha); + static Matx zeros(); + static Matx ones(); + static Matx eye(); + static Matx diag(const diag_type& d); + static Matx randu(_Tp a, _Tp b); + static Matx randn(_Tp a, _Tp b); + + //! 
dot product computed with the default precision + _Tp dot(const Matx<_Tp, m, n>& v) const; + + //! dot product computed in double-precision arithmetics + double ddot(const Matx<_Tp, m, n>& v) const; + + //! convertion to another data type + template operator Matx() const; + + //! change the matrix shape + template Matx<_Tp, m1, n1> reshape() const; + + //! extract part of the matrix + template Matx<_Tp, m1, n1> get_minor(int i, int j) const; + + //! extract the matrix row + Matx<_Tp, 1, n> row(int i) const; + + //! extract the matrix column + Matx<_Tp, m, 1> col(int i) const; + + //! extract the matrix diagonal + Matx<_Tp, MIN(m,n), 1> diag() const; + + //! transpose the matrix + Matx<_Tp, n, m> t() const; + + //! invert matrix the matrix + Matx<_Tp, n, m> inv(int method=DECOMP_LU) const; + + //! solve linear system + template Matx<_Tp, n, l> solve(const Matx<_Tp, m, l>& rhs, int flags=DECOMP_LU) const; + Matx<_Tp, n, 1> solve(const Matx<_Tp, m, 1>& rhs, int method) const; + + //! multiply two matrices element-wise + Matx<_Tp, m, n> mul(const Matx<_Tp, m, n>& a) const; + + //! element access + const _Tp& operator ()(int i, int j) const; + _Tp& operator ()(int i, int j); + + //! 
1D element access + const _Tp& operator ()(int i) const; + _Tp& operator ()(int i); + + Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_AddOp); + Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_SubOp); + template Matx(const Matx<_Tp, m, n>& a, _T2 alpha, Matx_ScaleOp); + Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_MulOp); + template Matx(const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b, Matx_MatMulOp); + Matx(const Matx<_Tp, n, m>& a, Matx_TOp); + + _Tp val[m*n]; //< matrix elements +}; + + +typedef Matx Matx12f; +typedef Matx Matx12d; +typedef Matx Matx13f; +typedef Matx Matx13d; +typedef Matx Matx14f; +typedef Matx Matx14d; +typedef Matx Matx16f; +typedef Matx Matx16d; + +typedef Matx Matx21f; +typedef Matx Matx21d; +typedef Matx Matx31f; +typedef Matx Matx31d; +typedef Matx Matx41f; +typedef Matx Matx41d; +typedef Matx Matx61f; +typedef Matx Matx61d; + +typedef Matx Matx22f; +typedef Matx Matx22d; +typedef Matx Matx23f; +typedef Matx Matx23d; +typedef Matx Matx32f; +typedef Matx Matx32d; + +typedef Matx Matx33f; +typedef Matx Matx33d; + +typedef Matx Matx34f; +typedef Matx Matx34d; +typedef Matx Matx43f; +typedef Matx Matx43d; + +typedef Matx Matx44f; +typedef Matx Matx44d; +typedef Matx Matx66f; +typedef Matx Matx66d; + + +/*! + A short numerical vector. + + This template class represents short numerical vectors (of 1, 2, 3, 4 ... elements) + on which you can perform basic arithmetical operations, access individual elements using [] operator etc. + The vectors are allocated on stack, as opposite to std::valarray, std::vector, cv::Mat etc., + which elements are dynamically allocated in the heap. + + The template takes 2 parameters: + -# _Tp element type + -# cn the number of elements + + In addition to the universal notation like Vec, you can use shorter aliases + for the most popular specialized variants of Vec, e.g. Vec3f ~ Vec. 
+*/ +template class CV_EXPORTS Vec : public Matx<_Tp, cn, 1> +{ +public: + typedef _Tp value_type; + enum { depth = DataDepth<_Tp>::value, channels = cn, type = CV_MAKETYPE(depth, channels) }; + + //! default constructor + Vec(); + + Vec(_Tp v0); //!< 1-element vector constructor + Vec(_Tp v0, _Tp v1); //!< 2-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2); //!< 3-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3); //!< 4-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4); //!< 5-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5); //!< 6-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6); //!< 7-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7); //!< 8-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8); //!< 9-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9); //!< 10-element vector constructor + explicit Vec(const _Tp* values); + + Vec(const Vec<_Tp, cn>& v); + + static Vec all(_Tp alpha); + + //! per-element multiplication + Vec mul(const Vec<_Tp, cn>& v) const; + + /*! + cross product of the two 3D vectors. + + For other dimensionalities the exception is raised + */ + Vec cross(const Vec& v) const; + //! convertion to another data type + template operator Vec() const; + //! conversion to 4-element CvScalar. + operator CvScalar() const; + + /*! 
element access */ + const _Tp& operator [](int i) const; + _Tp& operator[](int i); + const _Tp& operator ()(int i) const; + _Tp& operator ()(int i); + + Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_AddOp); + Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_SubOp); + template Vec(const Matx<_Tp, cn, 1>& a, _T2 alpha, Matx_ScaleOp); +}; + + +/* \typedef + + Shorter aliases for the most popular specializations of Vec +*/ +typedef Vec Vec2b; +typedef Vec Vec3b; +typedef Vec Vec4b; + +typedef Vec Vec2s; +typedef Vec Vec3s; +typedef Vec Vec4s; + +typedef Vec Vec2w; +typedef Vec Vec3w; +typedef Vec Vec4w; + +typedef Vec Vec2i; +typedef Vec Vec3i; +typedef Vec Vec4i; +typedef Vec Vec6i; +typedef Vec Vec8i; + +typedef Vec Vec2f; +typedef Vec Vec3f; +typedef Vec Vec4f; +typedef Vec Vec6f; + +typedef Vec Vec2d; +typedef Vec Vec3d; +typedef Vec Vec4d; +typedef Vec Vec6d; + + +//////////////////////////////// Complex ////////////////////////////// + +/*! + A complex number class. + + The template class is similar and compatible with std::complex, however it provides slightly + more convenient access to the real and imaginary parts using through the simple field access, as opposite + to std::complex::real() and std::complex::imag(). +*/ +template class CV_EXPORTS Complex +{ +public: + + //! constructors + Complex(); + Complex( _Tp _re, _Tp _im=0 ); + Complex( const std::complex<_Tp>& c ); + + //! conversion to another data type + template operator Complex() const; + //! conjugation + Complex conj() const; + //! conversion to std::complex + operator std::complex<_Tp>() const; + + _Tp re, im; //< the real and the imaginary parts +}; + + +/*! + \typedef +*/ +typedef Complex Complexf; +typedef Complex Complexd; + + +//////////////////////////////// Point_ //////////////////////////////// + +/*! + template 2D point class. + + The class defines a point in 2D space. Data type of the point coordinates is specified + as a template parameter. 
There are a few shorter aliases available for user convenience. + See cv::Point, cv::Point2i, cv::Point2f and cv::Point2d. +*/ +template class CV_EXPORTS Point_ +{ +public: + typedef _Tp value_type; + + // various constructors + Point_(); + Point_(_Tp _x, _Tp _y); + Point_(const Point_& pt); + Point_(const CvPoint& pt); + Point_(const CvPoint2D32f& pt); + Point_(const Size_<_Tp>& sz); + Point_(const Vec<_Tp, 2>& v); + + Point_& operator = (const Point_& pt); + //! conversion to another data type + template operator Point_<_Tp2>() const; + + //! conversion to the old-style C structures + operator CvPoint() const; + operator CvPoint2D32f() const; + operator Vec<_Tp, 2>() const; + + //! dot product + _Tp dot(const Point_& pt) const; + //! dot product computed in double-precision arithmetics + double ddot(const Point_& pt) const; + //! checks whether the point is inside the specified rectangle + bool inside(const Rect_<_Tp>& r) const; + + _Tp x, y; //< the point coordinates +}; + +/*! + template 3D point class. + + The class defines a point in 3D space. Data type of the point coordinates is specified + as a template parameter. + + \see cv::Point3i, cv::Point3f and cv::Point3d +*/ +template class CV_EXPORTS Point3_ +{ +public: + typedef _Tp value_type; + + // various constructors + Point3_(); + Point3_(_Tp _x, _Tp _y, _Tp _z); + Point3_(const Point3_& pt); + explicit Point3_(const Point_<_Tp>& pt); + Point3_(const CvPoint3D32f& pt); + Point3_(const Vec<_Tp, 3>& v); + + Point3_& operator = (const Point3_& pt); + //! conversion to another data type + template operator Point3_<_Tp2>() const; + //! conversion to the old-style CvPoint... + operator CvPoint3D32f() const; + //! conversion to cv::Vec<> + operator Vec<_Tp, 3>() const; + + //! dot product + _Tp dot(const Point3_& pt) const; + //! dot product computed in double-precision arithmetics + double ddot(const Point3_& pt) const; + //! 
cross product of the 2 3D points + Point3_ cross(const Point3_& pt) const; + + _Tp x, y, z; //< the point coordinates +}; + +//////////////////////////////// Size_ //////////////////////////////// + +/*! + The 2D size class + + The class represents the size of a 2D rectangle, image size, matrix size etc. + Normally, cv::Size ~ cv::Size_ is used. +*/ +template class CV_EXPORTS Size_ +{ +public: + typedef _Tp value_type; + + //! various constructors + Size_(); + Size_(_Tp _width, _Tp _height); + Size_(const Size_& sz); + Size_(const CvSize& sz); + Size_(const CvSize2D32f& sz); + Size_(const Point_<_Tp>& pt); + + Size_& operator = (const Size_& sz); + //! the area (width*height) + _Tp area() const; + + //! conversion of another data type. + template operator Size_<_Tp2>() const; + + //! conversion to the old-style OpenCV types + operator CvSize() const; + operator CvSize2D32f() const; + + _Tp width, height; // the width and the height +}; + +//////////////////////////////// Rect_ //////////////////////////////// + +/*! + The 2D up-right rectangle class + + The class represents a 2D rectangle with coordinates of the specified data type. + Normally, cv::Rect ~ cv::Rect_ is used. +*/ +template class CV_EXPORTS Rect_ +{ +public: + typedef _Tp value_type; + + //! various constructors + Rect_(); + Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height); + Rect_(const Rect_& r); + Rect_(const CvRect& r); + Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz); + Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2); + + Rect_& operator = ( const Rect_& r ); + //! the top-left corner + Point_<_Tp> tl() const; + //! the bottom-right corner + Point_<_Tp> br() const; + + //! size (width, height) of the rectangle + Size_<_Tp> size() const; + //! area (width*height) of the rectangle + _Tp area() const; + + //! conversion to another data type + template operator Rect_<_Tp2>() const; + //! conversion to the old-style CvRect + operator CvRect() const; + + //! 
checks whether the rectangle contains the point + bool contains(const Point_<_Tp>& pt) const; + + _Tp x, y, width, height; //< the top-left corner, as well as width and height of the rectangle +}; + + +/*! + \typedef + + shorter aliases for the most popular cv::Point_<>, cv::Size_<> and cv::Rect_<> specializations +*/ +typedef Point_ Point2i; +typedef Point2i Point; +typedef Size_ Size2i; +typedef Size2i Size; +typedef Rect_ Rect; +typedef Point_ Point2f; +typedef Point_ Point2d; +typedef Size_ Size2f; +typedef Point3_ Point3i; +typedef Point3_ Point3f; +typedef Point3_ Point3d; + + +/*! + The rotated 2D rectangle. + + The class represents rotated (i.e. not up-right) rectangles on a plane. + Each rectangle is described by the center point (mass center), length of each side + (represented by cv::Size2f structure) and the rotation angle in degrees. +*/ +class CV_EXPORTS RotatedRect +{ +public: + //! various constructors + RotatedRect(); + RotatedRect(const Point2f& _center, const Size2f& _size, float _angle); + RotatedRect(const CvBox2D& box); + + //! returns 4 vertices of the rectangle + void points(Point2f pts[]) const; + //! returns the minimal up-right rectangle containing the rotated rectangle + Rect boundingRect() const; + //! conversion to the old-style CvBox2D structure + operator CvBox2D() const; + + Point2f center; //< the rectangle mass center + Size2f size; //< width and height of the rectangle + float angle; //< the rotation angle. When the angle is 0, 90, 180, 270 etc., the rectangle becomes an up-right rectangle. +}; + +//////////////////////////////// Scalar_ /////////////////////////////// + +/*! + The template scalar class. + + This is partially specialized cv::Vec class with the number of elements = 4, i.e. a short vector of four elements. + Normally, cv::Scalar ~ cv::Scalar_ is used. +*/ +template class CV_EXPORTS Scalar_ : public Vec<_Tp, 4> +{ +public: + //! 
various constructors + Scalar_(); + Scalar_(_Tp v0, _Tp v1, _Tp v2=0, _Tp v3=0); + Scalar_(const CvScalar& s); + Scalar_(_Tp v0); + + //! returns a scalar with all elements set to v0 + static Scalar_<_Tp> all(_Tp v0); + //! conversion to the old-style CvScalar + operator CvScalar() const; + + //! conversion to another data type + template operator Scalar_() const; + + //! per-element product + Scalar_<_Tp> mul(const Scalar_<_Tp>& t, double scale=1 ) const; + + // returns (v0, -v1, -v2, -v3) + Scalar_<_Tp> conj() const; + + // returns true iff v1 == v2 == v3 == 0 + bool isReal() const; +}; + +typedef Scalar_ Scalar; + +CV_EXPORTS void scalarToRawData(const Scalar& s, void* buf, int type, int unroll_to=0); + +//////////////////////////////// Range ///////////////////////////////// + +/*! + The 2D range class + + This is the class used to specify a continuous subsequence, i.e. part of a contour, or a column span in a matrix. +*/ +class CV_EXPORTS Range +{ +public: + Range(); + Range(int _start, int _end); + Range(const CvSlice& slice); + int size() const; + bool empty() const; + static Range all(); + operator CvSlice() const; + + int start, end; +}; + +/////////////////////////////// DataType //////////////////////////////// + +/*! + Informative template class for OpenCV "scalars". + + The class is specialized for each primitive numerical type supported by OpenCV (such as unsigned char or float), + as well as for more complex types, like cv::Complex<>, std::complex<>, cv::Vec<> etc. + The common property of all such types (called "scalars", do not confuse it with cv::Scalar_) + is that each of them is basically a tuple of numbers of the same type. Each "scalar" can be represented + by the depth id (CV_8U ... CV_64F) and the number of channels. + OpenCV matrices, 2D or nD, dense or sparse, can store "scalars", + as long as the number of channels does not exceed CV_CN_MAX. 
+*/ +template class DataType +{ +public: + typedef _Tp value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + + enum { generic_type = 1, depth = -1, channels = 1, fmt=0, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef bool value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef uchar value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef schar value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef schar value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef ushort value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef short value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = 
CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef int value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef float value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef double value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template class DataType > +{ +public: + typedef Vec<_Tp, cn> value_type; + typedef Vec::work_type, cn> work_type; + typedef _Tp channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = cn, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template class DataType > +{ +public: + typedef std::complex<_Tp> value_type; + typedef value_type work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Complex<_Tp> value_type; + typedef value_type work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Point_<_Tp> value_type; + typedef 
Point_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Point3_<_Tp> value_type; + typedef Point3_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 3, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Size_<_Tp> value_type; + typedef Size_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Rect_<_Tp> value_type; + typedef Rect_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 4, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Scalar_<_Tp> value_type; + typedef Scalar_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 4, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template<> class DataType +{ +public: + typedef Range value_type; + typedef value_type work_type; + typedef int channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + + +//////////////////// generic_type ref-counting pointer class for C/C++ objects //////////////////////// + +/*! 
+ Smart pointer to dynamically allocated objects. + + This is template pointer-wrapping class that stores the associated reference counter along with the + object pointer. The class is similar to std::smart_ptr<> from the recent addons to the C++ standard, + but is shorter to write :) and self-contained (i.e. does add any dependency on the compiler or an external library). + + Basically, you can use "Ptr ptr" (or faster "const Ptr& ptr" for read-only access) + everywhere instead of "MyObjectType* ptr", where MyObjectType is some C structure or a C++ class. + To make it all work, you need to specialize Ptr<>::delete_obj(), like: + + \code + template<> void Ptr::delete_obj() { call_destructor_func(obj); } + \endcode + + \note{if MyObjectType is a C++ class with a destructor, you do not need to specialize delete_obj(), + since the default implementation calls "delete obj;"} + + \note{Another good property of the class is that the operations on the reference counter are atomic, + i.e. it is safe to use the class in multi-threaded applications} +*/ +template class CV_EXPORTS Ptr +{ +public: + //! empty constructor + Ptr(); + //! take ownership of the pointer. The associated reference counter is allocated and set to 1 + Ptr(_Tp* _obj); + //! calls release() + ~Ptr(); + //! copy constructor. Copies the members and calls addref() + Ptr(const Ptr& ptr); + //! copy operator. Calls ptr.addref() and release() before copying the members + Ptr& operator = (const Ptr& ptr); + //! increments the reference counter + void addref(); + //! decrements the reference counter. If it reaches 0, delete_obj() is called + void release(); + //! deletes the object. Override if needed + void delete_obj(); + //! returns true iff obj==NULL + bool empty() const; + + + //! helper operators making "Ptr ptr" use very similar to "T* ptr". + _Tp* operator -> (); + const _Tp* operator -> () const; + + operator _Tp* (); + operator const _Tp*() const; + +protected: + _Tp* obj; //< the object pointer. 
+ int* refcount; //< the associated reference counter +}; + + +//////////////////////// Input/Output Array Arguments ///////////////////////////////// + +/*! + Proxy datatype for passing Mat's and vector<>'s as input parameters + */ +class CV_EXPORTS _InputArray +{ +public: + enum { KIND_SHIFT=16, NONE=0< _InputArray(const vector<_Tp>& vec); + template _InputArray(const vector >& vec); + _InputArray(const vector& vec); + template _InputArray(const Matx<_Tp, m, n>& matx); + _InputArray(const Scalar& s); + _InputArray(const double& val); + virtual Mat getMat(int i=-1) const; + virtual void getMatVector(vector& mv) const; + virtual int kind() const; + virtual Size size(int i=-1) const; + virtual size_t total(int i=-1) const; + virtual int type(int i=-1) const; + virtual int depth(int i=-1) const; + virtual int channels(int i=-1) const; + virtual bool empty() const; + + int flags; + void* obj; + Size sz; +}; + + +enum +{ + DEPTH_MASK_8U = 1 << CV_8U, + DEPTH_MASK_8S = 1 << CV_8S, + DEPTH_MASK_16U = 1 << CV_16U, + DEPTH_MASK_16S = 1 << CV_16S, + DEPTH_MASK_32S = 1 << CV_32S, + DEPTH_MASK_32F = 1 << CV_32F, + DEPTH_MASK_64F = 1 << CV_64F, + DEPTH_MASK_ALL = (DEPTH_MASK_64F<<1)-1, + DEPTH_MASK_ALL_BUT_8S = DEPTH_MASK_ALL & ~DEPTH_MASK_8S, + DEPTH_MASK_FLT = DEPTH_MASK_32F + DEPTH_MASK_64F +}; + + +/*! 
+ Proxy datatype for passing Mat's and vector<>'s as input parameters + */ +class CV_EXPORTS _OutputArray : public _InputArray +{ +public: + _OutputArray(); + _OutputArray(Mat& m); + template _OutputArray(vector<_Tp>& vec); + template _OutputArray(vector >& vec); + _OutputArray(vector& vec); + template _OutputArray(Matx<_Tp, m, n>& matx); + virtual bool fixedSize() const; + virtual bool fixedType() const; + virtual bool needed() const; + virtual Mat& getMatRef(int i=-1) const; + virtual void create(Size sz, int type, int i=-1, bool allocateVector=false, int fixedDepthMask=0) const; + virtual void create(int rows, int cols, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const; + virtual void create(int dims, const int* size, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const; + virtual void release() const; + virtual void clear() const; +}; + +typedef const _InputArray& InputArray; +typedef InputArray InputArrayOfArrays; +typedef const _OutputArray& OutputArray; +typedef OutputArray OutputArrayOfArrays; +typedef OutputArray InputOutputArray; +typedef OutputArray InputOutputArrayOfArrays; + +CV_EXPORTS OutputArray noArray(); + +/////////////////////////////////////// Mat /////////////////////////////////////////// + +enum { MAGIC_MASK=0xFFFF0000, TYPE_MASK=0x00000FFF, DEPTH_MASK=7 }; + +static inline size_t getElemSize(int type) { return CV_ELEM_SIZE(type); } + +/*! + Custom array allocator + +*/ +class CV_EXPORTS MatAllocator +{ +public: + MatAllocator() {} + virtual ~MatAllocator() {} + virtual void allocate(int dims, const int* sizes, int type, int*& refcount, + uchar*& datastart, uchar*& data, size_t* step) = 0; + virtual void deallocate(int* refcount, uchar* datastart, uchar* data) = 0; +}; + +/*! + The n-dimensional matrix class. + + The class represents an n-dimensional dense numerical array that can act as + a matrix, image, optical flow map, 3-focal tensor etc. 
+ It is very similar to CvMat and CvMatND types from earlier versions of OpenCV, + and similarly to those types, the matrix can be multi-channel. It also fully supports ROI mechanism. + + There are many different ways to create cv::Mat object. Here are the some popular ones: +
    +
  • using cv::Mat::create(nrows, ncols, type) method or + the similar constructor cv::Mat::Mat(nrows, ncols, type[, fill_value]) constructor. + A new matrix of the specified size and specifed type will be allocated. + "type" has the same meaning as in cvCreateMat function, + e.g. CV_8UC1 means 8-bit single-channel matrix, CV_32FC2 means 2-channel (i.e. complex) + floating-point matrix etc: + + \code + // make 7x7 complex matrix filled with 1+3j. + cv::Mat M(7,7,CV_32FC2,Scalar(1,3)); + // and now turn M to 100x60 15-channel 8-bit matrix. + // The old content will be deallocated + M.create(100,60,CV_8UC(15)); + \endcode + + As noted in the introduction of this chapter, Mat::create() + will only allocate a new matrix when the current matrix dimensionality + or type are different from the specified. + +
  • by using a copy constructor or assignment operator, where on the right side it can + be a matrix or expression, see below. Again, as noted in the introduction, + matrix assignment is O(1) operation because it only copies the header + and increases the reference counter. cv::Mat::clone() method can be used to get a full + (a.k.a. deep) copy of the matrix when you need it. + +
  • by constructing a header for a part of another matrix. It can be a single row, single column, + several rows, several columns, rectangular region in the matrix (called a minor in algebra) or + a diagonal. Such operations are also O(1), because the new header will reference the same data. + You can actually modify a part of the matrix using this feature, e.g. + + \code + // add 5-th row, multiplied by 3 to the 3rd row + M.row(3) = M.row(3) + M.row(5)*3; + + // now copy 7-th column to the 1-st column + // M.col(1) = M.col(7); // this will not work + Mat M1 = M.col(1); + M.col(7).copyTo(M1); + + // create new 320x240 image + cv::Mat img(Size(320,240),CV_8UC3); + // select a roi + cv::Mat roi(img, Rect(10,10,100,100)); + // fill the ROI with (0,255,0) (which is green in RGB space); + // the original 320x240 image will be modified + roi = Scalar(0,255,0); + \endcode + + Thanks to the additional cv::Mat::datastart and cv::Mat::dataend members, it is possible to + compute the relative sub-matrix position in the main "container" matrix using cv::Mat::locateROI(): + + \code + Mat A = Mat::eye(10, 10, CV_32S); + // extracts A columns, 1 (inclusive) to 3 (exclusive). + Mat B = A(Range::all(), Range(1, 3)); + // extracts B rows, 5 (inclusive) to 9 (exclusive). + // that is, C ~ A(Range(5, 9), Range(1, 3)) + Mat C = B(Range(5, 9), Range::all()); + Size size; Point ofs; + C.locateROI(size, ofs); + // size will be (width=10,height=10) and the ofs will be (x=1, y=5) + \endcode + + As in the case of whole matrices, if you need a deep copy, use cv::Mat::clone() method + of the extracted sub-matrices. + +
  • by making a header for user-allocated-data. It can be useful for +
      +
    1. processing "foreign" data using OpenCV (e.g. when you implement + a DirectShow filter or a processing module for gstreamer etc.), e.g. + + \code + void process_video_frame(const unsigned char* pixels, + int width, int height, int step) + { + cv::Mat img(height, width, CV_8UC3, pixels, step); + cv::GaussianBlur(img, img, cv::Size(7,7), 1.5, 1.5); + } + \endcode + +
    2. for quick initialization of small matrices and/or super-fast element access + + \code + double m[3][3] = {{a, b, c}, {d, e, f}, {g, h, i}}; + cv::Mat M = cv::Mat(3, 3, CV_64F, m).inv(); + \endcode +
    + + partial yet very common cases of this "user-allocated data" case are conversions + from CvMat and IplImage to cv::Mat. For this purpose there are special constructors + taking pointers to CvMat or IplImage and the optional + flag indicating whether to copy the data or not. + + Backward conversion from cv::Mat to CvMat or IplImage is provided via cast operators + cv::Mat::operator CvMat() an cv::Mat::operator IplImage(). + The operators do not copy the data. + + + \code + IplImage* img = cvLoadImage("greatwave.jpg", 1); + Mat mtx(img); // convert IplImage* -> cv::Mat + CvMat oldmat = mtx; // convert cv::Mat -> CvMat + CV_Assert(oldmat.cols == img->width && oldmat.rows == img->height && + oldmat.data.ptr == (uchar*)img->imageData && oldmat.step == img->widthStep); + \endcode + +
  • by using MATLAB-style matrix initializers, cv::Mat::zeros(), cv::Mat::ones(), cv::Mat::eye(), e.g.: + + \code + // create a double-precision identity martix and add it to M. + M += Mat::eye(M.rows, M.cols, CV_64F); + \endcode + +
  • by using comma-separated initializer: + + \code + // create 3x3 double-precision identity matrix + Mat M = (Mat_(3,3) << 1, 0, 0, 0, 1, 0, 0, 0, 1); + \endcode + + here we first call constructor of cv::Mat_ class (that we describe further) with the proper matrix, + and then we just put "<<" operator followed by comma-separated values that can be constants, + variables, expressions etc. Also, note the extra parentheses that are needed to avoid compiler errors. + +
+ + Once matrix is created, it will be automatically managed by using reference-counting mechanism + (unless the matrix header is built on top of user-allocated data, + in which case you should handle the data by yourself). + The matrix data will be deallocated when no one points to it; + if you want to release the data pointed by a matrix header before the matrix destructor is called, + use cv::Mat::release(). + + The next important thing to learn about the matrix class is element access. Here is how the matrix is stored. + The elements are stored in row-major order (row by row). The cv::Mat::data member points to the first element of the first row, + cv::Mat::rows contains the number of matrix rows and cv::Mat::cols - the number of matrix columns. There is yet another member, + cv::Mat::step that is used to actually compute address of a matrix element. cv::Mat::step is needed because the matrix can be + a part of another matrix or because there can some padding space in the end of each row for a proper alignment. + + \image html roi.png + + Given these parameters, address of the matrix element M_{ij} is computed as following: + + addr(M_{ij})=M.data + M.step*i + j*M.elemSize() + + if you know the matrix element type, e.g. it is float, then you can use cv::Mat::at() method: + + addr(M_{ij})=&M.at(i,j) + + (where & is used to convert the reference returned by cv::Mat::at() to a pointer). 
+ if you need to process a whole row of matrix, the most efficient way is to get + the pointer to the row first, and then just use plain C operator []: + + \code + // compute sum of positive matrix elements + // (assuming that M is double-precision matrix) + double sum=0; + for(int i = 0; i < M.rows; i++) + { + const double* Mi = M.ptr(i); + for(int j = 0; j < M.cols; j++) + sum += std::max(Mi[j], 0.); + } + \endcode + + Some operations, like the above one, do not actually depend on the matrix shape, + they just process elements of a matrix one by one (or elements from multiple matrices + that are sitting in the same place, e.g. matrix addition). Such operations are called + element-wise and it makes sense to check whether all the input/output matrices are continuous, + i.e. have no gaps in the end of each row, and if yes, process them as a single long row: + + \code + // compute sum of positive matrix elements, optimized variant + double sum=0; + int cols = M.cols, rows = M.rows; + if(M.isContinuous()) + { + cols *= rows; + rows = 1; + } + for(int i = 0; i < rows; i++) + { + const double* Mi = M.ptr(i); + for(int j = 0; j < cols; j++) + sum += std::max(Mi[j], 0.); + } + \endcode + in the case of continuous matrix the outer loop body will be executed just once, + so the overhead will be smaller, which will be especially noticeable in the case of small matrices. + + Finally, there are STL-style iterators that are smart enough to skip gaps between successive rows: + \code + // compute sum of positive matrix elements, iterator-based variant + double sum=0; + MatConstIterator_ it = M.begin(), it_end = M.end(); + for(; it != it_end; ++it) + sum += std::max(*it, 0.); + \endcode + + The matrix iterators are random-access iterators, so they can be passed + to any STL algorithm, including std::sort(). +*/ +class CV_EXPORTS Mat +{ +public: + //! default constructor + Mat(); + //! 
constructs 2D matrix of the specified size and type + // (_type is CV_8UC1, CV_64FC3, CV_32SC(12) etc.) + Mat(int _rows, int _cols, int _type); + Mat(Size _size, int _type); + //! constucts 2D matrix and fills it with the specified value _s. + Mat(int _rows, int _cols, int _type, const Scalar& _s); + Mat(Size _size, int _type, const Scalar& _s); + + //! constructs n-dimensional matrix + Mat(int _ndims, const int* _sizes, int _type); + Mat(int _ndims, const int* _sizes, int _type, const Scalar& _s); + + //! copy constructor + Mat(const Mat& m); + //! constructor for matrix headers pointing to user-allocated data + Mat(int _rows, int _cols, int _type, void* _data, size_t _step=AUTO_STEP); + Mat(Size _size, int _type, void* _data, size_t _step=AUTO_STEP); + Mat(int _ndims, const int* _sizes, int _type, void* _data, const size_t* _steps=0); + + //! creates a matrix header for a part of the bigger matrix + Mat(const Mat& m, const Range& rowRange, const Range& colRange=Range::all()); + Mat(const Mat& m, const Rect& roi); + Mat(const Mat& m, const Range* ranges); + //! converts old-style CvMat to the new matrix; the data is not copied by default + Mat(const CvMat* m, bool copyData=false); + //! converts old-style CvMatND to the new matrix; the data is not copied by default + Mat(const CvMatND* m, bool copyData=false); + //! converts old-style IplImage to the new matrix; the data is not copied by default + Mat(const IplImage* img, bool copyData=false); + //! builds matrix from std::vector with or without copying the data + template explicit Mat(const vector<_Tp>& vec, bool copyData=false); + //! builds matrix from cv::Vec; the data is copied by default + template explicit Mat(const Vec<_Tp, n>& vec, + bool copyData=true); + //! builds matrix from cv::Matx; the data is copied by default + template explicit Mat(const Matx<_Tp, m, n>& mtx, + bool copyData=true); + //! builds matrix from a 2D point + template explicit Mat(const Point_<_Tp>& pt, bool copyData=true); + //! 
builds matrix from a 3D point + template explicit Mat(const Point3_<_Tp>& pt, bool copyData=true); + //! builds matrix from comma initializer + template explicit Mat(const MatCommaInitializer_<_Tp>& commaInitializer); + //! destructor - calls release() + ~Mat(); + //! assignment operators + Mat& operator = (const Mat& m); + Mat& operator = (const MatExpr& expr); + + //! returns a new matrix header for the specified row + Mat row(int y) const; + //! returns a new matrix header for the specified column + Mat col(int x) const; + //! ... for the specified row span + Mat rowRange(int startrow, int endrow) const; + Mat rowRange(const Range& r) const; + //! ... for the specified column span + Mat colRange(int startcol, int endcol) const; + Mat colRange(const Range& r) const; + //! ... for the specified diagonal + // (d=0 - the main diagonal, + // >0 - a diagonal from the lower half, + // <0 - a diagonal from the upper half) + Mat diag(int d=0) const; + //! constructs a square diagonal matrix which main diagonal is vector "d" + static Mat diag(const Mat& d); + + //! returns deep copy of the matrix, i.e. the data is copied + Mat clone() const; + //! copies the matrix content to "m". + // It calls m.create(this->size(), this->type()). + void copyTo( OutputArray m ) const; + //! copies those matrix elements to "m" that are marked with non-zero mask elements. + void copyTo( OutputArray m, InputArray mask ) const; + //! converts matrix to another datatype with optional scalng. See cvConvertScale. + void convertTo( OutputArray m, int rtype, double alpha=1, double beta=0 ) const; + + void assignTo( Mat& m, int type=-1 ) const; + + //! sets every matrix element to s + Mat& operator = (const Scalar& s); + //! sets some of the matrix elements to s, according to the mask + Mat& setTo(InputArray value, InputArray mask=noArray()); + //! creates alternative matrix header for the same data, with different + // number of channels and/or different number of rows. see cvReshape. 
+ Mat reshape(int _cn, int _rows=0) const; + Mat reshape(int _cn, int _newndims, const int* _newsz) const; + + //! matrix transposition by means of matrix expressions + MatExpr t() const; + //! matrix inversion by means of matrix expressions + MatExpr inv(int method=DECOMP_LU) const; + //! per-element matrix multiplication by means of matrix expressions + MatExpr mul(InputArray m, double scale=1) const; + + //! computes cross-product of 2 3D vectors + Mat cross(InputArray m) const; + //! computes dot-product + double dot(InputArray m) const; + + //! Matlab-style matrix initialization + static MatExpr zeros(int rows, int cols, int type); + static MatExpr zeros(Size size, int type); + static MatExpr zeros(int ndims, const int* sz, int type); + static MatExpr ones(int rows, int cols, int type); + static MatExpr ones(Size size, int type); + static MatExpr ones(int ndims, const int* sz, int type); + static MatExpr eye(int rows, int cols, int type); + static MatExpr eye(Size size, int type); + + //! allocates new matrix data unless the matrix already has specified size and type. + // previous data is unreferenced if needed. + void create(int _rows, int _cols, int _type); + void create(Size _size, int _type); + void create(int _ndims, const int* _sizes, int _type); + + //! increases the reference counter; use with care to avoid memleaks + void addref(); + //! decreases reference counter; + // deallocates the data when reference counter reaches 0. + void release(); + + //! deallocates the matrix data + void deallocate(); + //! internal use function; properly re-allocates _size, _step arrays + void copySize(const Mat& m); + + //! reserves enough space to fit sz hyper-planes + void reserve(size_t sz); + //! resizes matrix to the specified number of hyper-planes + void resize(size_t sz); + //! resizes matrix to the specified number of hyper-planes; initializes the newly added elements + void resize(size_t sz, const Scalar& s); + //! 
internal function + void push_back_(const void* elem); + //! adds element to the end of 1d matrix (or possibly multiple elements when _Tp=Mat) + template void push_back(const _Tp& elem); + template void push_back(const Mat_<_Tp>& elem); + void push_back(const Mat& m); + //! removes several hyper-planes from bottom of the matrix + void pop_back(size_t nelems=1); + + //! locates matrix header within a parent matrix. See below + void locateROI( Size& wholeSize, Point& ofs ) const; + //! moves/resizes the current matrix ROI inside the parent matrix. + Mat& adjustROI( int dtop, int dbottom, int dleft, int dright ); + //! extracts a rectangular sub-matrix + // (this is a generalized form of row, rowRange etc.) + Mat operator()( Range rowRange, Range colRange ) const; + Mat operator()( const Rect& roi ) const; + Mat operator()( const Range* ranges ) const; + + //! converts header to CvMat; no data is copied + operator CvMat() const; + //! converts header to CvMatND; no data is copied + operator CvMatND() const; + //! converts header to IplImage; no data is copied + operator IplImage() const; + + template operator vector<_Tp>() const; + template operator Vec<_Tp, n>() const; + template operator Matx<_Tp, m, n>() const; + + //! returns true iff the matrix data is continuous + // (i.e. when there are no gaps between successive rows). + // similar to CV_IS_MAT_CONT(cvmat->type) + bool isContinuous() const; + + //! returns true if the matrix is a submatrix of another matrix + bool isSubmatrix() const; + + //! returns element size in bytes, + // similar to CV_ELEM_SIZE(cvmat->type) + size_t elemSize() const; + //! returns the size of element channel in bytes. + size_t elemSize1() const; + //! returns element type, similar to CV_MAT_TYPE(cvmat->type) + int type() const; + //! returns element type, similar to CV_MAT_DEPTH(cvmat->type) + int depth() const; + //! returns element type, similar to CV_MAT_CN(cvmat->type) + int channels() const; + //! 
returns step/elemSize1() + size_t step1(int i=0) const; + //! returns true if matrix data is NULL + bool empty() const; + //! returns the total number of matrix elements + size_t total() const; + + //! returns N if the matrix is 1-channel (N x ptdim) or ptdim-channel (1 x N) or (N x 1); negative number otherwise + int checkVector(int elemChannels, int depth=-1, bool requireContinuous=true) const; + + //! returns pointer to i0-th submatrix along the dimension #0 + uchar* ptr(int i0=0); + const uchar* ptr(int i0=0) const; + + //! returns pointer to (i0,i1) submatrix along the dimensions #0 and #1 + uchar* ptr(int i0, int i1); + const uchar* ptr(int i0, int i1) const; + + //! returns pointer to (i0,i1,i3) submatrix along the dimensions #0, #1, #2 + uchar* ptr(int i0, int i1, int i2); + const uchar* ptr(int i0, int i1, int i2) const; + + //! returns pointer to the matrix element + uchar* ptr(const int* idx); + //! returns read-only pointer to the matrix element + const uchar* ptr(const int* idx) const; + + template uchar* ptr(const Vec& idx); + template const uchar* ptr(const Vec& idx) const; + + //! template version of the above method + template _Tp* ptr(int i0=0); + template const _Tp* ptr(int i0=0) const; + + template _Tp* ptr(int i0, int i1); + template const _Tp* ptr(int i0, int i1) const; + + template _Tp* ptr(int i0, int i1, int i2); + template const _Tp* ptr(int i0, int i1, int i2) const; + + template _Tp* ptr(const int* idx); + template const _Tp* ptr(const int* idx) const; + + template _Tp* ptr(const Vec& idx); + template const _Tp* ptr(const Vec& idx) const; + + //! 
the same as above, with the pointer dereferencing + template _Tp& at(int i0=0); + template const _Tp& at(int i0=0) const; + + template _Tp& at(int i0, int i1); + template const _Tp& at(int i0, int i1) const; + + template _Tp& at(int i0, int i1, int i2); + template const _Tp& at(int i0, int i1, int i2) const; + + template _Tp& at(const int* idx); + template const _Tp& at(const int* idx) const; + + template _Tp& at(const Vec& idx); + template const _Tp& at(const Vec& idx) const; + + //! special versions for 2D arrays (especially convenient for referencing image pixels) + template _Tp& at(Point pt); + template const _Tp& at(Point pt) const; + + //! template methods for iteration over matrix elements. + // the iterators take care of skipping gaps in the end of rows (if any) + template MatIterator_<_Tp> begin(); + template MatIterator_<_Tp> end(); + template MatConstIterator_<_Tp> begin() const; + template MatConstIterator_<_Tp> end() const; + + enum { MAGIC_VAL=0x42FF0000, AUTO_STEP=0, CONTINUOUS_FLAG=CV_MAT_CONT_FLAG, SUBMATRIX_FLAG=CV_SUBMAT_FLAG }; + + /*! includes several bit-fields: + - the magic signature + - continuity flag + - depth + - number of channels + */ + int flags; + //! the matrix dimensionality, >= 2 + int dims; + //! the number of rows and columns or (-1, -1) when the matrix has more than 2 dimensions + int rows, cols; + //! pointer to the data + uchar* data; + + //! pointer to the reference counter; + // when matrix points to user-allocated data, the pointer is NULL + int* refcount; + + //! helper fields used in locateROI and adjustROI + uchar* datastart; + uchar* dataend; + uchar* datalimit; + + //! 
custom allocator + MatAllocator* allocator; + + struct CV_EXPORTS MSize + { + MSize(int* _p); + Size operator()() const; + const int& operator[](int i) const; + int& operator[](int i); + operator const int*() const; + bool operator == (const MSize& sz) const; + bool operator != (const MSize& sz) const; + + int* p; + }; + + struct CV_EXPORTS MStep + { + MStep(); + MStep(size_t s); + const size_t& operator[](int i) const; + size_t& operator[](int i); + operator size_t() const; + MStep& operator = (size_t s); + + size_t* p; + size_t buf[2]; + protected: + MStep& operator = (const MStep&); + }; + + MSize size; + MStep step; +}; + + +/*! + Random Number Generator + + The class implements RNG using Multiply-with-Carry algorithm +*/ +class CV_EXPORTS RNG +{ +public: + enum { UNIFORM=0, NORMAL=1 }; + + RNG(); + RNG(uint64 _state); + //! updates the state and returns the next 32-bit unsigned integer random number + unsigned next(); + + operator uchar(); + operator schar(); + operator ushort(); + operator short(); + operator unsigned(); + //! returns a random integer sampled uniformly from [0, N). + unsigned operator()(unsigned N); + unsigned operator ()(); + operator int(); + operator float(); + operator double(); + //! returns uniformly distributed integer random number from [a,b) range + int uniform(int a, int b); + //! returns uniformly distributed floating-point random number from [a,b) range + float uniform(float a, float b); + //! returns uniformly distributed double-precision floating-point random number from [a,b) range + double uniform(double a, double b); + void fill( InputOutputArray mat, int distType, InputArray a, InputArray b ); + //! returns Gaussian random variate with mean zero. + double gaussian(double sigma); + + uint64 state; +}; + + +/*! 
+ Termination criteria in iterative algorithms + */ +class CV_EXPORTS TermCriteria +{ +public: + enum + { + COUNT=1, //!< the maximum number of iterations or elements to compute + MAX_ITER=COUNT, //!< ditto + EPS=2 //!< the desired accuracy or change in parameters at which the iterative algorithm stops + }; + + //! default constructor + TermCriteria(); + //! full constructor + TermCriteria(int _type, int _maxCount, double _epsilon); + //! conversion from CvTermCriteria + TermCriteria(const CvTermCriteria& criteria); + //! conversion from CvTermCriteria + operator CvTermCriteria() const; + + int type; //!< the type of termination criteria: COUNT, EPS or COUNT + EPS + int maxCount; // the maximum number of iterations/elements + double epsilon; // the desired accuracy +}; + + +//! swaps two matrices +CV_EXPORTS void swap(Mat& a, Mat& b); + +//! converts array (CvMat or IplImage) to cv::Mat +CV_EXPORTS Mat cvarrToMat(const CvArr* arr, bool copyData=false, + bool allowND=true, int coiMode=0); +//! extracts Channel of Interest from CvMat or IplImage and makes cv::Mat out of it. +CV_EXPORTS void extractImageCOI(const CvArr* arr, OutputArray coiimg, int coi=-1); +//! inserts single-channel cv::Mat into a multi-channel CvMat or IplImage +CV_EXPORTS void insertImageCOI(InputArray coiimg, CvArr* arr, int coi=-1); + +//! adds one matrix to another (dst = src1 + src2) +CV_EXPORTS_W void add(InputArray src1, InputArray src2, OutputArray dst, + InputArray mask=noArray(), int dtype=-1); +//! subtracts one matrix from another (dst = src1 - src2) +CV_EXPORTS_W void subtract(InputArray src1, InputArray src2, OutputArray dst, + InputArray mask=noArray(), int dtype=-1); + +//! computes element-wise weighted product of the two arrays (dst = scale*src1*src2) +CV_EXPORTS_W void multiply(InputArray src1, InputArray src2, + OutputArray dst, double scale=1, int dtype=-1); + +//! 
computes element-wise weighted quotient of the two arrays (dst = scale*src1/src2) +CV_EXPORTS_W void divide(InputArray src1, InputArray src2, OutputArray dst, + double scale=1, int dtype=-1); + +//! computes element-wise weighted reciprocal of an array (dst = scale/src2) +CV_EXPORTS_W void divide(double scale, InputArray src2, + OutputArray dst, int dtype=-1); + +//! adds scaled array to another one (dst = alpha*src1 + src2) +CV_EXPORTS_W void scaleAdd(InputArray src1, double alpha, InputArray src2, OutputArray dst); + +//! computes weighted sum of two arrays (dst = alpha*src1 + beta*src2 + gamma) +CV_EXPORTS_W void addWeighted(InputArray src1, double alpha, InputArray src2, + double beta, double gamma, OutputArray dst, int dtype=-1); + +//! scales array elements, computes absolute values and converts the results to 8-bit unsigned integers: dst(i)=saturate_castabs(src(i)*alpha+beta) +CV_EXPORTS_W void convertScaleAbs(InputArray src, OutputArray dst, + double alpha=1, double beta=0); +//! transforms array of numbers using a lookup table: dst(i)=lut(src(i)) +CV_EXPORTS_W void LUT(InputArray src, InputArray lut, OutputArray dst, + int interpolation=0); + +//! computes sum of array elements +CV_EXPORTS_AS(sumElems) Scalar sum(InputArray src); +//! computes the number of nonzero array elements +CV_EXPORTS_W int countNonZero( InputArray src ); +//! computes mean value of selected array elements +CV_EXPORTS_W Scalar mean(InputArray src, InputArray mask=noArray()); +//! computes mean value and standard deviation of all or selected array elements +CV_EXPORTS_W void meanStdDev(InputArray src, OutputArray mean, OutputArray stddev, + InputArray mask=noArray()); +//! computes norm of the selected array part +CV_EXPORTS_W double norm(InputArray src1, int normType=NORM_L2, InputArray mask=noArray()); +//! 
computes norm of selected part of the difference between two arrays +CV_EXPORTS_W double norm(InputArray src1, InputArray src2, + int normType=NORM_L2, InputArray mask=noArray()); +//! scales and shifts array elements so that either the specified norm (alpha) or the minimum (alpha) and maximum (beta) array values get the specified values +CV_EXPORTS_W void normalize( InputArray src, OutputArray dst, double alpha=1, double beta=0, + int norm_type=NORM_L2, int dtype=-1, InputArray mask=noArray()); + +//! finds global minimum and maximum array elements and returns their values and their locations +CV_EXPORTS_W void minMaxLoc(InputArray src, CV_OUT double* minVal, + CV_OUT double* maxVal=0, CV_OUT Point* minLoc=0, + CV_OUT Point* maxLoc=0, InputArray mask=noArray()); +CV_EXPORTS void minMaxIdx(InputArray src, double* minVal, double* maxVal, + int* minIdx=0, int* maxIdx=0, InputArray mask=noArray()); + +//! transforms 2D matrix to 1D row or column vector by taking sum, minimum, maximum or mean value over all the rows +CV_EXPORTS_W void reduce(InputArray src, OutputArray dst, int dim, int rtype, int dtype=-1); + +//! makes multi-channel array out of several single-channel arrays +CV_EXPORTS void merge(const Mat* mv, size_t count, OutputArray dst); +//! makes multi-channel array out of several single-channel arrays +CV_EXPORTS_W void merge(const vector& mv, OutputArray dst); + +//! copies each plane of a multi-channel array to a dedicated array +CV_EXPORTS void split(const Mat& src, Mat* mvbegin); +//! copies each plane of a multi-channel array to a dedicated array +CV_EXPORTS_W void split(const Mat& m, CV_OUT vector& mv); + +//! 
copies selected channels from the input arrays to the selected channels of the output arrays +CV_EXPORTS void mixChannels(const Mat* src, size_t nsrcs, Mat* dst, size_t ndsts, + const int* fromTo, size_t npairs); +CV_EXPORTS void mixChannels(const vector& src, vector& dst, + const int* fromTo, size_t npairs); +CV_EXPORTS_W void mixChannels(InputArrayOfArrays src, InputArrayOfArrays dst, + const vector& fromTo); + +//! extracts a single channel from src (coi is 0-based index) +CV_EXPORTS_W void extractChannel(InputArray src, OutputArray dst, int coi); + +//! inserts a single channel to dst (coi is 0-based index) +CV_EXPORTS_W void insertChannel(InputArray src, InputOutputArray dst, int coi); + +//! reverses the order of the rows, columns or both in a matrix +CV_EXPORTS_W void flip(InputArray src, OutputArray dst, int flipCode); + +//! replicates the input matrix the specified number of times in the horizontal and/or vertical direction +CV_EXPORTS_W void repeat(InputArray src, int ny, int nx, OutputArray dst); +CV_EXPORTS Mat repeat(const Mat& src, int ny, int nx); + +CV_EXPORTS void hconcat(const Mat* src, size_t nsrc, OutputArray dst); +CV_EXPORTS void hconcat(InputArray src1, InputArray src2, OutputArray dst); +CV_EXPORTS_W void hconcat(InputArray src, OutputArray dst); + +CV_EXPORTS void vconcat(const Mat* src, size_t nsrc, OutputArray dst); +CV_EXPORTS void vconcat(InputArray src1, InputArray src2, OutputArray dst); +CV_EXPORTS_W void vconcat(InputArray src, OutputArray dst); + +//! computes bitwise conjunction of the two arrays (dst = src1 & src2) +CV_EXPORTS_W void bitwise_and(InputArray src1, InputArray src2, + OutputArray dst, InputArray mask=noArray()); +//! computes bitwise disjunction of the two arrays (dst = src1 | src2) +CV_EXPORTS_W void bitwise_or(InputArray src1, InputArray src2, + OutputArray dst, InputArray mask=noArray()); +//! 
computes bitwise exclusive-or of the two arrays (dst = src1 ^ src2) +CV_EXPORTS_W void bitwise_xor(InputArray src1, InputArray src2, + OutputArray dst, InputArray mask=noArray()); +//! inverts each bit of array (dst = ~src) +CV_EXPORTS_W void bitwise_not(InputArray src, OutputArray dst, + InputArray mask=noArray()); +//! computes element-wise absolute difference of two arrays (dst = abs(src1 - src2)) +CV_EXPORTS_W void absdiff(InputArray src1, InputArray src2, OutputArray dst); +//! set mask elements for those array elements which are within the element-specific bounding box (dst = lowerb <= src && src < upperb) +CV_EXPORTS_W void inRange(InputArray src, InputArray lowerb, + InputArray upperb, OutputArray dst); +//! compares elements of two arrays (dst = src1 src2) +CV_EXPORTS_W void compare(InputArray src1, InputArray src2, OutputArray dst, int cmpop); +//! computes per-element minimum of two arrays (dst = min(src1, src2)) +CV_EXPORTS_W void min(InputArray src1, InputArray src2, OutputArray dst); +//! computes per-element maximum of two arrays (dst = max(src1, src2)) +CV_EXPORTS_W void max(InputArray src1, InputArray src2, OutputArray dst); + +//! computes per-element minimum of two arrays (dst = min(src1, src2)) +CV_EXPORTS void min(const Mat& src1, const Mat& src2, Mat& dst); +//! computes per-element minimum of array and scalar (dst = min(src1, src2)) +CV_EXPORTS void min(const Mat& src1, double src2, Mat& dst); +//! computes per-element maximum of two arrays (dst = max(src1, src2)) +CV_EXPORTS void max(const Mat& src1, const Mat& src2, Mat& dst); +//! computes per-element maximum of array and scalar (dst = max(src1, src2)) +CV_EXPORTS void max(const Mat& src1, double src2, Mat& dst); + +//! computes square root of each matrix element (dst = src**0.5) +CV_EXPORTS_W void sqrt(InputArray src, OutputArray dst); +//! raises the input matrix elements to the specified power (b = a**power) +CV_EXPORTS_W void pow(InputArray src, double power, OutputArray dst); +//! 
computes exponent of each matrix element (dst = e**src) +CV_EXPORTS_W void exp(InputArray src, OutputArray dst); +//! computes natural logarithm of absolute value of each matrix element: dst = log(abs(src)) +CV_EXPORTS_W void log(InputArray src, OutputArray dst); +//! computes cube root of the argument +CV_EXPORTS_W float cubeRoot(float val); +//! computes the angle in degrees (0..360) of the vector (x,y) +CV_EXPORTS_W float fastAtan2(float y, float x); +//! converts polar coordinates to Cartesian +CV_EXPORTS_W void polarToCart(InputArray magnitude, InputArray angle, + OutputArray x, OutputArray y, bool angleInDegrees=false); +//! converts Cartesian coordinates to polar +CV_EXPORTS_W void cartToPolar(InputArray x, InputArray y, + OutputArray magnitude, OutputArray angle, + bool angleInDegrees=false); +//! computes angle (angle(i)) of each (x(i), y(i)) vector +CV_EXPORTS_W void phase(InputArray x, InputArray y, OutputArray angle, + bool angleInDegrees=false); +//! computes magnitude (magnitude(i)) of each (x(i), y(i)) vector +CV_EXPORTS_W void magnitude(InputArray x, InputArray y, OutputArray magnitude); +//! checks that each matrix element is within the specified range. +CV_EXPORTS_W bool checkRange(InputArray a, bool quiet=true, CV_OUT Point* pt=0, + double minVal=-DBL_MAX, double maxVal=DBL_MAX); +//! implements generalized matrix product algorithm GEMM from BLAS +CV_EXPORTS_W void gemm(InputArray src1, InputArray src2, double alpha, + InputArray src3, double gamma, OutputArray dst, int flags=0); +//! multiplies matrix by its transposition from the left or from the right +CV_EXPORTS_W void mulTransposed( InputArray src, OutputArray dst, bool aTa, + InputArray delta=noArray(), + double scale=1, int dtype=-1 ); +//! transposes the matrix +CV_EXPORTS_W void transpose(InputArray src, OutputArray dst); +//! performs affine transformation of each element of multi-channel input matrix +CV_EXPORTS_W void transform(InputArray src, OutputArray dst, InputArray m ); +//! 
performs perspective transformation of each element of multi-channel input matrix +CV_EXPORTS_W void perspectiveTransform(InputArray src, OutputArray dst, InputArray m ); + +//! extends the symmetrical matrix from the lower half or from the upper half +CV_EXPORTS_W void completeSymm(InputOutputArray mtx, bool lowerToUpper=false); +//! initializes scaled identity matrix +CV_EXPORTS_W void setIdentity(InputOutputArray mtx, const Scalar& s=Scalar(1)); +//! computes determinant of a square matrix +CV_EXPORTS_W double determinant(InputArray mtx); +//! computes trace of a matrix +CV_EXPORTS_W Scalar trace(InputArray mtx); +//! computes inverse or pseudo-inverse matrix +CV_EXPORTS_W double invert(InputArray src, OutputArray dst, int flags=DECOMP_LU); +//! solves linear system or a least-square problem +CV_EXPORTS_W bool solve(InputArray src1, InputArray src2, + OutputArray dst, int flags=DECOMP_LU); + +enum +{ + SORT_EVERY_ROW=0, + SORT_EVERY_COLUMN=1, + SORT_ASCENDING=0, + SORT_DESCENDING=16 +}; + +//! sorts independently each matrix row or each matrix column +CV_EXPORTS_W void sort(InputArray src, OutputArray dst, int flags); +//! sorts independently each matrix row or each matrix column +CV_EXPORTS_W void sortIdx(InputArray src, OutputArray dst, int flags); +//! finds real roots of a cubic polynomial +CV_EXPORTS_W int solveCubic(InputArray coeffs, OutputArray roots); +//! finds real and complex roots of a polynomial +CV_EXPORTS_W double solvePoly(InputArray coeffs, OutputArray roots, int maxIters=300); +//! finds eigenvalues of a symmetric matrix +CV_EXPORTS bool eigen(InputArray src, OutputArray eigenvalues, int lowindex=-1, + int highindex=-1); +//! 
finds eigenvalues and eigenvectors of a symmetric matrix +CV_EXPORTS bool eigen(InputArray src, OutputArray eigenvalues, + OutputArray eigenvectors, + int lowindex=-1, int highindex=-1); +CV_EXPORTS_W bool eigen(InputArray src, bool computeEigenvectors, + OutputArray eigenvalues, OutputArray eigenvectors); + +enum +{ + COVAR_SCRAMBLED=0, + COVAR_NORMAL=1, + COVAR_USE_AVG=2, + COVAR_SCALE=4, + COVAR_ROWS=8, + COVAR_COLS=16 +}; + +//! computes covariation matrix of a set of samples +CV_EXPORTS void calcCovarMatrix( const Mat* samples, int nsamples, Mat& covar, Mat& mean, + int flags, int ctype=CV_64F); +//! computes covariation matrix of a set of samples +CV_EXPORTS_W void calcCovarMatrix( InputArray samples, OutputArray covar, + OutputArray mean, int flags, int ctype=CV_64F); + +/*! + Principal Component Analysis + + The class PCA is used to compute the special basis for a set of vectors. + The basis will consist of eigenvectors of the covariance matrix computed + from the input set of vectors. After PCA is performed, vectors can be transformed from + the original high-dimensional space to the subspace formed by a few most + prominent eigenvectors (called the principal components), + corresponding to the largest eigenvalues of the covariation matrix. + Thus the dimensionality of the vector and the correlation between the coordinates is reduced. + + The following sample is the function that takes two matrices. The first one stores the set + of vectors (a row per vector) that is used to compute PCA, the second one stores another + "test" set of vectors (a row per vector) that are first compressed with PCA, + then reconstructed back and then the reconstruction error norm is computed and printed for each vector. 
+ + \code + using namespace cv; + + PCA compressPCA(const Mat& pcaset, int maxComponents, + const Mat& testset, Mat& compressed) + { + PCA pca(pcaset, // pass the data + Mat(), // we do not have a pre-computed mean vector, + // so let the PCA engine to compute it + CV_PCA_DATA_AS_ROW, // indicate that the vectors + // are stored as matrix rows + // (use CV_PCA_DATA_AS_COL if the vectors are + // the matrix columns) + maxComponents // specify, how many principal components to retain + ); + // if there is no test data, just return the computed basis, ready-to-use + if( !testset.data ) + return pca; + CV_Assert( testset.cols == pcaset.cols ); + + compressed.create(testset.rows, maxComponents, testset.type()); + + Mat reconstructed; + for( int i = 0; i < testset.rows; i++ ) + { + Mat vec = testset.row(i), coeffs = compressed.row(i), reconstructed; + // compress the vector, the result will be stored + // in the i-th row of the output matrix + pca.project(vec, coeffs); + // and then reconstruct it + pca.backProject(coeffs, reconstructed); + // and measure the error + printf("%d. diff = %g\n", i, norm(vec, reconstructed, NORM_L2)); + } + return pca; + } + \endcode +*/ +class CV_EXPORTS PCA +{ +public: + //! default constructor + PCA(); + //! the constructor that performs PCA + PCA(InputArray data, InputArray mean, int flags, int maxComponents=0); + //! operator that performs PCA. The previously stored data, if any, is released + PCA& operator()(InputArray data, InputArray mean, int flags, int maxComponents=0); + //! projects vector from the original space to the principal components subspace + Mat project(InputArray vec) const; + //! projects vector from the original space to the principal components subspace + void project(InputArray vec, OutputArray result) const; + //! reconstructs the original vector from the projection + Mat backProject(InputArray vec) const; + //! 
reconstructs the original vector from the projection + void backProject(InputArray vec, OutputArray result) const; + + Mat eigenvectors; //!< eigenvectors of the covariation matrix + Mat eigenvalues; //!< eigenvalues of the covariation matrix + Mat mean; //!< mean value subtracted before the projection and added after the back projection +}; + +CV_EXPORTS_W void PCACompute(InputArray data, CV_OUT InputOutputArray mean, + OutputArray eigenvectors, int maxComponents=0); + +CV_EXPORTS_W void PCAProject(InputArray data, InputArray mean, + InputArray eigenvectors, OutputArray result); + +CV_EXPORTS_W void PCABackProject(InputArray data, InputArray mean, + InputArray eigenvectors, OutputArray result); + + +/*! + Singular Value Decomposition class + + The class is used to compute Singular Value Decomposition of a floating-point matrix and then + use it to solve least-square problems, under-determined linear systems, invert matrices, + compute condition numbers etc. + + For a bit faster operation you can pass flags=SVD::MODIFY_A|... to modify the decomposed matrix + when it is not necessarily to preserve it. If you want to compute condition number of a matrix + or absolute value of its determinant - you do not need SVD::u or SVD::vt, + so you can pass flags=SVD::NO_UV|... . Another flag SVD::FULL_UV indicates that the full-size SVD::u and SVD::vt + must be computed, which is not necessary most of the time. +*/ +class CV_EXPORTS SVD +{ +public: + enum { MODIFY_A=1, NO_UV=2, FULL_UV=4 }; + //! the default constructor + SVD(); + //! the constructor that performs SVD + SVD( InputArray src, int flags=0 ); + //! the operator that performs SVD. The previously allocated SVD::u, SVD::w are SVD::vt are released. + SVD& operator ()( InputArray src, int flags=0 ); + + //! decomposes matrix and stores the results to user-provided matrices + static void compute( InputArray src, OutputArray w, + OutputArray u, OutputArray vt, int flags=0 ); + //! 
computes singular values of a matrix + static void compute( InputArray src, OutputArray w, int flags=0 ); + //! performs back substitution + static void backSubst( InputArray w, InputArray u, + InputArray vt, InputArray rhs, + OutputArray dst ); + + template static void compute( const Matx<_Tp, m, n>& a, + Matx<_Tp, nm, 1>& w, Matx<_Tp, m, nm>& u, Matx<_Tp, n, nm>& vt ); + template static void compute( const Matx<_Tp, m, n>& a, + Matx<_Tp, nm, 1>& w ); + template static void backSubst( const Matx<_Tp, nm, 1>& w, + const Matx<_Tp, m, nm>& u, const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs, Matx<_Tp, n, nb>& dst ); + + //! finds dst = arg min_{|dst|=1} |m*dst| + static void solveZ( InputArray src, OutputArray dst ); + //! performs back substitution, so that dst is the solution or pseudo-solution of m*dst = rhs, where m is the decomposed matrix + void backSubst( InputArray rhs, OutputArray dst ) const; + + Mat u, w, vt; +}; + +//! computes SVD of src +CV_EXPORTS_W void SVDecomp( InputArray src, CV_OUT OutputArray w, + CV_OUT OutputArray u, CV_OUT OutputArray vt, int flags=0 ); + +//! performs back substitution for the previously computed SVD +CV_EXPORTS_W void SVBackSubst( InputArray w, InputArray u, InputArray vt, + InputArray rhs, CV_OUT OutputArray dst ); + +//! computes Mahalanobis distance between two vectors: sqrt((v1-v2)'*icovar*(v1-v2)), where icovar is the inverse covariation matrix +CV_EXPORTS_W double Mahalanobis(InputArray v1, InputArray v2, InputArray icovar); +//! a synonym for Mahalanobis +CV_EXPORTS double Mahalonobis(InputArray v1, InputArray v2, InputArray icovar); + +//! performs forward or inverse 1D or 2D Discrete Fourier Transformation +CV_EXPORTS_W void dft(InputArray src, OutputArray dst, int flags=0, int nonzeroRows=0); +//! performs inverse 1D or 2D Discrete Fourier Transformation +CV_EXPORTS_W void idft(InputArray src, OutputArray dst, int flags=0, int nonzeroRows=0); +//! 
performs forward or inverse 1D or 2D Discrete Cosine Transformation +CV_EXPORTS_W void dct(InputArray src, OutputArray dst, int flags=0); +//! performs inverse 1D or 2D Discrete Cosine Transformation +CV_EXPORTS_W void idct(InputArray src, OutputArray dst, int flags=0); +//! computes element-wise product of the two Fourier spectrums. The second spectrum can optionally be conjugated before the multiplication +CV_EXPORTS_W void mulSpectrums(InputArray a, InputArray b, OutputArray c, + int flags, bool conjB=false); +//! computes the minimal vector size vecsize1 >= vecsize so that the dft() of the vector of length vecsize1 can be computed efficiently +CV_EXPORTS_W int getOptimalDFTSize(int vecsize); + +/*! + Various k-Means flags +*/ +enum +{ + KMEANS_RANDOM_CENTERS=0, // Chooses random centers for k-Means initialization + KMEANS_PP_CENTERS=2, // Uses k-Means++ algorithm for initialization + KMEANS_USE_INITIAL_LABELS=1 // Uses the user-provided labels for K-Means initialization +}; +//! clusters the input data using k-Means algorithm +CV_EXPORTS_W double kmeans( InputArray data, int K, CV_OUT InputOutputArray bestLabels, + TermCriteria criteria, int attempts, + int flags, OutputArray centers=noArray() ); + +//! returns the thread-local Random number generator +CV_EXPORTS RNG& theRNG(); + +//! returns the next unifomly-distributed random number of the specified type +template static inline _Tp randu() { return (_Tp)theRNG(); } + +//! fills array with uniformly-distributed random numbers from the range [low, high) +CV_EXPORTS_W void randu(InputOutputArray dst, InputArray low, InputArray high); + +//! fills array with normally-distributed random numbers with the specified mean and the standard deviation +CV_EXPORTS_W void randn(InputOutputArray dst, InputArray mean, InputArray stddev); + +//! 
shuffles the input array elements +CV_EXPORTS void randShuffle(InputOutputArray dst, double iterFactor=1., RNG* rng=0); +CV_EXPORTS_AS(randShuffle) void randShuffle_(InputOutputArray dst, double iterFactor=1.); + +//! draws the line segment (pt1, pt2) in the image +CV_EXPORTS_W void line(Mat& img, Point pt1, Point pt2, const Scalar& color, + int thickness=1, int lineType=8, int shift=0); + +//! draws the rectangle outline or a solid rectangle with the opposite corners pt1 and pt2 in the image +CV_EXPORTS_W void rectangle(Mat& img, Point pt1, Point pt2, + const Scalar& color, int thickness=1, + int lineType=8, int shift=0); + +//! draws the rectangle outline or a solid rectangle covering rec in the image +CV_EXPORTS void rectangle(Mat& img, Rect rec, + const Scalar& color, int thickness=1, + int lineType=8, int shift=0); + +//! draws the circle outline or a solid circle in the image +CV_EXPORTS_W void circle(Mat& img, Point center, int radius, + const Scalar& color, int thickness=1, + int lineType=8, int shift=0); + +//! draws an elliptic arc, ellipse sector or a rotated ellipse in the image +CV_EXPORTS_W void ellipse(Mat& img, Point center, Size axes, + double angle, double startAngle, double endAngle, + const Scalar& color, int thickness=1, + int lineType=8, int shift=0); + +//! draws a rotated ellipse in the image +CV_EXPORTS_W void ellipse(Mat& img, const RotatedRect& box, const Scalar& color, + int thickness=1, int lineType=8); + +//! draws a filled convex polygon in the image +CV_EXPORTS void fillConvexPoly(Mat& img, const Point* pts, int npts, + const Scalar& color, int lineType=8, + int shift=0); +CV_EXPORTS_W void fillConvexPoly(InputOutputArray img, InputArray points, + const Scalar& color, int lineType=8, + int shift=0); + +//! 
fills an area bounded by one or more polygons +CV_EXPORTS void fillPoly(Mat& img, const Point** pts, + const int* npts, int ncontours, + const Scalar& color, int lineType=8, int shift=0, + Point offset=Point() ); + +CV_EXPORTS_W void fillPoly(InputOutputArray img, InputArrayOfArrays pts, + const Scalar& color, int lineType=8, int shift=0, + Point offset=Point() ); + +//! draws one or more polygonal curves +CV_EXPORTS void polylines(Mat& img, const Point** pts, const int* npts, + int ncontours, bool isClosed, const Scalar& color, + int thickness=1, int lineType=8, int shift=0 ); + +CV_EXPORTS_W void polylines(InputOutputArray img, InputArrayOfArrays pts, + bool isClosed, const Scalar& color, + int thickness=1, int lineType=8, int shift=0 ); + +//! clips the line segment by the rectangle Rect(0, 0, imgSize.width, imgSize.height) +CV_EXPORTS bool clipLine(Size imgSize, CV_IN_OUT Point& pt1, CV_IN_OUT Point& pt2); + +//! clips the line segment by the rectangle imgRect +CV_EXPORTS_W bool clipLine(Rect imgRect, CV_OUT CV_IN_OUT Point& pt1, CV_OUT CV_IN_OUT Point& pt2); + +/*! + Line iterator class + + The class is used to iterate over all the pixels on the raster line + segment connecting two specified points. +*/ +class CV_EXPORTS LineIterator +{ +public: + //! intializes the iterator + LineIterator( const Mat& img, Point pt1, Point pt2, + int connectivity=8, bool leftToRight=false ); + //! returns pointer to the current pixel + uchar* operator *(); + //! prefix increment operator (++it). shifts iterator to the next pixel + LineIterator& operator ++(); + //! postfix increment operator (it++). shifts iterator to the next pixel + LineIterator operator ++(int); + //! returns coordinates of the current pixel + Point pos() const; + + uchar* ptr; + const uchar* ptr0; + int step, elemSize; + int err, count; + int minusDelta, plusDelta; + int minusStep, plusStep; +}; + +//! 
converts elliptic arc to a polygonal curve +CV_EXPORTS_W void ellipse2Poly( Point center, Size axes, int angle, + int arcStart, int arcEnd, int delta, + CV_OUT vector& pts ); + +enum +{ + FONT_HERSHEY_SIMPLEX = 0, + FONT_HERSHEY_PLAIN = 1, + FONT_HERSHEY_DUPLEX = 2, + FONT_HERSHEY_COMPLEX = 3, + FONT_HERSHEY_TRIPLEX = 4, + FONT_HERSHEY_COMPLEX_SMALL = 5, + FONT_HERSHEY_SCRIPT_SIMPLEX = 6, + FONT_HERSHEY_SCRIPT_COMPLEX = 7, + FONT_ITALIC = 16 +}; + +//! renders text string in the image +CV_EXPORTS_W void putText( Mat& img, const string& text, Point org, + int fontFace, double fontScale, Scalar color, + int thickness=1, int linetype=8, + bool bottomLeftOrigin=false ); + +//! returns bounding box of the text string +CV_EXPORTS_W Size getTextSize(const string& text, int fontFace, + double fontScale, int thickness, + CV_OUT int* baseLine); + +///////////////////////////////// Mat_<_Tp> //////////////////////////////////// + +/*! + Template matrix class derived from Mat + + The class Mat_ is a "thin" template wrapper on top of cv::Mat. It does not have any extra data fields, + nor it or cv::Mat have any virtual methods and thus references or pointers to these two classes + can be safely converted one to another. But do it with care, for example: + + \code + // create 100x100 8-bit matrix + Mat M(100,100,CV_8U); + // this will compile fine. no any data conversion will be done. + Mat_& M1 = (Mat_&)M; + // the program will likely crash at the statement below + M1(99,99) = 1.f; + \endcode + + While cv::Mat is sufficient in most cases, cv::Mat_ can be more convenient if you use a lot of element + access operations and if you know matrix type at compile time. 
+ Note that cv::Mat::at<_Tp>(int y, int x) and cv::Mat_<_Tp>::operator ()(int y, int x) do absolutely the + same thing and run at the same speed, but the latter is certainly shorter: + + \code + Mat_ M(20,20); + for(int i = 0; i < M.rows; i++) + for(int j = 0; j < M.cols; j++) + M(i,j) = 1./(i+j+1); + Mat E, V; + eigen(M,E,V); + cout << E.at(0,0)/E.at(M.rows-1,0); + \endcode + + It is easy to use Mat_ for multi-channel images/matrices - just pass cv::Vec as cv::Mat_ template parameter: + + \code + // allocate 320x240 color image and fill it with green (in RGB space) + Mat_ img(240, 320, Vec3b(0,255,0)); + // now draw a diagonal white line + for(int i = 0; i < 100; i++) + img(i,i)=Vec3b(255,255,255); + // and now modify the 2nd (red) channel of each pixel + for(int i = 0; i < img.rows; i++) + for(int j = 0; j < img.cols; j++) + img(i,j)[2] ^= (uchar)(i ^ j); // img(y,x)[c] accesses c-th channel of the pixel (x,y) + \endcode +*/ +template class CV_EXPORTS Mat_ : public Mat +{ +public: + typedef _Tp value_type; + typedef typename DataType<_Tp>::channel_type channel_type; + typedef MatIterator_<_Tp> iterator; + typedef MatConstIterator_<_Tp> const_iterator; + + //! default constructor + Mat_(); + //! equivalent to Mat(_rows, _cols, DataType<_Tp>::type) + Mat_(int _rows, int _cols); + //! constructor that sets each matrix element to specified value + Mat_(int _rows, int _cols, const _Tp& value); + //! equivalent to Mat(_size, DataType<_Tp>::type) + explicit Mat_(Size _size); + //! constructor that sets each matrix element to specified value + Mat_(Size _size, const _Tp& value); + //! n-dim array constructor + Mat_(int _ndims, const int* _sizes); + //! n-dim array constructor that sets each matrix element to specified value + Mat_(int _ndims, const int* _sizes, const _Tp& value); + //! copy/conversion contructor. If m is of different type, it's converted + Mat_(const Mat& m); + //! copy constructor + Mat_(const Mat_& m); + //! 
constructs a matrix on top of user-allocated data. step is in bytes(!!!), regardless of the type + Mat_(int _rows, int _cols, _Tp* _data, size_t _step=AUTO_STEP); + //! constructs n-dim matrix on top of user-allocated data. steps are in bytes(!!!), regardless of the type + Mat_(int _ndims, const int* _sizes, _Tp* _data, const size_t* _steps=0); + //! selects a submatrix + Mat_(const Mat_& m, const Range& rowRange, const Range& colRange=Range::all()); + //! selects a submatrix + Mat_(const Mat_& m, const Rect& roi); + //! selects a submatrix, n-dim version + Mat_(const Mat_& m, const Range* ranges); + //! from a matrix expression + explicit Mat_(const MatExpr& e); + //! makes a matrix out of Vec, std::vector, Point_ or Point3_. The matrix will have a single column + explicit Mat_(const vector<_Tp>& vec, bool copyData=false); + template explicit Mat_(const Vec::channel_type, n>& vec, bool copyData=true); + template explicit Mat_(const Matx::channel_type, m, n>& mtx, bool copyData=true); + explicit Mat_(const Point_::channel_type>& pt, bool copyData=true); + explicit Mat_(const Point3_::channel_type>& pt, bool copyData=true); + explicit Mat_(const MatCommaInitializer_<_Tp>& commaInitializer); + + Mat_& operator = (const Mat& m); + Mat_& operator = (const Mat_& m); + //! set all the elements to s. + Mat_& operator = (const _Tp& s); + //! assign a matrix expression + Mat_& operator = (const MatExpr& e); + + //! iterators; they are smart enough to skip gaps in the end of rows + iterator begin(); + iterator end(); + const_iterator begin() const; + const_iterator end() const; + + //! equivalent to Mat::create(_rows, _cols, DataType<_Tp>::type) + void create(int _rows, int _cols); + //! equivalent to Mat::create(_size, DataType<_Tp>::type) + void create(Size _size); + //! equivalent to Mat::create(_ndims, _sizes, DatType<_Tp>::type) + void create(int _ndims, const int* _sizes); + //! cross-product + Mat_ cross(const Mat_& m) const; + //! 
data type conversion + template operator Mat_() const; + //! overridden forms of Mat::row() etc. + Mat_ row(int y) const; + Mat_ col(int x) const; + Mat_ diag(int d=0) const; + Mat_ clone() const; + + //! overridden forms of Mat::elemSize() etc. + size_t elemSize() const; + size_t elemSize1() const; + int type() const; + int depth() const; + int channels() const; + size_t step1(int i=0) const; + //! returns step()/sizeof(_Tp) + size_t stepT(int i=0) const; + + //! overridden forms of Mat::zeros() etc. Data type is omitted, of course + static MatExpr zeros(int rows, int cols); + static MatExpr zeros(Size size); + static MatExpr zeros(int _ndims, const int* _sizes); + static MatExpr ones(int rows, int cols); + static MatExpr ones(Size size); + static MatExpr ones(int _ndims, const int* _sizes); + static MatExpr eye(int rows, int cols); + static MatExpr eye(Size size); + + //! some more overriden methods + Mat_ reshape(int _rows) const; + Mat_& adjustROI( int dtop, int dbottom, int dleft, int dright ); + Mat_ operator()( const Range& rowRange, const Range& colRange ) const; + Mat_ operator()( const Rect& roi ) const; + Mat_ operator()( const Range* ranges ) const; + + //! more convenient forms of row and element access operators + _Tp* operator [](int y); + const _Tp* operator [](int y) const; + + //! returns reference to the specified element + _Tp& operator ()(const int* idx); + //! returns read-only reference to the specified element + const _Tp& operator ()(const int* idx) const; + + //! returns reference to the specified element + template _Tp& operator ()(const Vec& idx); + //! returns read-only reference to the specified element + template const _Tp& operator ()(const Vec& idx) const; + + //! returns reference to the specified element (1D case) + _Tp& operator ()(int idx0); + //! returns read-only reference to the specified element (1D case) + const _Tp& operator ()(int idx0) const; + //! 
returns reference to the specified element (2D case) + _Tp& operator ()(int idx0, int idx1); + //! returns read-only reference to the specified element (2D case) + const _Tp& operator ()(int idx0, int idx1) const; + //! returns reference to the specified element (3D case) + _Tp& operator ()(int idx0, int idx1, int idx2); + //! returns read-only reference to the specified element (3D case) + const _Tp& operator ()(int idx0, int idx1, int idx2) const; + + _Tp& operator ()(Point pt); + const _Tp& operator ()(Point pt) const; + + //! conversion to vector. + operator vector<_Tp>() const; + //! conversion to Vec + template operator Vec::channel_type, n>() const; + //! conversion to Matx + template operator Matx::channel_type, m, n>() const; +}; + +typedef Mat_ Mat1b; +typedef Mat_ Mat2b; +typedef Mat_ Mat3b; +typedef Mat_ Mat4b; + +typedef Mat_ Mat1s; +typedef Mat_ Mat2s; +typedef Mat_ Mat3s; +typedef Mat_ Mat4s; + +typedef Mat_ Mat1w; +typedef Mat_ Mat2w; +typedef Mat_ Mat3w; +typedef Mat_ Mat4w; + +typedef Mat_ Mat1i; +typedef Mat_ Mat2i; +typedef Mat_ Mat3i; +typedef Mat_ Mat4i; + +typedef Mat_ Mat1f; +typedef Mat_ Mat2f; +typedef Mat_ Mat3f; +typedef Mat_ Mat4f; + +typedef Mat_ Mat1d; +typedef Mat_ Mat2d; +typedef Mat_ Mat3d; +typedef Mat_ Mat4d; + +//////////// Iterators & Comma initializers ////////////////// + +class CV_EXPORTS MatConstIterator +{ +public: + typedef uchar* value_type; + typedef ptrdiff_t difference_type; + typedef const uchar** pointer; + typedef uchar* reference; + typedef std::random_access_iterator_tag iterator_category; + + //! default constructor + MatConstIterator(); + //! constructor that sets the iterator to the beginning of the matrix + MatConstIterator(const Mat* _m); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator(const Mat* _m, int _row, int _col=0); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator(const Mat* _m, Point _pt); + //! 
constructor that sets the iterator to the specified element of the matrix + MatConstIterator(const Mat* _m, const int* _idx); + //! copy constructor + MatConstIterator(const MatConstIterator& it); + + //! copy operator + MatConstIterator& operator = (const MatConstIterator& it); + //! returns the current matrix element + uchar* operator *() const; + //! returns the i-th matrix element, relative to the current + uchar* operator [](ptrdiff_t i) const; + + //! shifts the iterator forward by the specified number of elements + MatConstIterator& operator += (ptrdiff_t ofs); + //! shifts the iterator backward by the specified number of elements + MatConstIterator& operator -= (ptrdiff_t ofs); + //! decrements the iterator + MatConstIterator& operator --(); + //! decrements the iterator + MatConstIterator operator --(int); + //! increments the iterator + MatConstIterator& operator ++(); + //! increments the iterator + MatConstIterator operator ++(int); + //! returns the current iterator position + Point pos() const; + //! returns the current iterator position + void pos(int* _idx) const; + ptrdiff_t lpos() const; + void seek(ptrdiff_t ofs, bool relative=false); + void seek(const int* _idx, bool relative=false); + + const Mat* m; + size_t elemSize; + uchar* ptr; + uchar* sliceStart; + uchar* sliceEnd; +}; + +/*! + Matrix read-only iterator + + */ +template +class CV_EXPORTS MatConstIterator_ : public MatConstIterator +{ +public: + typedef _Tp value_type; + typedef ptrdiff_t difference_type; + typedef const _Tp* pointer; + typedef const _Tp& reference; + typedef std::random_access_iterator_tag iterator_category; + + //! default constructor + MatConstIterator_(); + //! constructor that sets the iterator to the beginning of the matrix + MatConstIterator_(const Mat_<_Tp>* _m); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator_(const Mat_<_Tp>* _m, int _row, int _col=0); + //! 
constructor that sets the iterator to the specified element of the matrix + MatConstIterator_(const Mat_<_Tp>* _m, Point _pt); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator_(const Mat_<_Tp>* _m, const int* _idx); + //! copy constructor + MatConstIterator_(const MatConstIterator_& it); + + //! copy operator + MatConstIterator_& operator = (const MatConstIterator_& it); + //! returns the current matrix element + _Tp operator *() const; + //! returns the i-th matrix element, relative to the current + _Tp operator [](ptrdiff_t i) const; + + //! shifts the iterator forward by the specified number of elements + MatConstIterator_& operator += (ptrdiff_t ofs); + //! shifts the iterator backward by the specified number of elements + MatConstIterator_& operator -= (ptrdiff_t ofs); + //! decrements the iterator + MatConstIterator_& operator --(); + //! decrements the iterator + MatConstIterator_ operator --(int); + //! increments the iterator + MatConstIterator_& operator ++(); + //! increments the iterator + MatConstIterator_ operator ++(int); + //! returns the current iterator position + Point pos() const; +}; + + +/*! + Matrix read-write iterator + +*/ +template +class CV_EXPORTS MatIterator_ : public MatConstIterator_<_Tp> +{ +public: + typedef _Tp* pointer; + typedef _Tp& reference; + typedef std::random_access_iterator_tag iterator_category; + + //! the default constructor + MatIterator_(); + //! constructor that sets the iterator to the beginning of the matrix + MatIterator_(Mat_<_Tp>* _m); + //! constructor that sets the iterator to the specified element of the matrix + MatIterator_(Mat_<_Tp>* _m, int _row, int _col=0); + //! constructor that sets the iterator to the specified element of the matrix + MatIterator_(const Mat_<_Tp>* _m, Point _pt); + //! constructor that sets the iterator to the specified element of the matrix + MatIterator_(const Mat_<_Tp>* _m, const int* _idx); + //! 
copy constructor + MatIterator_(const MatIterator_& it); + //! copy operator + MatIterator_& operator = (const MatIterator_<_Tp>& it ); + + //! returns the current matrix element + _Tp& operator *() const; + //! returns the i-th matrix element, relative to the current + _Tp& operator [](ptrdiff_t i) const; + + //! shifts the iterator forward by the specified number of elements + MatIterator_& operator += (ptrdiff_t ofs); + //! shifts the iterator backward by the specified number of elements + MatIterator_& operator -= (ptrdiff_t ofs); + //! decrements the iterator + MatIterator_& operator --(); + //! decrements the iterator + MatIterator_ operator --(int); + //! increments the iterator + MatIterator_& operator ++(); + //! increments the iterator + MatIterator_ operator ++(int); +}; + +template class CV_EXPORTS MatOp_Iter_; + +/*! + Comma-separated Matrix Initializer + + The class instances are usually not created explicitly. + Instead, they are created on "matrix << firstValue" operator. + + The sample below initializes 2x2 rotation matrix: + + \code + double angle = 30, a = cos(angle*CV_PI/180), b = sin(angle*CV_PI/180); + Mat R = (Mat_(2,2) << a, -b, b, a); + \endcode +*/ +template class CV_EXPORTS MatCommaInitializer_ +{ +public: + //! the constructor, created by "matrix << firstValue" operator, where matrix is cv::Mat + MatCommaInitializer_(Mat_<_Tp>* _m); + //! the operator that takes the next value and put it to the matrix + template MatCommaInitializer_<_Tp>& operator , (T2 v); + //! 
another form of conversion operator + Mat_<_Tp> operator *() const; + operator Mat_<_Tp>() const; +protected: + MatIterator_<_Tp> it; +}; + + +template class CV_EXPORTS MatxCommaInitializer +{ +public: + MatxCommaInitializer(Matx<_Tp, m, n>* _mtx); + template MatxCommaInitializer<_Tp, m, n>& operator , (T2 val); + Matx<_Tp, m, n> operator *() const; + + Matx<_Tp, m, n>* dst; + int idx; +}; + +template class CV_EXPORTS VecCommaInitializer : public MatxCommaInitializer<_Tp, m, 1> +{ +public: + VecCommaInitializer(Vec<_Tp, m>* _vec); + template VecCommaInitializer<_Tp, m>& operator , (T2 val); + Vec<_Tp, m> operator *() const; +}; + +/*! + Automatically Allocated Buffer Class + + The class is used for temporary buffers in functions and methods. + If a temporary buffer is usually small (a few K's of memory), + but its size depends on the parameters, it makes sense to create a small + fixed-size array on stack and use it if it's large enough. If the required buffer size + is larger than the fixed size, another buffer of sufficient size is allocated dynamically + and released after the processing. Therefore, in typical cases, when the buffer size is small, + there is no overhead associated with malloc()/free(). + At the same time, there is no limit on the size of processed data. + + This is what AutoBuffer does. The template takes 2 parameters - type of the buffer elements and + the number of stack-allocated elements. Here is how the class is used: + + \code + void my_func(const cv::Mat& m) + { + cv::AutoBuffer buf; // create automatic buffer containing 1000 floats + + buf.allocate(m.rows); // if m.rows <= 1000, the pre-allocated buffer is used, + // otherwise the buffer of "m.rows" floats will be allocated + // dynamically and deallocated in cv::AutoBuffer destructor + ... + } + \endcode +*/ +template class CV_EXPORTS AutoBuffer +{ +public: + typedef _Tp value_type; + enum { buffer_padding = (int)((16 + sizeof(_Tp) - 1)/sizeof(_Tp)) }; + + //! 
the default contructor + AutoBuffer(); + //! constructor taking the real buffer size + AutoBuffer(size_t _size); + //! destructor. calls deallocate() + ~AutoBuffer(); + + //! allocates the new buffer of size _size. if the _size is small enough, stack-allocated buffer is used + void allocate(size_t _size); + //! deallocates the buffer if it was dynamically allocated + void deallocate(); + //! returns pointer to the real buffer, stack-allocated or head-allocated + operator _Tp* (); + //! returns read-only pointer to the real buffer, stack-allocated or head-allocated + operator const _Tp* () const; + +protected: + //! pointer to the real buffer, can point to buf if the buffer is small enough + _Tp* ptr; + //! size of the real buffer + size_t size; + //! pre-allocated buffer + _Tp buf[fixed_size+buffer_padding]; +}; + +/////////////////////////// multi-dimensional dense matrix ////////////////////////// + +/*! + n-Dimensional Dense Matrix Iterator Class. + + The class cv::NAryMatIterator is used for iterating over one or more n-dimensional dense arrays (cv::Mat's). + + The iterator is completely different from cv::Mat_ and cv::SparseMat_ iterators. + It iterates through the slices (or planes), not the elements, where "slice" is a continuous part of the arrays. + + Here is the example on how the iterator can be used to normalize 3D histogram: + + \code + void normalizeColorHist(Mat& hist) + { + #if 1 + // intialize iterator (the style is different from STL). + // after initialization the iterator will contain + // the number of slices or planes + // the iterator will go through + Mat* arrays[] = { &hist, 0 }; + Mat planes[1]; + NAryMatIterator it(arrays, planes); + double s = 0; + // iterate through the matrix. on each iteration + // it.planes[i] (of type Mat) will be set to the current plane of + // i-th n-dim matrix passed to the iterator constructor. 
+ for(int p = 0; p < it.nplanes; p++, ++it) + s += sum(it.planes[0])[0]; + it = NAryMatIterator(hist); + s = 1./s; + for(int p = 0; p < it.nplanes; p++, ++it) + it.planes[0] *= s; + #elif 1 + // this is a shorter implementation of the above + // using built-in operations on Mat + double s = sum(hist)[0]; + hist.convertTo(hist, hist.type(), 1./s, 0); + #else + // and this is even shorter one + // (assuming that the histogram elements are non-negative) + normalize(hist, hist, 1, 0, NORM_L1); + #endif + } + \endcode + + You can iterate through several matrices simultaneously as long as they have the same geometry + (dimensionality and all the dimension sizes are the same), which is useful for binary + and n-ary operations on such matrices. Just pass those matrices to cv::MatNDIterator. + Then, during the iteration it.planes[0], it.planes[1], ... will + be the slices of the corresponding matrices +*/ +class CV_EXPORTS NAryMatIterator +{ +public: + //! the default constructor + NAryMatIterator(); + //! the full constructor taking arbitrary number of n-dim matrices + NAryMatIterator(const Mat** arrays, uchar** ptrs, int narrays=-1); + //! the full constructor taking arbitrary number of n-dim matrices + NAryMatIterator(const Mat** arrays, Mat* planes, int narrays=-1); + //! the separate iterator initialization method + void init(const Mat** arrays, Mat* planes, uchar** ptrs, int narrays=-1); + + //! proceeds to the next plane of every iterated matrix + NAryMatIterator& operator ++(); + //! proceeds to the next plane of every iterated matrix (postfix increment operator) + NAryMatIterator operator ++(int); + + //! the iterated arrays + const Mat** arrays; + //! the current planes + Mat* planes; + //! data pointers + uchar** ptrs; + //! the number of arrays + int narrays; + //! the number of hyper-planes that the iterator steps through + size_t nplanes; + //! 
the size of each segment (in elements) + size_t size; +protected: + int iterdepth; + size_t idx; +}; + +//typedef NAryMatIterator NAryMatNDIterator; + +typedef void (*ConvertData)(const void* from, void* to, int cn); +typedef void (*ConvertScaleData)(const void* from, void* to, int cn, double alpha, double beta); + +//! returns the function for converting pixels from one data type to another +CV_EXPORTS ConvertData getConvertElem(int fromType, int toType); +//! returns the function for converting pixels from one data type to another with the optional scaling +CV_EXPORTS ConvertScaleData getConvertScaleElem(int fromType, int toType); + + +/////////////////////////// multi-dimensional sparse matrix ////////////////////////// + +class SparseMatIterator; +class SparseMatConstIterator; +template class SparseMatIterator_; +template class SparseMatConstIterator_; + +/*! + Sparse matrix class. + + The class represents multi-dimensional sparse numerical arrays. Such a sparse array can store elements + of any type that cv::Mat is able to store. "Sparse" means that only non-zero elements + are stored (though, as a result of some operations on a sparse matrix, some of its stored elements + can actually become 0. It's user responsibility to detect such elements and delete them using cv::SparseMat::erase(). + The non-zero elements are stored in a hash table that grows when it's filled enough, + so that the search time remains O(1) in average. Elements can be accessed using the following methods: + +
    +
  1. Query operations: cv::SparseMat::ptr() and the higher-level cv::SparseMat::ref(), + cv::SparseMat::value() and cv::SparseMat::find, for example: + \code + const int dims = 5; + int size[] = {10, 10, 10, 10, 10}; + SparseMat sparse_mat(dims, size, CV_32F); + for(int i = 0; i < 1000; i++) + { + int idx[dims]; + for(int k = 0; k < dims; k++) + idx[k] = rand()%sparse_mat.size(k); + sparse_mat.ref(idx) += 1.f; + } + \endcode + +
  2. Sparse matrix iterators. Like cv::Mat iterators and unlike cv::Mat iterators, the sparse matrix iterators are STL-style, + that is, the iteration is done as following: + \code + // prints elements of a sparse floating-point matrix and the sum of elements. + SparseMatConstIterator_ + it = sparse_mat.begin(), + it_end = sparse_mat.end(); + double s = 0; + int dims = sparse_mat.dims(); + for(; it != it_end; ++it) + { + // print element indices and the element value + const Node* n = it.node(); + printf("(") + for(int i = 0; i < dims; i++) + printf("%3d%c", n->idx[i], i < dims-1 ? ',' : ')'); + printf(": %f\n", *it); + s += *it; + } + printf("Element sum is %g\n", s); + \endcode + If you run this loop, you will notice that elements are enumerated + in no any logical order (lexicographical etc.), + they come in the same order as they stored in the hash table, i.e. semi-randomly. + + You may collect pointers to the nodes and sort them to get the proper ordering. + Note, however, that pointers to the nodes may become invalid when you add more + elements to the matrix; this is because of possible buffer reallocation. + +
  3. A combination of the above 2 methods when you need to process 2 or more sparse + matrices simultaneously, e.g. this is how you can compute unnormalized + cross-correlation of the 2 floating-point sparse matrices: + \code + double crossCorr(const SparseMat& a, const SparseMat& b) + { + const SparseMat *_a = &a, *_b = &b; + // if b contains less elements than a, + // it's faster to iterate through b + if(_a->nzcount() > _b->nzcount()) + std::swap(_a, _b); + SparseMatConstIterator_ it = _a->begin(), + it_end = _a->end(); + double ccorr = 0; + for(; it != it_end; ++it) + { + // take the next element from the first matrix + float avalue = *it; + const Node* anode = it.node(); + // and try to find element with the same index in the second matrix. + // since the hash value depends only on the element index, + // we reuse hashvalue stored in the node + float bvalue = _b->value(anode->idx,&anode->hashval); + ccorr += avalue*bvalue; + } + return ccorr; + } + \endcode +
+*/ +class CV_EXPORTS SparseMat +{ +public: + typedef SparseMatIterator iterator; + typedef SparseMatConstIterator const_iterator; + + //! the sparse matrix header + struct CV_EXPORTS Hdr + { + Hdr(int _dims, const int* _sizes, int _type); + void clear(); + int refcount; + int dims; + int valueOffset; + size_t nodeSize; + size_t nodeCount; + size_t freeList; + vector pool; + vector hashtab; + int size[CV_MAX_DIM]; + }; + + //! sparse matrix node - element of a hash table + struct CV_EXPORTS Node + { + //! hash value + size_t hashval; + //! index of the next node in the same hash table entry + size_t next; + //! index of the matrix element + int idx[CV_MAX_DIM]; + }; + + //! default constructor + SparseMat(); + //! creates matrix of the specified size and type + SparseMat(int dims, const int* _sizes, int _type); + //! copy constructor + SparseMat(const SparseMat& m); + //! converts dense 2d matrix to the sparse form + /*! + \param m the input matrix + \param try1d if true and m is a single-column matrix (Nx1), + then the sparse matrix will be 1-dimensional. + */ + explicit SparseMat(const Mat& m); + //! converts old-style sparse matrix to the new-style. All the data is copied + SparseMat(const CvSparseMat* m); + //! the destructor + ~SparseMat(); + + //! assignment operator. This is O(1) operation, i.e. no data is copied + SparseMat& operator = (const SparseMat& m); + //! equivalent to the corresponding constructor + SparseMat& operator = (const Mat& m); + + //! creates full copy of the matrix + SparseMat clone() const; + + //! copies all the data to the destination matrix. All the previous content of m is erased + void copyTo( SparseMat& m ) const; + //! converts sparse matrix to dense matrix. + void copyTo( Mat& m ) const; + //! multiplies all the matrix elements by the specified scale factor alpha and converts the results to the specified data type + void convertTo( SparseMat& m, int rtype, double alpha=1 ) const; + //! 
converts sparse matrix to dense n-dim matrix with optional type conversion and scaling. + /*! + \param rtype The output matrix data type. When it is =-1, the output array will have the same data type as (*this) + \param alpha The scale factor + \param beta The optional delta added to the scaled values before the conversion + */ + void convertTo( Mat& m, int rtype, double alpha=1, double beta=0 ) const; + + // not used now + void assignTo( SparseMat& m, int type=-1 ) const; + + //! reallocates sparse matrix. + /*! + If the matrix already had the proper size and type, + it is simply cleared with clear(), otherwise, + the old matrix is released (using release()) and the new one is allocated. + */ + void create(int dims, const int* _sizes, int _type); + //! sets all the sparse matrix elements to 0, which means clearing the hash table. + void clear(); + //! manually increments the reference counter to the header. + void addref(); + // decrements the header reference counter. When the counter reaches 0, the header and all the underlying data are deallocated. + void release(); + + //! converts sparse matrix to the old-style representation; all the elements are copied. + operator CvSparseMat*() const; + //! returns the size of each element in bytes (not including the overhead - the space occupied by SparseMat::Node elements) + size_t elemSize() const; + //! returns elemSize()/channels() + size_t elemSize1() const; + + //! returns type of sparse matrix elements + int type() const; + //! returns the depth of sparse matrix elements + int depth() const; + //! returns the number of channels + int channels() const; + + //! returns the array of sizes, or NULL if the matrix is not allocated + const int* size() const; + //! returns the size of i-th matrix dimension (or 0) + int size(int i) const; + //! returns the matrix dimensionality + int dims() const; + //! returns the number of non-zero elements (=the number of hash table nodes) + size_t nzcount() const; + + //! 
computes the element hash value (1D case) + size_t hash(int i0) const; + //! computes the element hash value (2D case) + size_t hash(int i0, int i1) const; + //! computes the element hash value (3D case) + size_t hash(int i0, int i1, int i2) const; + //! computes the element hash value (nD case) + size_t hash(const int* idx) const; + + //@{ + /*! + specialized variants for 1D, 2D, 3D cases and the generic_type one for n-D case. + + return pointer to the matrix element. +
    +
  • if the element is there (it's non-zero), the pointer to it is returned +
  • if it's not there and createMissing=false, NULL pointer is returned +
  • if it's not there and createMissing=true, then the new element + is created and initialized with 0. Pointer to it is returned +
  • if the optional hashval pointer is not NULL, the element hash value is + not computed, but *hashval is taken instead. +
+ */ + //! returns pointer to the specified element (1D case) + uchar* ptr(int i0, bool createMissing, size_t* hashval=0); + //! returns pointer to the specified element (2D case) + uchar* ptr(int i0, int i1, bool createMissing, size_t* hashval=0); + //! returns pointer to the specified element (3D case) + uchar* ptr(int i0, int i1, int i2, bool createMissing, size_t* hashval=0); + //! returns pointer to the specified element (nD case) + uchar* ptr(const int* idx, bool createMissing, size_t* hashval=0); + //@} + + //@{ + /*! + return read-write reference to the specified sparse matrix element. + + ref<_Tp>(i0,...[,hashval]) is equivalent to *(_Tp*)ptr(i0,...,true[,hashval]). + The methods always return a valid reference. + If the element did not exist, it is created and initialiazed with 0. + */ + //! returns reference to the specified element (1D case) + template _Tp& ref(int i0, size_t* hashval=0); + //! returns reference to the specified element (2D case) + template _Tp& ref(int i0, int i1, size_t* hashval=0); + //! returns reference to the specified element (3D case) + template _Tp& ref(int i0, int i1, int i2, size_t* hashval=0); + //! returns reference to the specified element (nD case) + template _Tp& ref(const int* idx, size_t* hashval=0); + //@} + + //@{ + /*! + return value of the specified sparse matrix element. + + value<_Tp>(i0,...[,hashval]) is equivalent + + \code + { const _Tp* p = find<_Tp>(i0,...[,hashval]); return p ? *p : _Tp(); } + \endcode + + That is, if the element did not exist, the methods return 0. + */ + //! returns value of the specified element (1D case) + template _Tp value(int i0, size_t* hashval=0) const; + //! returns value of the specified element (2D case) + template _Tp value(int i0, int i1, size_t* hashval=0) const; + //! returns value of the specified element (3D case) + template _Tp value(int i0, int i1, int i2, size_t* hashval=0) const; + //! 
returns value of the specified element (nD case) + template _Tp value(const int* idx, size_t* hashval=0) const; + //@} + + //@{ + /*! + Return pointer to the specified sparse matrix element if it exists + + find<_Tp>(i0,...[,hashval]) is equivalent to (_const Tp*)ptr(i0,...false[,hashval]). + + If the specified element does not exist, the methods return NULL. + */ + //! returns pointer to the specified element (1D case) + template const _Tp* find(int i0, size_t* hashval=0) const; + //! returns pointer to the specified element (2D case) + template const _Tp* find(int i0, int i1, size_t* hashval=0) const; + //! returns pointer to the specified element (3D case) + template const _Tp* find(int i0, int i1, int i2, size_t* hashval=0) const; + //! returns pointer to the specified element (nD case) + template const _Tp* find(const int* idx, size_t* hashval=0) const; + + //! erases the specified element (2D case) + void erase(int i0, int i1, size_t* hashval=0); + //! erases the specified element (3D case) + void erase(int i0, int i1, int i2, size_t* hashval=0); + //! erases the specified element (nD case) + void erase(const int* idx, size_t* hashval=0); + + //@{ + /*! + return the sparse matrix iterator pointing to the first sparse matrix element + */ + //! returns the sparse matrix iterator at the matrix beginning + SparseMatIterator begin(); + //! returns the sparse matrix iterator at the matrix beginning + template SparseMatIterator_<_Tp> begin(); + //! returns the read-only sparse matrix iterator at the matrix beginning + SparseMatConstIterator begin() const; + //! returns the read-only sparse matrix iterator at the matrix beginning + template SparseMatConstIterator_<_Tp> begin() const; + //@} + /*! + return the sparse matrix iterator pointing to the element following the last sparse matrix element + */ + //! returns the sparse matrix iterator at the matrix end + SparseMatIterator end(); + //! 
returns the read-only sparse matrix iterator at the matrix end + SparseMatConstIterator end() const; + //! returns the typed sparse matrix iterator at the matrix end + template SparseMatIterator_<_Tp> end(); + //! returns the typed read-only sparse matrix iterator at the matrix end + template SparseMatConstIterator_<_Tp> end() const; + + //! returns the value stored in the sparse martix node + template _Tp& value(Node* n); + //! returns the value stored in the sparse martix node + template const _Tp& value(const Node* n) const; + + ////////////// some internal-use methods /////////////// + Node* node(size_t nidx); + const Node* node(size_t nidx) const; + + uchar* newNode(const int* idx, size_t hashval); + void removeNode(size_t hidx, size_t nidx, size_t previdx); + void resizeHashTab(size_t newsize); + + enum { MAGIC_VAL=0x42FD0000, MAX_DIM=CV_MAX_DIM, HASH_SCALE=0x5bd1e995, HASH_BIT=0x80000000 }; + + int flags; + Hdr* hdr; +}; + +//! finds global minimum and maximum sparse array elements and returns their values and their locations +CV_EXPORTS void minMaxLoc(const SparseMat& a, double* minVal, + double* maxVal, int* minIdx=0, int* maxIdx=0); +//! computes norm of a sparse matrix +CV_EXPORTS double norm( const SparseMat& src, int normType ); +//! scales and shifts array elements so that either the specified norm (alpha) or the minimum (alpha) and maximum (beta) array values get the specified values +CV_EXPORTS void normalize( const SparseMat& src, SparseMat& dst, double alpha, int normType ); + +/*! + Read-Only Sparse Matrix Iterator. + Here is how to use the iterator to compute the sum of floating-point sparse matrix elements: + + \code + SparseMatConstIterator it = m.begin(), it_end = m.end(); + double s = 0; + CV_Assert( m.type() == CV_32F ); + for( ; it != it_end; ++it ) + s += it.value(); + \endcode +*/ +class CV_EXPORTS SparseMatConstIterator +{ +public: + //! the default constructor + SparseMatConstIterator(); + //! 
the full constructor setting the iterator to the first sparse matrix element + SparseMatConstIterator(const SparseMat* _m); + //! the copy constructor + SparseMatConstIterator(const SparseMatConstIterator& it); + + //! the assignment operator + SparseMatConstIterator& operator = (const SparseMatConstIterator& it); + + //! template method returning the current matrix element + template const _Tp& value() const; + //! returns the current node of the sparse matrix. it.node->idx is the current element index + const SparseMat::Node* node() const; + + //! moves iterator to the previous element + SparseMatConstIterator& operator --(); + //! moves iterator to the previous element + SparseMatConstIterator operator --(int); + //! moves iterator to the next element + SparseMatConstIterator& operator ++(); + //! moves iterator to the next element + SparseMatConstIterator operator ++(int); + + //! moves iterator to the element after the last element + void seekEnd(); + + const SparseMat* m; + size_t hashidx; + uchar* ptr; +}; + +/*! + Read-write Sparse Matrix Iterator + + The class is similar to cv::SparseMatConstIterator, + but can be used for in-place modification of the matrix elements. +*/ +class CV_EXPORTS SparseMatIterator : public SparseMatConstIterator +{ +public: + //! the default constructor + SparseMatIterator(); + //! the full constructor setting the iterator to the first sparse matrix element + SparseMatIterator(SparseMat* _m); + //! the full constructor setting the iterator to the specified sparse matrix element + SparseMatIterator(SparseMat* _m, const int* idx); + //! the copy constructor + SparseMatIterator(const SparseMatIterator& it); + + //! the assignment operator + SparseMatIterator& operator = (const SparseMatIterator& it); + //! returns read-write reference to the current sparse matrix element + template _Tp& value() const; + //! returns pointer to the current sparse matrix node. it.node->idx is the index of the current element (do not modify it!) 
+ SparseMat::Node* node() const; + + //! moves iterator to the next element + SparseMatIterator& operator ++(); + //! moves iterator to the next element + SparseMatIterator operator ++(int); +}; + +/*! + The Template Sparse Matrix class derived from cv::SparseMat + + The class provides slightly more convenient operations for accessing elements. + + \code + SparseMat m; + ... + SparseMat_ m_ = (SparseMat_&)m; + m_.ref(1)++; // equivalent to m.ref(1)++; + m_.ref(2) += m_(3); // equivalent to m.ref(2) += m.value(3); + \endcode +*/ +template class CV_EXPORTS SparseMat_ : public SparseMat +{ +public: + typedef SparseMatIterator_<_Tp> iterator; + typedef SparseMatConstIterator_<_Tp> const_iterator; + + //! the default constructor + SparseMat_(); + //! the full constructor equivelent to SparseMat(dims, _sizes, DataType<_Tp>::type) + SparseMat_(int dims, const int* _sizes); + //! the copy constructor. If DataType<_Tp>.type != m.type(), the m elements are converted + SparseMat_(const SparseMat& m); + //! the copy constructor. This is O(1) operation - no data is copied + SparseMat_(const SparseMat_& m); + //! converts dense matrix to the sparse form + SparseMat_(const Mat& m); + //! converts the old-style sparse matrix to the C++ class. All the elements are copied + SparseMat_(const CvSparseMat* m); + //! the assignment operator. If DataType<_Tp>.type != m.type(), the m elements are converted + SparseMat_& operator = (const SparseMat& m); + //! the assignment operator. This is O(1) operation - no data is copied + SparseMat_& operator = (const SparseMat_& m); + //! converts dense matrix to the sparse form + SparseMat_& operator = (const Mat& m); + + //! makes full copy of the matrix. All the elements are duplicated + SparseMat_ clone() const; + //! equivalent to cv::SparseMat::create(dims, _sizes, DataType<_Tp>::type) + void create(int dims, const int* _sizes); + //! converts sparse matrix to the old-style CvSparseMat. 
All the elements are copied + operator CvSparseMat*() const; + + //! returns type of the matrix elements + int type() const; + //! returns depth of the matrix elements + int depth() const; + //! returns the number of channels in each matrix element + int channels() const; + + //! equivalent to SparseMat::ref<_Tp>(i0, hashval) + _Tp& ref(int i0, size_t* hashval=0); + //! equivalent to SparseMat::ref<_Tp>(i0, i1, hashval) + _Tp& ref(int i0, int i1, size_t* hashval=0); + //! equivalent to SparseMat::ref<_Tp>(i0, i1, i2, hashval) + _Tp& ref(int i0, int i1, int i2, size_t* hashval=0); + //! equivalent to SparseMat::ref<_Tp>(idx, hashval) + _Tp& ref(const int* idx, size_t* hashval=0); + + //! equivalent to SparseMat::value<_Tp>(i0, hashval) + _Tp operator()(int i0, size_t* hashval=0) const; + //! equivalent to SparseMat::value<_Tp>(i0, i1, hashval) + _Tp operator()(int i0, int i1, size_t* hashval=0) const; + //! equivalent to SparseMat::value<_Tp>(i0, i1, i2, hashval) + _Tp operator()(int i0, int i1, int i2, size_t* hashval=0) const; + //! equivalent to SparseMat::value<_Tp>(idx, hashval) + _Tp operator()(const int* idx, size_t* hashval=0) const; + + //! returns sparse matrix iterator pointing to the first sparse matrix element + SparseMatIterator_<_Tp> begin(); + //! returns read-only sparse matrix iterator pointing to the first sparse matrix element + SparseMatConstIterator_<_Tp> begin() const; + //! returns sparse matrix iterator pointing to the element following the last sparse matrix element + SparseMatIterator_<_Tp> end(); + //! returns read-only sparse matrix iterator pointing to the element following the last sparse matrix element + SparseMatConstIterator_<_Tp> end() const; +}; + + +/*! + Template Read-Only Sparse Matrix Iterator Class. + + This is the derived from SparseMatConstIterator class that + introduces more convenient operator *() for accessing the current element. 
+*/ +template class CV_EXPORTS SparseMatConstIterator_ : public SparseMatConstIterator +{ +public: + typedef std::forward_iterator_tag iterator_category; + + //! the default constructor + SparseMatConstIterator_(); + //! the full constructor setting the iterator to the first sparse matrix element + SparseMatConstIterator_(const SparseMat_<_Tp>* _m); + //! the copy constructor + SparseMatConstIterator_(const SparseMatConstIterator_& it); + + //! the assignment operator + SparseMatConstIterator_& operator = (const SparseMatConstIterator_& it); + //! the element access operator + const _Tp& operator *() const; + + //! moves iterator to the next element + SparseMatConstIterator_& operator ++(); + //! moves iterator to the next element + SparseMatConstIterator_ operator ++(int); +}; + +/*! + Template Read-Write Sparse Matrix Iterator Class. + + This is the derived from cv::SparseMatConstIterator_ class that + introduces more convenient operator *() for accessing the current element. +*/ +template class CV_EXPORTS SparseMatIterator_ : public SparseMatConstIterator_<_Tp> +{ +public: + typedef std::forward_iterator_tag iterator_category; + + //! the default constructor + SparseMatIterator_(); + //! the full constructor setting the iterator to the first sparse matrix element + SparseMatIterator_(SparseMat_<_Tp>* _m); + //! the copy constructor + SparseMatIterator_(const SparseMatIterator_& it); + + //! the assignment operator + SparseMatIterator_& operator = (const SparseMatIterator_& it); + //! returns the reference to the current element + _Tp& operator *() const; + + //! moves the iterator to the next element + SparseMatIterator_& operator ++(); + //! moves the iterator to the next element + SparseMatIterator_ operator ++(int); +}; + +//////////////////// Fast Nearest-Neighbor Search Structure //////////////////// + +/*! + Fast Nearest Neighbor Search Class. + + The class implements D. 
Lowe BBF (Best-Bin-First) algorithm for the last + approximate (or accurate) nearest neighbor search in multi-dimensional spaces. + + First, a set of vectors is passed to KDTree::KDTree() constructor + or KDTree::build() method, where it is reordered. + + Then arbitrary vectors can be passed to KDTree::findNearest() methods, which + find the K nearest neighbors among the vectors from the initial set. + The user can balance between the speed and accuracy of the search by varying Emax + parameter, which is the number of leaves that the algorithm checks. + The larger parameter values yield more accurate results at the expense of lower processing speed. + + \code + KDTree T(points, false); + const int K = 3, Emax = INT_MAX; + int idx[K]; + float dist[K]; + T.findNearest(query_vec, K, Emax, idx, 0, dist); + CV_Assert(dist[0] <= dist[1] && dist[1] <= dist[2]); + \endcode +*/ +class CV_EXPORTS_W KDTree +{ +public: + /*! + The node of the search tree. + */ + struct Node + { + Node() : idx(-1), left(-1), right(-1), boundary(0.f) {} + Node(int _idx, int _left, int _right, float _boundary) + : idx(_idx), left(_left), right(_right), boundary(_boundary) {} + //! split dimension; >=0 for nodes (dim), < 0 for leaves (index of the point) + int idx; + //! node indices of the left and the right branches + int left, right; + //! go to the left if query_vec[node.idx]<=node.boundary, otherwise go to the right + float boundary; + }; + + //! the default constructor + CV_WRAP KDTree(); + //! the full constructor that builds the search tree + CV_WRAP KDTree(InputArray points, bool copyAndReorderPoints=false); + //! the full constructor that builds the search tree + CV_WRAP KDTree(InputArray points, InputArray _labels, + bool copyAndReorderPoints=false); + //! builds the search tree + CV_WRAP void build(InputArray points, bool copyAndReorderPoints=false); + //! builds the search tree + CV_WRAP void build(InputArray points, InputArray labels, + bool copyAndReorderPoints=false); + //! 
finds the K nearest neighbors of "vec" while looking at Emax (at most) leaves + CV_WRAP int findNearest(InputArray vec, int K, int Emax, + OutputArray neighborsIdx, + OutputArray neighbors=noArray(), + OutputArray dist=noArray(), + OutputArray labels=noArray()) const; + //! finds all the points from the initial set that belong to the specified box + CV_WRAP void findOrthoRange(InputArray minBounds, + InputArray maxBounds, + OutputArray neighborsIdx, + OutputArray neighbors=noArray(), + OutputArray labels=noArray()) const; + //! returns vectors with the specified indices + CV_WRAP void getPoints(InputArray idx, OutputArray pts, + OutputArray labels=noArray()) const; + //! return a vector with the specified index + const float* getPoint(int ptidx, int* label=0) const; + //! returns the search space dimensionality + CV_WRAP int dims() const; + + vector nodes; //!< all the tree nodes + CV_PROP Mat points; //!< all the points. It can be a reordered copy of the input vector set or the original vector set. + CV_PROP vector labels; //!< the parallel array of labels. + CV_PROP int maxDepth; //!< maximum depth of the search tree. Do not modify it + CV_PROP_RW int normType; //!< type of the distance (cv::NORM_L1 or cv::NORM_L2) used for search. Initially set to cv::NORM_L2, but you can modify it +}; + +//////////////////////////////////////// XML & YAML I/O //////////////////////////////////// + +class CV_EXPORTS FileNode; + +/*! + XML/YAML File Storage Class. + + The class describes an object associated with XML or YAML file. + It can be used to store data to such a file or read and decode the data. + + The storage is organized as a tree of nested sequences (or lists) and mappings. + Sequence is a heterogenious array, which elements are accessed by indices or sequentially using an iterator. + Mapping is analogue of std::map or C structure, which elements are accessed by names. + The most top level structure is a mapping. 
+ Leaves of the file storage tree are integers, floating-point numbers and text strings. + + For example, the following code: + + \code + // open file storage for writing. Type of the file is determined from the extension + FileStorage fs("test.yml", FileStorage::WRITE); + fs << "test_int" << 5 << "test_real" << 3.1 << "test_string" << "ABCDEFGH"; + fs << "test_mat" << Mat::eye(3,3,CV_32F); + + fs << "test_list" << "[" << 0.0000000000001 << 2 << CV_PI << -3435345 << "2-502 2-029 3egegeg" << + "{:" << "month" << 12 << "day" << 31 << "year" << 1969 << "}" << "]"; + fs << "test_map" << "{" << "x" << 1 << "y" << 2 << "width" << 100 << "height" << 200 << "lbp" << "[:"; + + const uchar arr[] = {0, 1, 1, 0, 1, 1, 0, 1}; + fs.writeRaw("u", arr, (int)(sizeof(arr)/sizeof(arr[0]))); + + fs << "]" << "}"; + \endcode + + will produce the following file: + + \verbatim + %YAML:1.0 + test_int: 5 + test_real: 3.1000000000000001e+00 + test_string: ABCDEFGH + test_mat: !!opencv-matrix + rows: 3 + cols: 3 + dt: f + data: [ 1., 0., 0., 0., 1., 0., 0., 0., 1. ] + test_list: + - 1.0000000000000000e-13 + - 2 + - 3.1415926535897931e+00 + - -3435345 + - "2-502 2-029 3egegeg" + - { month:12, day:31, year:1969 } + test_map: + x: 1 + y: 2 + width: 100 + height: 200 + lbp: [ 0, 1, 1, 0, 1, 1, 0, 1 ] + \endverbatim + + and to read the file above, the following code can be used: + + \code + // open file storage for reading. 
+ // Type of the file is determined from the content, not the extension + FileStorage fs("test.yml", FileStorage::READ); + int test_int = (int)fs["test_int"]; + double test_real = (double)fs["test_real"]; + string test_string = (string)fs["test_string"]; + + Mat M; + fs["test_mat"] >> M; + + FileNode tl = fs["test_list"]; + CV_Assert(tl.type() == FileNode::SEQ && tl.size() == 6); + double tl0 = (double)tl[0]; + int tl1 = (int)tl[1]; + double tl2 = (double)tl[2]; + int tl3 = (int)tl[3]; + string tl4 = (string)tl[4]; + CV_Assert(tl[5].type() == FileNode::MAP && tl[5].size() == 3); + + int month = (int)tl[5]["month"]; + int day = (int)tl[5]["day"]; + int year = (int)tl[5]["year"]; + + FileNode tm = fs["test_map"]; + + int x = (int)tm["x"]; + int y = (int)tm["y"]; + int width = (int)tm["width"]; + int height = (int)tm["height"]; + + int lbp_val = 0; + FileNodeIterator it = tm["lbp"].begin(); + + for(int k = 0; k < 8; k++, ++it) + lbp_val |= ((int)*it) << k; + \endcode +*/ +class CV_EXPORTS_W FileStorage +{ +public: + //! file storage mode + enum + { + READ=0, //! read mode + WRITE=1, //! write mode + APPEND=2 //! append mode + }; + enum + { + UNDEFINED=0, + VALUE_EXPECTED=1, + NAME_EXPECTED=2, + INSIDE_MAP=4 + }; + //! the default constructor + CV_WRAP FileStorage(); + //! the full constructor that opens file storage for reading or writing + CV_WRAP FileStorage(const string& filename, int flags, const string& encoding=string()); + //! the constructor that takes pointer to the C FileStorage structure + FileStorage(CvFileStorage* fs); + //! the destructor. calls release() + virtual ~FileStorage(); + + //! opens file storage for reading or writing. The previous storage is closed with release() + CV_WRAP virtual bool open(const string& filename, int flags, const string& encoding=string()); + //! returns true if the object is associated with currently opened file. + CV_WRAP virtual bool isOpened() const; + //! 
closes the file and releases all the memory buffers + CV_WRAP virtual void release(); + + //! returns the first element of the top-level mapping + CV_WRAP FileNode getFirstTopLevelNode() const; + //! returns the top-level mapping. YAML supports multiple streams + CV_WRAP FileNode root(int streamidx=0) const; + //! returns the specified element of the top-level mapping + FileNode operator[](const string& nodename) const; + //! returns the specified element of the top-level mapping + CV_WRAP FileNode operator[](const char* nodename) const; + + //! returns pointer to the underlying C FileStorage structure + CvFileStorage* operator *() { return fs; } + //! returns pointer to the underlying C FileStorage structure + const CvFileStorage* operator *() const { return fs; } + //! writes one or more numbers of the specified format to the currently written structure + void writeRaw( const string& fmt, const uchar* vec, size_t len ); + //! writes the registered C structure (CvMat, CvMatND, CvSeq). See cvWrite() + void writeObj( const string& name, const void* obj ); + + //! returns the normalized object name for the specified file name + static string getDefaultObjectName(const string& filename); + + Ptr fs; //!< the underlying C FileStorage structure + string elname; //!< the currently written element + vector structs; //!< the stack of written structures + int state; //!< the writer state +}; + +class CV_EXPORTS FileNodeIterator; + +/*! + File Storage Node class + + The node is used to store each and every element of the file storage opened for reading - + from the primitive objects, such as numbers and text strings, to the complex nodes: + sequences, mappings and the registered objects. + + Note that file nodes are only used for navigating file storages opened for reading. + When a file storage is opened for writing, no data is stored in memory after it is written. +*/ +class CV_EXPORTS_W_SIMPLE FileNode +{ +public: + //! 
type of the file storage node + enum + { + NONE=0, //!< empty node + INT=1, //!< an integer + REAL=2, //!< floating-point number + FLOAT=REAL, //!< synonym or REAL + STR=3, //!< text string in UTF-8 encoding + STRING=STR, //!< synonym for STR + REF=4, //!< integer of size size_t. Typically used for storing complex dynamic structures where some elements reference the others + SEQ=5, //!< sequence + MAP=6, //!< mapping + TYPE_MASK=7, + FLOW=8, //!< compact representation of a sequence or mapping. Used only by YAML writer + USER=16, //!< a registered object (e.g. a matrix) + EMPTY=32, //!< empty structure (sequence or mapping) + NAMED=64 //!< the node has a name (i.e. it is element of a mapping) + }; + //! the default constructor + CV_WRAP FileNode(); + //! the full constructor wrapping CvFileNode structure. + FileNode(const CvFileStorage* fs, const CvFileNode* node); + //! the copy constructor + FileNode(const FileNode& node); + //! returns element of a mapping node + FileNode operator[](const string& nodename) const; + //! returns element of a mapping node + CV_WRAP FileNode operator[](const char* nodename) const; + //! returns element of a sequence node + CV_WRAP FileNode operator[](int i) const; + //! returns type of the node + CV_WRAP int type() const; + + //! returns true if the node is empty + CV_WRAP bool empty() const; + //! returns true if the node is a "none" object + CV_WRAP bool isNone() const; + //! returns true if the node is a sequence + CV_WRAP bool isSeq() const; + //! returns true if the node is a mapping + CV_WRAP bool isMap() const; + //! returns true if the node is an integer + CV_WRAP bool isInt() const; + //! returns true if the node is a floating-point number + CV_WRAP bool isReal() const; + //! returns true if the node is a text string + CV_WRAP bool isString() const; + //! returns true if the node has a name + CV_WRAP bool isNamed() const; + //! 
returns the node name or an empty string if the node is nameless + CV_WRAP string name() const; + //! returns the number of elements in the node, if it is a sequence or mapping, or 1 otherwise. + CV_WRAP size_t size() const; + //! returns the node content as an integer. If the node stores floating-point number, it is rounded. + operator int() const; + //! returns the node content as float + operator float() const; + //! returns the node content as double + operator double() const; + //! returns the node content as text string + operator string() const; + + //! returns pointer to the underlying file node + CvFileNode* operator *(); + //! returns pointer to the underlying file node + const CvFileNode* operator* () const; + + //! returns iterator pointing to the first node element + FileNodeIterator begin() const; + //! returns iterator pointing to the element following the last node element + FileNodeIterator end() const; + + //! reads node elements to the buffer with the specified format + void readRaw( const string& fmt, uchar* vec, size_t len ) const; + //! reads the registered object and returns pointer to it + void* readObj() const; + + // do not use wrapper pointer classes for better efficiency + const CvFileStorage* fs; + const CvFileNode* node; +}; + + +/*! + File Node Iterator + + The class is used for iterating sequences (usually) and mappings. + */ +class CV_EXPORTS FileNodeIterator +{ +public: + //! the default constructor + FileNodeIterator(); + //! the full constructor set to the ofs-th element of the node + FileNodeIterator(const CvFileStorage* fs, const CvFileNode* node, size_t ofs=0); + //! the copy constructor + FileNodeIterator(const FileNodeIterator& it); + //! returns the currently observed element + FileNode operator *() const; + //! accesses the currently observed element methods + FileNode operator ->() const; + + //! moves iterator to the next node + FileNodeIterator& operator ++ (); + //! 
moves iterator to the next node + FileNodeIterator operator ++ (int); + //! moves iterator to the previous node + FileNodeIterator& operator -- (); + //! moves iterator to the previous node + FileNodeIterator operator -- (int); + //! moves iterator forward by the specified offset (possibly negative) + FileNodeIterator& operator += (int); + //! moves iterator backward by the specified offset (possibly negative) + FileNodeIterator& operator -= (int); + + //! reads the next maxCount elements (or less, if the sequence/mapping last element occurs earlier) to the buffer with the specified format + FileNodeIterator& readRaw( const string& fmt, uchar* vec, + size_t maxCount=(size_t)INT_MAX ); + + const CvFileStorage* fs; + const CvFileNode* container; + CvSeqReader reader; + size_t remaining; +}; + +////////////// convenient wrappers for operating old-style dynamic structures ////////////// + +template class SeqIterator; + +typedef Ptr MemStorage; + +/*! + Template Sequence Class derived from CvSeq + + The class provides more convenient access to sequence elements, + STL-style operations and iterators. + + \note The class is targeted for simple data types, + i.e. no constructors or destructors + are called for the sequence elements. +*/ +template class CV_EXPORTS Seq +{ +public: + typedef SeqIterator<_Tp> iterator; + typedef SeqIterator<_Tp> const_iterator; + + //! the default constructor + Seq(); + //! the constructor for wrapping CvSeq structure. The real element type in CvSeq should match _Tp. + Seq(const CvSeq* seq); + //! creates the empty sequence that resides in the specified storage + Seq(MemStorage& storage, int headerSize = sizeof(CvSeq)); + //! returns read-write reference to the specified element + _Tp& operator [](int idx); + //! returns read-only reference to the specified element + const _Tp& operator[](int idx) const; + //! returns iterator pointing to the beginning of the sequence + SeqIterator<_Tp> begin() const; + //! 
returns iterator pointing to the element following the last sequence element + SeqIterator<_Tp> end() const; + //! returns the number of elements in the sequence + size_t size() const; + //! returns the type of sequence elements (CV_8UC1 ... CV_64FC(CV_CN_MAX) ...) + int type() const; + //! returns the depth of sequence elements (CV_8U ... CV_64F) + int depth() const; + //! returns the number of channels in each sequence element + int channels() const; + //! returns the size of each sequence element + size_t elemSize() const; + //! returns index of the specified sequence element + size_t index(const _Tp& elem) const; + //! appends the specified element to the end of the sequence + void push_back(const _Tp& elem); + //! appends the specified element to the front of the sequence + void push_front(const _Tp& elem); + //! appends zero or more elements to the end of the sequence + void push_back(const _Tp* elems, size_t count); + //! appends zero or more elements to the front of the sequence + void push_front(const _Tp* elems, size_t count); + //! inserts the specified element to the specified position + void insert(int idx, const _Tp& elem); + //! inserts zero or more elements to the specified position + void insert(int idx, const _Tp* elems, size_t count); + //! removes element at the specified position + void remove(int idx); + //! removes the specified subsequence + void remove(const Range& r); + + //! returns reference to the first sequence element + _Tp& front(); + //! returns read-only reference to the first sequence element + const _Tp& front() const; + //! returns reference to the last sequence element + _Tp& back(); + //! returns read-only reference to the last sequence element + const _Tp& back() const; + //! returns true iff the sequence contains no elements + bool empty() const; + + //! removes all the elements from the sequence + void clear(); + //! removes the first element from the sequence + void pop_front(); + //! 
removes the last element from the sequence + void pop_back(); + //! removes zero or more elements from the beginning of the sequence + void pop_front(_Tp* elems, size_t count); + //! removes zero or more elements from the end of the sequence + void pop_back(_Tp* elems, size_t count); + + //! copies the whole sequence or the sequence slice to the specified vector + void copyTo(vector<_Tp>& vec, const Range& range=Range::all()) const; + //! returns the vector containing all the sequence elements + operator vector<_Tp>() const; + + CvSeq* seq; +}; + + +/*! + STL-style Sequence Iterator inherited from the CvSeqReader structure +*/ +template class CV_EXPORTS SeqIterator : public CvSeqReader +{ +public: + //! the default constructor + SeqIterator(); + //! the constructor setting the iterator to the beginning or to the end of the sequence + SeqIterator(const Seq<_Tp>& seq, bool seekEnd=false); + //! positions the iterator within the sequence + void seek(size_t pos); + //! reports the current iterator position + size_t tell() const; + //! returns reference to the current sequence element + _Tp& operator *(); + //! returns read-only reference to the current sequence element + const _Tp& operator *() const; + //! moves iterator to the next sequence element + SeqIterator& operator ++(); + //! moves iterator to the next sequence element + SeqIterator operator ++(int) const; + //! moves iterator to the previous sequence element + SeqIterator& operator --(); + //! moves iterator to the previous sequence element + SeqIterator operator --(int) const; + + //! moves iterator forward by the specified offset (possibly negative) + SeqIterator& operator +=(int); + //! moves iterator backward by the specified offset (possibly negative) + SeqIterator& operator -=(int); + + // this is index of the current element module seq->total*2 + // (to distinguish between 0 and seq->total) + int index; +}; + + +#if 0 +class CV_EXPORTS AlgorithmImpl; + +/*! 
+ Base class for high-level OpenCV algorithms +*/ +class CV_EXPORTS Algorithm +{ +public: + virtual ~Algorithm(); + virtual string name() const; + + template _Tp get(int paramId) const; + template bool set(int paramId, const _Tp& value); + string paramName(int paramId) const; + string paramHelp(int paramId) const; + int paramType(int paramId) const; + int findParam(const string& name) const; + template _Tp paramDefaultValue(int paramId) const; + template bool paramRange(int paramId, _Tp& minVal, _Tp& maxVal) const; + + virtual void getParams(vector& ids) const; + virtual void write(vector& buf) const; + virtual bool read(const vector& buf); + + typedef Algorithm* (*Constructor)(void); + static void add(const string& name, Constructor create); + static void getList(vector& algorithms); + static Ptr create(const string& name); + +protected: + template void addParam(int propId, _Tp& value, bool readOnly, const string& name, + const string& help=string(), const _Tp& defaultValue=_Tp(), + _Tp (Algorithm::*getter)()=0, bool (Algorithm::*setter)(const _Tp&)=0); + template void setParamRange(int propId, const _Tp& minVal, const _Tp& maxVal); + + bool set_(int paramId, int argType, const void* value); + void get_(int paramId, int argType, void* value); + void paramDefaultValue_(int paramId, int argType, void* value); + void paramRange_(int paramId, int argType, void* minval, void* maxval); + void addParam_(int propId, int argType, void* value, bool readOnly, const string& name, + const string& help, const void* defaultValue, void* getter, void* setter); + void setParamRange_(int propId, int argType, const void* minVal, const void* maxVal); + + Ptr impl; +}; +#endif + +/*! 
+"\nThe CommandLineParser class is designed for command line arguments parsing\n" + "Keys map: \n" + "Before you start to work with CommandLineParser you have to create a map for keys.\n" + " It will look like this\n" + " const char* keys =\n" + " {\n" + " { s| string| 123asd |string parameter}\n" + " { d| digit | 100 |digit parameter }\n" + " { c|noCamera|false |without camera }\n" + " { 1| |some text|help }\n" + " { 2| |333 |another help }\n" + " };\n" + "Usage syntax: \n" + " \"{\" - start of parameter string.\n" + " \"}\" - end of parameter string\n" + " \"|\" - separator between short name, full name, default value and help\n" + "Supported syntax: \n" + " --key1=arg1 \n" + " -key2=arg2 \n" + "Usage: \n" + " Imagine that the input parameters are next:\n" + " -s=string_value --digit=250 --noCamera lena.jpg 10000\n" + " CommandLineParser parser(argc, argv, keys) - create a parser object\n" + " parser.get(\"s\" or \"string\") will return you first parameter value\n" + " parser.get(\"s\", false or \"string\", false) will return you first parameter value\n" + " without spaces in end and begin\n" + " parser.get(\"d\" or \"digit\") will return you second parameter value.\n" + " It also works with 'unsigned int', 'double', and 'float' types>\n" + " parser.get(\"c\" or \"noCamera\") will return you true .\n" + " If you enter this key in commandline>\n" + " It return you false otherwise.\n" + " parser.get(\"1\") will return you the first argument without parameter (lena.jpg) \n" + " parser.get(\"2\") will return you the second argument without parameter (10000)\n" + " It also works with 'unsigned int', 'double', and 'float' types \n" +*/ +class CV_EXPORTS CommandLineParser +{ + public: + + //! the default constructor + CommandLineParser(int argc, const char* argv[], const char* key_map); + + //! 
get parameter, you can choose: delete spaces in end and begin or not + template + _Tp get(const std::string& name, bool space_delete=true) + { + if (!has(name)) + { + return _Tp(); + } + std::string str = getString(name); + return analizeValue<_Tp>(str, space_delete); + } + + //! print short name, full name, current value and help for all params + void printParams(); + + protected: + std::map > data; + std::string getString(const std::string& name); + + bool has(const std::string& keys); + + template + _Tp analizeValue(const std::string& str, bool space_delete=false); + + template + static _Tp getData(const std::string& str) + { + _Tp res; + std::stringstream s1(str); + s1 >> res; + return res; + } + + template + _Tp fromStringNumber(const std::string& str);//the default conversion function for numbers + + }; + +template<> CV_EXPORTS +bool CommandLineParser::get(const std::string& name, bool space_delete); + +template<> CV_EXPORTS +std::string CommandLineParser::analizeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +int CommandLineParser::analizeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +unsigned CommandLineParser::analizeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +float CommandLineParser::analizeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +double CommandLineParser::analizeValue(const std::string& str, bool space_delete); + +} + +#endif // __cplusplus + +#include "operations.hpp" +#include "mat.hpp" + +#endif /*__OPENCV_CORE_HPP__*/ diff --git a/opencv/core/opencv2/core/core_c.h b/opencv/core/opencv2/core/core_c.h new file mode 100644 index 0000000..05d8c72 --- /dev/null +++ b/opencv/core/opencv2/core/core_c.h @@ -0,0 +1,1885 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + + +#ifndef __OPENCV_CORE_C_H__ +#define __OPENCV_CORE_C_H__ + +#include "opencv2/core/types_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/****************************************************************************************\ +* Array allocation, deallocation, initialization and access to elements * +\****************************************************************************************/ + +/* wrapper. + If there is no enough memory, the function + (as well as other OpenCV functions that call cvAlloc) + raises an error. */ +CVAPI(void*) cvAlloc( size_t size ); + +/* wrapper. + Here and further all the memory releasing functions + (that all call cvFree) take double pointer in order to + to clear pointer to the data after releasing it. + Passing pointer to NULL pointer is Ok: nothing happens in this case +*/ +CVAPI(void) cvFree_( void* ptr ); +#define cvFree(ptr) (cvFree_(*(ptr)), *(ptr)=0) + +/* Allocates and initializes IplImage header */ +CVAPI(IplImage*) cvCreateImageHeader( CvSize size, int depth, int channels ); + +/* Inializes IplImage header */ +CVAPI(IplImage*) cvInitImageHeader( IplImage* image, CvSize size, int depth, + int channels, int origin CV_DEFAULT(0), + int align CV_DEFAULT(4)); + +/* Creates IPL image (header and data) */ +CVAPI(IplImage*) cvCreateImage( CvSize size, int depth, int channels ); + +/* Releases (i.e. 
deallocates) IPL image header */ +CVAPI(void) cvReleaseImageHeader( IplImage** image ); + +/* Releases IPL image header and data */ +CVAPI(void) cvReleaseImage( IplImage** image ); + +/* Creates a copy of IPL image (widthStep may differ) */ +CVAPI(IplImage*) cvCloneImage( const IplImage* image ); + +/* Sets a Channel Of Interest (only a few functions support COI) - + use cvCopy to extract the selected channel and/or put it back */ +CVAPI(void) cvSetImageCOI( IplImage* image, int coi ); + +/* Retrieves image Channel Of Interest */ +CVAPI(int) cvGetImageCOI( const IplImage* image ); + +/* Sets image ROI (region of interest) (COI is not changed) */ +CVAPI(void) cvSetImageROI( IplImage* image, CvRect rect ); + +/* Resets image ROI and COI */ +CVAPI(void) cvResetImageROI( IplImage* image ); + +/* Retrieves image ROI */ +CVAPI(CvRect) cvGetImageROI( const IplImage* image ); + +/* Allocates and initalizes CvMat header */ +CVAPI(CvMat*) cvCreateMatHeader( int rows, int cols, int type ); + +#define CV_AUTOSTEP 0x7fffffff + +/* Initializes CvMat header */ +CVAPI(CvMat*) cvInitMatHeader( CvMat* mat, int rows, int cols, + int type, void* data CV_DEFAULT(NULL), + int step CV_DEFAULT(CV_AUTOSTEP) ); + +/* Allocates and initializes CvMat header and allocates data */ +CVAPI(CvMat*) cvCreateMat( int rows, int cols, int type ); + +/* Releases CvMat header and deallocates matrix data + (reference counting is used for data) */ +CVAPI(void) cvReleaseMat( CvMat** mat ); + +/* Decrements CvMat data reference counter and deallocates the data if + it reaches 0 */ +CV_INLINE void cvDecRefData( CvArr* arr ) +{ + if( CV_IS_MAT( arr )) + { + CvMat* mat = (CvMat*)arr; + mat->data.ptr = NULL; + if( mat->refcount != NULL && --*mat->refcount == 0 ) + cvFree( &mat->refcount ); + mat->refcount = NULL; + } + else if( CV_IS_MATND( arr )) + { + CvMatND* mat = (CvMatND*)arr; + mat->data.ptr = NULL; + if( mat->refcount != NULL && --*mat->refcount == 0 ) + cvFree( &mat->refcount ); + mat->refcount = NULL; 
+ } +} + +/* Increments CvMat data reference counter */ +CV_INLINE int cvIncRefData( CvArr* arr ) +{ + int refcount = 0; + if( CV_IS_MAT( arr )) + { + CvMat* mat = (CvMat*)arr; + if( mat->refcount != NULL ) + refcount = ++*mat->refcount; + } + else if( CV_IS_MATND( arr )) + { + CvMatND* mat = (CvMatND*)arr; + if( mat->refcount != NULL ) + refcount = ++*mat->refcount; + } + return refcount; +} + + +/* Creates an exact copy of the input matrix (except, may be, step value) */ +CVAPI(CvMat*) cvCloneMat( const CvMat* mat ); + + +/* Makes a new matrix from subrectangle of input array. + No data is copied */ +CVAPI(CvMat*) cvGetSubRect( const CvArr* arr, CvMat* submat, CvRect rect ); +#define cvGetSubArr cvGetSubRect + +/* Selects row span of the input array: arr(start_row:delta_row:end_row,:) + (end_row is not included into the span). */ +CVAPI(CvMat*) cvGetRows( const CvArr* arr, CvMat* submat, + int start_row, int end_row, + int delta_row CV_DEFAULT(1)); + +CV_INLINE CvMat* cvGetRow( const CvArr* arr, CvMat* submat, int row ) +{ + return cvGetRows( arr, submat, row, row + 1, 1 ); +} + + +/* Selects column span of the input array: arr(:,start_col:end_col) + (end_col is not included into the span) */ +CVAPI(CvMat*) cvGetCols( const CvArr* arr, CvMat* submat, + int start_col, int end_col ); + +CV_INLINE CvMat* cvGetCol( const CvArr* arr, CvMat* submat, int col ) +{ + return cvGetCols( arr, submat, col, col + 1 ); +} + +/* Select a diagonal of the input array. + (diag = 0 means the main diagonal, >0 means a diagonal above the main one, + <0 - below the main one). + The diagonal will be represented as a column (nx1 matrix). 
*/ +CVAPI(CvMat*) cvGetDiag( const CvArr* arr, CvMat* submat, + int diag CV_DEFAULT(0)); + +/* low-level scalar <-> raw data conversion functions */ +CVAPI(void) cvScalarToRawData( const CvScalar* scalar, void* data, int type, + int extend_to_12 CV_DEFAULT(0) ); + +CVAPI(void) cvRawDataToScalar( const void* data, int type, CvScalar* scalar ); + +/* Allocates and initializes CvMatND header */ +CVAPI(CvMatND*) cvCreateMatNDHeader( int dims, const int* sizes, int type ); + +/* Allocates and initializes CvMatND header and allocates data */ +CVAPI(CvMatND*) cvCreateMatND( int dims, const int* sizes, int type ); + +/* Initializes preallocated CvMatND header */ +CVAPI(CvMatND*) cvInitMatNDHeader( CvMatND* mat, int dims, const int* sizes, + int type, void* data CV_DEFAULT(NULL) ); + +/* Releases CvMatND */ +CV_INLINE void cvReleaseMatND( CvMatND** mat ) +{ + cvReleaseMat( (CvMat**)mat ); +} + +/* Creates a copy of CvMatND (except, may be, steps) */ +CVAPI(CvMatND*) cvCloneMatND( const CvMatND* mat ); + +/* Allocates and initializes CvSparseMat header and allocates data */ +CVAPI(CvSparseMat*) cvCreateSparseMat( int dims, const int* sizes, int type ); + +/* Releases CvSparseMat */ +CVAPI(void) cvReleaseSparseMat( CvSparseMat** mat ); + +/* Creates a copy of CvSparseMat (except, may be, zero items) */ +CVAPI(CvSparseMat*) cvCloneSparseMat( const CvSparseMat* mat ); + +/* Initializes sparse array iterator + (returns the first node or NULL if the array is empty) */ +CVAPI(CvSparseNode*) cvInitSparseMatIterator( const CvSparseMat* mat, + CvSparseMatIterator* mat_iterator ); + +// returns next sparse array node (or NULL if there is no more nodes) +CV_INLINE CvSparseNode* cvGetNextSparseNode( CvSparseMatIterator* mat_iterator ) +{ + if( mat_iterator->node->next ) + return mat_iterator->node = mat_iterator->node->next; + else + { + int idx; + for( idx = ++mat_iterator->curidx; idx < mat_iterator->mat->hashsize; idx++ ) + { + CvSparseNode* node = 
(CvSparseNode*)mat_iterator->mat->hashtable[idx]; + if( node ) + { + mat_iterator->curidx = idx; + return mat_iterator->node = node; + } + } + return NULL; + } +} + +/**************** matrix iterator: used for n-ary operations on dense arrays *********/ + +#define CV_MAX_ARR 10 + +typedef struct CvNArrayIterator +{ + int count; /* number of arrays */ + int dims; /* number of dimensions to iterate */ + CvSize size; /* maximal common linear size: { width = size, height = 1 } */ + uchar* ptr[CV_MAX_ARR]; /* pointers to the array slices */ + int stack[CV_MAX_DIM]; /* for internal use */ + CvMatND* hdr[CV_MAX_ARR]; /* pointers to the headers of the + matrices that are processed */ +} +CvNArrayIterator; + +#define CV_NO_DEPTH_CHECK 1 +#define CV_NO_CN_CHECK 2 +#define CV_NO_SIZE_CHECK 4 + +/* initializes iterator that traverses through several arrays simulteneously + (the function together with cvNextArraySlice is used for + N-ari element-wise operations) */ +CVAPI(int) cvInitNArrayIterator( int count, CvArr** arrs, + const CvArr* mask, CvMatND* stubs, + CvNArrayIterator* array_iterator, + int flags CV_DEFAULT(0) ); + +/* returns zero value if iteration is finished, non-zero (slice length) otherwise */ +CVAPI(int) cvNextNArraySlice( CvNArrayIterator* array_iterator ); + + +/* Returns type of array elements: + CV_8UC1 ... CV_64FC4 ... */ +CVAPI(int) cvGetElemType( const CvArr* arr ); + +/* Retrieves number of an array dimensions and + optionally sizes of the dimensions */ +CVAPI(int) cvGetDims( const CvArr* arr, int* sizes CV_DEFAULT(NULL) ); + + +/* Retrieves size of a particular array dimension. + For 2d arrays cvGetDimSize(arr,0) returns number of rows (image height) + and cvGetDimSize(arr,1) returns number of columns (image width) */ +CVAPI(int) cvGetDimSize( const CvArr* arr, int index ); + + +/* ptr = &arr(idx0,idx1,...). All indexes are zero-based, + the major dimensions go first (e.g. 
(y,x) for 2D, (z,y,x) for 3D */ +CVAPI(uchar*) cvPtr1D( const CvArr* arr, int idx0, int* type CV_DEFAULT(NULL)); +CVAPI(uchar*) cvPtr2D( const CvArr* arr, int idx0, int idx1, int* type CV_DEFAULT(NULL) ); +CVAPI(uchar*) cvPtr3D( const CvArr* arr, int idx0, int idx1, int idx2, + int* type CV_DEFAULT(NULL)); + +/* For CvMat or IplImage number of indices should be 2 + (row index (y) goes first, column index (x) goes next). + For CvMatND or CvSparseMat number of infices should match number of and + indices order should match the array dimension order. */ +CVAPI(uchar*) cvPtrND( const CvArr* arr, const int* idx, int* type CV_DEFAULT(NULL), + int create_node CV_DEFAULT(1), + unsigned* precalc_hashval CV_DEFAULT(NULL)); + +/* value = arr(idx0,idx1,...) */ +CVAPI(CvScalar) cvGet1D( const CvArr* arr, int idx0 ); +CVAPI(CvScalar) cvGet2D( const CvArr* arr, int idx0, int idx1 ); +CVAPI(CvScalar) cvGet3D( const CvArr* arr, int idx0, int idx1, int idx2 ); +CVAPI(CvScalar) cvGetND( const CvArr* arr, const int* idx ); + +/* for 1-channel arrays */ +CVAPI(double) cvGetReal1D( const CvArr* arr, int idx0 ); +CVAPI(double) cvGetReal2D( const CvArr* arr, int idx0, int idx1 ); +CVAPI(double) cvGetReal3D( const CvArr* arr, int idx0, int idx1, int idx2 ); +CVAPI(double) cvGetRealND( const CvArr* arr, const int* idx ); + +/* arr(idx0,idx1,...) 
= value */ +CVAPI(void) cvSet1D( CvArr* arr, int idx0, CvScalar value ); +CVAPI(void) cvSet2D( CvArr* arr, int idx0, int idx1, CvScalar value ); +CVAPI(void) cvSet3D( CvArr* arr, int idx0, int idx1, int idx2, CvScalar value ); +CVAPI(void) cvSetND( CvArr* arr, const int* idx, CvScalar value ); + +/* for 1-channel arrays */ +CVAPI(void) cvSetReal1D( CvArr* arr, int idx0, double value ); +CVAPI(void) cvSetReal2D( CvArr* arr, int idx0, int idx1, double value ); +CVAPI(void) cvSetReal3D( CvArr* arr, int idx0, + int idx1, int idx2, double value ); +CVAPI(void) cvSetRealND( CvArr* arr, const int* idx, double value ); + +/* clears element of ND dense array, + in case of sparse arrays it deletes the specified node */ +CVAPI(void) cvClearND( CvArr* arr, const int* idx ); + +/* Converts CvArr (IplImage or CvMat,...) to CvMat. + If the last parameter is non-zero, function can + convert multi(>2)-dimensional array to CvMat as long as + the last array's dimension is continous. The resultant + matrix will be have appropriate (a huge) number of rows */ +CVAPI(CvMat*) cvGetMat( const CvArr* arr, CvMat* header, + int* coi CV_DEFAULT(NULL), + int allowND CV_DEFAULT(0)); + +/* Converts CvArr (IplImage or CvMat) to IplImage */ +CVAPI(IplImage*) cvGetImage( const CvArr* arr, IplImage* image_header ); + + +/* Changes a shape of multi-dimensional array. + new_cn == 0 means that number of channels remains unchanged. + new_dims == 0 means that number and sizes of dimensions remain the same + (unless they need to be changed to set the new number of channels) + if new_dims == 1, there is no need to specify new dimension sizes + The resultant configuration should be achievable w/o data copying. 
+ If the resultant array is sparse, CvSparseMat header should be passed + to the function else if the result is 1 or 2 dimensional, + CvMat header should be passed to the function + else CvMatND header should be passed */ +CVAPI(CvArr*) cvReshapeMatND( const CvArr* arr, + int sizeof_header, CvArr* header, + int new_cn, int new_dims, int* new_sizes ); + +#define cvReshapeND( arr, header, new_cn, new_dims, new_sizes ) \ + cvReshapeMatND( (arr), sizeof(*(header)), (header), \ + (new_cn), (new_dims), (new_sizes)) + +CVAPI(CvMat*) cvReshape( const CvArr* arr, CvMat* header, + int new_cn, int new_rows CV_DEFAULT(0) ); + +/* Repeats source 2d array several times in both horizontal and + vertical direction to fill destination array */ +CVAPI(void) cvRepeat( const CvArr* src, CvArr* dst ); + +/* Allocates array data */ +CVAPI(void) cvCreateData( CvArr* arr ); + +/* Releases array data */ +CVAPI(void) cvReleaseData( CvArr* arr ); + +/* Attaches user data to the array header. The step is reffered to + the pre-last dimension. That is, all the planes of the array + must be joint (w/o gaps) */ +CVAPI(void) cvSetData( CvArr* arr, void* data, int step ); + +/* Retrieves raw data of CvMat, IplImage or CvMatND. 
+ In the latter case the function raises an error if + the array can not be represented as a matrix */ +CVAPI(void) cvGetRawData( const CvArr* arr, uchar** data, + int* step CV_DEFAULT(NULL), + CvSize* roi_size CV_DEFAULT(NULL)); + +/* Returns width and height of array in elements */ +CVAPI(CvSize) cvGetSize( const CvArr* arr ); + +/* Copies source array to destination array */ +CVAPI(void) cvCopy( const CvArr* src, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Sets all or "masked" elements of input array + to the same value*/ +CVAPI(void) cvSet( CvArr* arr, CvScalar value, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Clears all the array elements (sets them to 0) */ +CVAPI(void) cvSetZero( CvArr* arr ); +#define cvZero cvSetZero + + +/* Splits a multi-channel array into the set of single-channel arrays or + extracts particular [color] plane */ +CVAPI(void) cvSplit( const CvArr* src, CvArr* dst0, CvArr* dst1, + CvArr* dst2, CvArr* dst3 ); + +/* Merges a set of single-channel arrays into the single multi-channel array + or inserts one particular [color] plane to the array */ +CVAPI(void) cvMerge( const CvArr* src0, const CvArr* src1, + const CvArr* src2, const CvArr* src3, + CvArr* dst ); + +/* Copies several channels from input arrays to + certain channels of output arrays */ +CVAPI(void) cvMixChannels( const CvArr** src, int src_count, + CvArr** dst, int dst_count, + const int* from_to, int pair_count ); + +/* Performs linear transformation on every source array element: + dst(x,y,c) = scale*src(x,y,c)+shift. 
+ Arbitrary combination of input and output array depths are allowed + (number of channels must be the same), thus the function can be used + for type conversion */ +CVAPI(void) cvConvertScale( const CvArr* src, CvArr* dst, + double scale CV_DEFAULT(1), + double shift CV_DEFAULT(0) ); +#define cvCvtScale cvConvertScale +#define cvScale cvConvertScale +#define cvConvert( src, dst ) cvConvertScale( (src), (dst), 1, 0 ) + + +/* Performs linear transformation on every source array element, + stores absolute value of the result: + dst(x,y,c) = abs(scale*src(x,y,c)+shift). + destination array must have 8u type. + In other cases one may use cvConvertScale + cvAbsDiffS */ +CVAPI(void) cvConvertScaleAbs( const CvArr* src, CvArr* dst, + double scale CV_DEFAULT(1), + double shift CV_DEFAULT(0) ); +#define cvCvtScaleAbs cvConvertScaleAbs + + +/* checks termination criteria validity and + sets eps to default_eps (if it is not set), + max_iter to default_max_iters (if it is not set) +*/ +CVAPI(CvTermCriteria) cvCheckTermCriteria( CvTermCriteria criteria, + double default_eps, + int default_max_iters ); + +/****************************************************************************************\ +* Arithmetic, logic and comparison operations * +\****************************************************************************************/ + +/* dst(mask) = src1(mask) + src2(mask) */ +CVAPI(void) cvAdd( const CvArr* src1, const CvArr* src2, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(mask) = src(mask) + value */ +CVAPI(void) cvAddS( const CvArr* src, CvScalar value, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(mask) = src1(mask) - src2(mask) */ +CVAPI(void) cvSub( const CvArr* src1, const CvArr* src2, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(mask) = src(mask) - value = src(mask) + (-value) */ +CV_INLINE void cvSubS( const CvArr* src, CvScalar value, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)) +{ + cvAddS( src, cvScalar( 
-value.val[0], -value.val[1], -value.val[2], -value.val[3]), + dst, mask ); +} + +/* dst(mask) = value - src(mask) */ +CVAPI(void) cvSubRS( const CvArr* src, CvScalar value, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src1(idx) * src2(idx) * scale + (scaled element-wise multiplication of 2 arrays) */ +CVAPI(void) cvMul( const CvArr* src1, const CvArr* src2, + CvArr* dst, double scale CV_DEFAULT(1) ); + +/* element-wise division/inversion with scaling: + dst(idx) = src1(idx) * scale / src2(idx) + or dst(idx) = scale / src2(idx) if src1 == 0 */ +CVAPI(void) cvDiv( const CvArr* src1, const CvArr* src2, + CvArr* dst, double scale CV_DEFAULT(1)); + +/* dst = src1 * scale + src2 */ +CVAPI(void) cvScaleAdd( const CvArr* src1, CvScalar scale, + const CvArr* src2, CvArr* dst ); +#define cvAXPY( A, real_scalar, B, C ) cvScaleAdd(A, cvRealScalar(real_scalar), B, C) + +/* dst = src1 * alpha + src2 * beta + gamma */ +CVAPI(void) cvAddWeighted( const CvArr* src1, double alpha, + const CvArr* src2, double beta, + double gamma, CvArr* dst ); + +/* result = sum_i(src1(i) * src2(i)) (results for all channels are accumulated together) */ +CVAPI(double) cvDotProduct( const CvArr* src1, const CvArr* src2 ); + +/* dst(idx) = src1(idx) & src2(idx) */ +CVAPI(void) cvAnd( const CvArr* src1, const CvArr* src2, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src(idx) & value */ +CVAPI(void) cvAndS( const CvArr* src, CvScalar value, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src1(idx) | src2(idx) */ +CVAPI(void) cvOr( const CvArr* src1, const CvArr* src2, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src(idx) | value */ +CVAPI(void) cvOrS( const CvArr* src, CvScalar value, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src1(idx) ^ src2(idx) */ +CVAPI(void) cvXor( const CvArr* src1, const CvArr* src2, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src(idx) ^ 
value */ +CVAPI(void) cvXorS( const CvArr* src, CvScalar value, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = ~src(idx) */ +CVAPI(void) cvNot( const CvArr* src, CvArr* dst ); + +/* dst(idx) = lower(idx) <= src(idx) < upper(idx) */ +CVAPI(void) cvInRange( const CvArr* src, const CvArr* lower, + const CvArr* upper, CvArr* dst ); + +/* dst(idx) = lower <= src(idx) < upper */ +CVAPI(void) cvInRangeS( const CvArr* src, CvScalar lower, + CvScalar upper, CvArr* dst ); + +#define CV_CMP_EQ 0 +#define CV_CMP_GT 1 +#define CV_CMP_GE 2 +#define CV_CMP_LT 3 +#define CV_CMP_LE 4 +#define CV_CMP_NE 5 + +/* The comparison operation support single-channel arrays only. + Destination image should be 8uC1 or 8sC1 */ + +/* dst(idx) = src1(idx) _cmp_op_ src2(idx) */ +CVAPI(void) cvCmp( const CvArr* src1, const CvArr* src2, CvArr* dst, int cmp_op ); + +/* dst(idx) = src1(idx) _cmp_op_ value */ +CVAPI(void) cvCmpS( const CvArr* src, double value, CvArr* dst, int cmp_op ); + +/* dst(idx) = min(src1(idx),src2(idx)) */ +CVAPI(void) cvMin( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/* dst(idx) = max(src1(idx),src2(idx)) */ +CVAPI(void) cvMax( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/* dst(idx) = min(src(idx),value) */ +CVAPI(void) cvMinS( const CvArr* src, double value, CvArr* dst ); + +/* dst(idx) = max(src(idx),value) */ +CVAPI(void) cvMaxS( const CvArr* src, double value, CvArr* dst ); + +/* dst(x,y,c) = abs(src1(x,y,c) - src2(x,y,c)) */ +CVAPI(void) cvAbsDiff( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/* dst(x,y,c) = abs(src(x,y,c) - value(c)) */ +CVAPI(void) cvAbsDiffS( const CvArr* src, CvArr* dst, CvScalar value ); +#define cvAbs( src, dst ) cvAbsDiffS( (src), (dst), cvScalarAll(0)) + +/****************************************************************************************\ +* Math operations * +\****************************************************************************************/ + +/* Does cartesian->polar 
coordinates conversion. + Either of output components (magnitude or angle) is optional */ +CVAPI(void) cvCartToPolar( const CvArr* x, const CvArr* y, + CvArr* magnitude, CvArr* angle CV_DEFAULT(NULL), + int angle_in_degrees CV_DEFAULT(0)); + +/* Does polar->cartesian coordinates conversion. + Either of output components (magnitude or angle) is optional. + If magnitude is missing it is assumed to be all 1's */ +CVAPI(void) cvPolarToCart( const CvArr* magnitude, const CvArr* angle, + CvArr* x, CvArr* y, + int angle_in_degrees CV_DEFAULT(0)); + +/* Does powering: dst(idx) = src(idx)^power */ +CVAPI(void) cvPow( const CvArr* src, CvArr* dst, double power ); + +/* Does exponention: dst(idx) = exp(src(idx)). + Overflow is not handled yet. Underflow is handled. + Maximal relative error is ~7e-6 for single-precision input */ +CVAPI(void) cvExp( const CvArr* src, CvArr* dst ); + +/* Calculates natural logarithms: dst(idx) = log(abs(src(idx))). + Logarithm of 0 gives large negative number(~-700) + Maximal relative error is ~3e-7 for single-precision output +*/ +CVAPI(void) cvLog( const CvArr* src, CvArr* dst ); + +/* Fast arctangent calculation */ +CVAPI(float) cvFastArctan( float y, float x ); + +/* Fast cubic root calculation */ +CVAPI(float) cvCbrt( float value ); + +/* Checks array values for NaNs, Infs or simply for too large numbers + (if CV_CHECK_RANGE is set). If CV_CHECK_QUIET is set, + no runtime errors is raised (function returns zero value in case of "bad" values). 
+ Otherwise cvError is called */ +#define CV_CHECK_RANGE 1 +#define CV_CHECK_QUIET 2 +CVAPI(int) cvCheckArr( const CvArr* arr, int flags CV_DEFAULT(0), + double min_val CV_DEFAULT(0), double max_val CV_DEFAULT(0)); +#define cvCheckArray cvCheckArr + +#define CV_RAND_UNI 0 +#define CV_RAND_NORMAL 1 +CVAPI(void) cvRandArr( CvRNG* rng, CvArr* arr, int dist_type, + CvScalar param1, CvScalar param2 ); + +CVAPI(void) cvRandShuffle( CvArr* mat, CvRNG* rng, + double iter_factor CV_DEFAULT(1.)); + +#define CV_SORT_EVERY_ROW 0 +#define CV_SORT_EVERY_COLUMN 1 +#define CV_SORT_ASCENDING 0 +#define CV_SORT_DESCENDING 16 + +CVAPI(void) cvSort( const CvArr* src, CvArr* dst CV_DEFAULT(NULL), + CvArr* idxmat CV_DEFAULT(NULL), + int flags CV_DEFAULT(0)); + +/* Finds real roots of a cubic equation */ +CVAPI(int) cvSolveCubic( const CvMat* coeffs, CvMat* roots ); + +/* Finds all real and complex roots of a polynomial equation */ +CVAPI(void) cvSolvePoly(const CvMat* coeffs, CvMat *roots2, + int maxiter CV_DEFAULT(20), int fig CV_DEFAULT(100)); + +/****************************************************************************************\ +* Matrix operations * +\****************************************************************************************/ + +/* Calculates cross product of two 3d vectors */ +CVAPI(void) cvCrossProduct( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/* Matrix transform: dst = A*B + C, C is optional */ +#define cvMatMulAdd( src1, src2, src3, dst ) cvGEMM( (src1), (src2), 1., (src3), 1., (dst), 0 ) +#define cvMatMul( src1, src2, dst ) cvMatMulAdd( (src1), (src2), NULL, (dst)) + +#define CV_GEMM_A_T 1 +#define CV_GEMM_B_T 2 +#define CV_GEMM_C_T 4 +/* Extended matrix transform: + dst = alpha*op(A)*op(B) + beta*op(C), where op(X) is X or X^T */ +CVAPI(void) cvGEMM( const CvArr* src1, const CvArr* src2, double alpha, + const CvArr* src3, double beta, CvArr* dst, + int tABC CV_DEFAULT(0)); +#define cvMatMulAddEx cvGEMM + +/* Transforms each element of source 
array and stores + resultant vectors in destination array */ +CVAPI(void) cvTransform( const CvArr* src, CvArr* dst, + const CvMat* transmat, + const CvMat* shiftvec CV_DEFAULT(NULL)); +#define cvMatMulAddS cvTransform + +/* Does perspective transform on every element of input array */ +CVAPI(void) cvPerspectiveTransform( const CvArr* src, CvArr* dst, + const CvMat* mat ); + +/* Calculates (A-delta)*(A-delta)^T (order=0) or (A-delta)^T*(A-delta) (order=1) */ +CVAPI(void) cvMulTransposed( const CvArr* src, CvArr* dst, int order, + const CvArr* delta CV_DEFAULT(NULL), + double scale CV_DEFAULT(1.) ); + +/* Tranposes matrix. Square matrices can be transposed in-place */ +CVAPI(void) cvTranspose( const CvArr* src, CvArr* dst ); +#define cvT cvTranspose + +/* Completes the symmetric matrix from the lower (LtoR=0) or from the upper (LtoR!=0) part */ +CVAPI(void) cvCompleteSymm( CvMat* matrix, int LtoR CV_DEFAULT(0) ); + +/* Mirror array data around horizontal (flip=0), + vertical (flip=1) or both(flip=-1) axises: + cvFlip(src) flips images vertically and sequences horizontally (inplace) */ +CVAPI(void) cvFlip( const CvArr* src, CvArr* dst CV_DEFAULT(NULL), + int flip_mode CV_DEFAULT(0)); +#define cvMirror cvFlip + + +#define CV_SVD_MODIFY_A 1 +#define CV_SVD_U_T 2 +#define CV_SVD_V_T 4 + +/* Performs Singular Value Decomposition of a matrix */ +CVAPI(void) cvSVD( CvArr* A, CvArr* W, CvArr* U CV_DEFAULT(NULL), + CvArr* V CV_DEFAULT(NULL), int flags CV_DEFAULT(0)); + +/* Performs Singular Value Back Substitution (solves A*X = B): + flags must be the same as in cvSVD */ +CVAPI(void) cvSVBkSb( const CvArr* W, const CvArr* U, + const CvArr* V, const CvArr* B, + CvArr* X, int flags ); + +#define CV_LU 0 +#define CV_SVD 1 +#define CV_SVD_SYM 2 +#define CV_CHOLESKY 3 +#define CV_QR 4 +#define CV_NORMAL 16 + +/* Inverts matrix */ +CVAPI(double) cvInvert( const CvArr* src, CvArr* dst, + int method CV_DEFAULT(CV_LU)); +#define cvInv cvInvert + +/* Solves linear system (src1)*(dst) 
= (src2) + (returns 0 if src1 is a singular and CV_LU method is used) */ +CVAPI(int) cvSolve( const CvArr* src1, const CvArr* src2, CvArr* dst, + int method CV_DEFAULT(CV_LU)); + +/* Calculates determinant of input matrix */ +CVAPI(double) cvDet( const CvArr* mat ); + +/* Calculates trace of the matrix (sum of elements on the main diagonal) */ +CVAPI(CvScalar) cvTrace( const CvArr* mat ); + +/* Finds eigen values and vectors of a symmetric matrix */ +CVAPI(void) cvEigenVV( CvArr* mat, CvArr* evects, CvArr* evals, + double eps CV_DEFAULT(0), + int lowindex CV_DEFAULT(-1), + int highindex CV_DEFAULT(-1)); + +///* Finds selected eigen values and vectors of a symmetric matrix */ +//CVAPI(void) cvSelectedEigenVV( CvArr* mat, CvArr* evects, CvArr* evals, +// int lowindex, int highindex ); + +/* Makes an identity matrix (mat_ij = i == j) */ +CVAPI(void) cvSetIdentity( CvArr* mat, CvScalar value CV_DEFAULT(cvRealScalar(1)) ); + +/* Fills matrix with given range of numbers */ +CVAPI(CvArr*) cvRange( CvArr* mat, double start, double end ); + +/* Calculates covariation matrix for a set of vectors */ +/* transpose([v1-avg, v2-avg,...]) * [v1-avg,v2-avg,...] */ +#define CV_COVAR_SCRAMBLED 0 + +/* [v1-avg, v2-avg,...] * transpose([v1-avg,v2-avg,...]) */ +#define CV_COVAR_NORMAL 1 + +/* do not calc average (i.e. 
mean vector) - use the input vector instead + (useful for calculating covariance matrix by parts) */ +#define CV_COVAR_USE_AVG 2 + +/* scale the covariance matrix coefficients by number of the vectors */ +#define CV_COVAR_SCALE 4 + +/* all the input vectors are stored in a single matrix, as its rows */ +#define CV_COVAR_ROWS 8 + +/* all the input vectors are stored in a single matrix, as its columns */ +#define CV_COVAR_COLS 16 + +CVAPI(void) cvCalcCovarMatrix( const CvArr** vects, int count, + CvArr* cov_mat, CvArr* avg, int flags ); + +#define CV_PCA_DATA_AS_ROW 0 +#define CV_PCA_DATA_AS_COL 1 +#define CV_PCA_USE_AVG 2 +CVAPI(void) cvCalcPCA( const CvArr* data, CvArr* mean, + CvArr* eigenvals, CvArr* eigenvects, int flags ); + +CVAPI(void) cvProjectPCA( const CvArr* data, const CvArr* mean, + const CvArr* eigenvects, CvArr* result ); + +CVAPI(void) cvBackProjectPCA( const CvArr* proj, const CvArr* mean, + const CvArr* eigenvects, CvArr* result ); + +/* Calculates Mahalanobis(weighted) distance */ +CVAPI(double) cvMahalanobis( const CvArr* vec1, const CvArr* vec2, const CvArr* mat ); +#define cvMahalonobis cvMahalanobis + +/****************************************************************************************\ +* Array Statistics * +\****************************************************************************************/ + +/* Finds sum of array elements */ +CVAPI(CvScalar) cvSum( const CvArr* arr ); + +/* Calculates number of non-zero pixels */ +CVAPI(int) cvCountNonZero( const CvArr* arr ); + +/* Calculates mean value of array elements */ +CVAPI(CvScalar) cvAvg( const CvArr* arr, const CvArr* mask CV_DEFAULT(NULL) ); + +/* Calculates mean and standard deviation of pixel values */ +CVAPI(void) cvAvgSdv( const CvArr* arr, CvScalar* mean, CvScalar* std_dev, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Finds global minimum, maximum and their positions */ +CVAPI(void) cvMinMaxLoc( const CvArr* arr, double* min_val, double* max_val, + CvPoint* min_loc 
CV_DEFAULT(NULL), + CvPoint* max_loc CV_DEFAULT(NULL), + const CvArr* mask CV_DEFAULT(NULL) ); + +/* types of array norm */ +#define CV_C 1 +#define CV_L1 2 +#define CV_L2 4 +#define CV_NORM_MASK 7 +#define CV_RELATIVE 8 +#define CV_DIFF 16 +#define CV_MINMAX 32 + +#define CV_DIFF_C (CV_DIFF | CV_C) +#define CV_DIFF_L1 (CV_DIFF | CV_L1) +#define CV_DIFF_L2 (CV_DIFF | CV_L2) +#define CV_RELATIVE_C (CV_RELATIVE | CV_C) +#define CV_RELATIVE_L1 (CV_RELATIVE | CV_L1) +#define CV_RELATIVE_L2 (CV_RELATIVE | CV_L2) + +/* Finds norm, difference norm or relative difference norm for an array (or two arrays) */ +CVAPI(double) cvNorm( const CvArr* arr1, const CvArr* arr2 CV_DEFAULT(NULL), + int norm_type CV_DEFAULT(CV_L2), + const CvArr* mask CV_DEFAULT(NULL) ); + +CVAPI(void) cvNormalize( const CvArr* src, CvArr* dst, + double a CV_DEFAULT(1.), double b CV_DEFAULT(0.), + int norm_type CV_DEFAULT(CV_L2), + const CvArr* mask CV_DEFAULT(NULL) ); + + +#define CV_REDUCE_SUM 0 +#define CV_REDUCE_AVG 1 +#define CV_REDUCE_MAX 2 +#define CV_REDUCE_MIN 3 + +CVAPI(void) cvReduce( const CvArr* src, CvArr* dst, int dim CV_DEFAULT(-1), + int op CV_DEFAULT(CV_REDUCE_SUM) ); + +/****************************************************************************************\ +* Discrete Linear Transforms and Related Functions * +\****************************************************************************************/ + +#define CV_DXT_FORWARD 0 +#define CV_DXT_INVERSE 1 +#define CV_DXT_SCALE 2 /* divide result by size of array */ +#define CV_DXT_INV_SCALE (CV_DXT_INVERSE + CV_DXT_SCALE) +#define CV_DXT_INVERSE_SCALE CV_DXT_INV_SCALE +#define CV_DXT_ROWS 4 /* transform each row individually */ +#define CV_DXT_MUL_CONJ 8 /* conjugate the second argument of cvMulSpectrums */ + +/* Discrete Fourier Transform: + complex->complex, + real->ccs (forward), + ccs->real (inverse) */ +CVAPI(void) cvDFT( const CvArr* src, CvArr* dst, int flags, + int nonzero_rows CV_DEFAULT(0) ); +#define cvFFT cvDFT + +/* 
Multiply results of DFTs: DFT(X)*DFT(Y) or DFT(X)*conj(DFT(Y)) */ +CVAPI(void) cvMulSpectrums( const CvArr* src1, const CvArr* src2, + CvArr* dst, int flags ); + +/* Finds optimal DFT vector size >= size0 */ +CVAPI(int) cvGetOptimalDFTSize( int size0 ); + +/* Discrete Cosine Transform */ +CVAPI(void) cvDCT( const CvArr* src, CvArr* dst, int flags ); + +/****************************************************************************************\ +* Dynamic data structures * +\****************************************************************************************/ + +/* Calculates length of sequence slice (with support of negative indices). */ +CVAPI(int) cvSliceLength( CvSlice slice, const CvSeq* seq ); + + +/* Creates new memory storage. + block_size == 0 means that default, + somewhat optimal size, is used (currently, it is 64K) */ +CVAPI(CvMemStorage*) cvCreateMemStorage( int block_size CV_DEFAULT(0)); + + +/* Creates a memory storage that will borrow memory blocks from parent storage */ +CVAPI(CvMemStorage*) cvCreateChildMemStorage( CvMemStorage* parent ); + + +/* Releases memory storage. All the children of a parent must be released before + the parent. A child storage returns all the blocks to parent when it is released */ +CVAPI(void) cvReleaseMemStorage( CvMemStorage** storage ); + + +/* Clears memory storage. This is the only way(!!!) (besides cvRestoreMemStoragePos) + to reuse memory allocated for the storage - cvClearSeq,cvClearSet ... + do not free any memory. 
+ A child storage returns all the blocks to the parent when it is cleared */ +CVAPI(void) cvClearMemStorage( CvMemStorage* storage ); + +/* Remember a storage "free memory" position */ +CVAPI(void) cvSaveMemStoragePos( const CvMemStorage* storage, CvMemStoragePos* pos ); + +/* Restore a storage "free memory" position */ +CVAPI(void) cvRestoreMemStoragePos( CvMemStorage* storage, CvMemStoragePos* pos ); + +/* Allocates continuous buffer of the specified size in the storage */ +CVAPI(void*) cvMemStorageAlloc( CvMemStorage* storage, size_t size ); + +/* Allocates string in memory storage */ +CVAPI(CvString) cvMemStorageAllocString( CvMemStorage* storage, const char* ptr, + int len CV_DEFAULT(-1) ); + +/* Creates new empty sequence that will reside in the specified storage */ +CVAPI(CvSeq*) cvCreateSeq( int seq_flags, int header_size, + int elem_size, CvMemStorage* storage ); + +/* Changes default size (granularity) of sequence blocks. + The default size is ~1Kbyte */ +CVAPI(void) cvSetSeqBlockSize( CvSeq* seq, int delta_elems ); + + +/* Adds new element to the end of sequence. Returns pointer to the element */ +CVAPI(schar*) cvSeqPush( CvSeq* seq, const void* element CV_DEFAULT(NULL)); + + +/* Adds new element to the beginning of sequence. 
Returns pointer to it */ +CVAPI(schar*) cvSeqPushFront( CvSeq* seq, const void* element CV_DEFAULT(NULL)); + + +/* Removes the last element from sequence and optionally saves it */ +CVAPI(void) cvSeqPop( CvSeq* seq, void* element CV_DEFAULT(NULL)); + + +/* Removes the first element from sequence and optioanally saves it */ +CVAPI(void) cvSeqPopFront( CvSeq* seq, void* element CV_DEFAULT(NULL)); + + +#define CV_FRONT 1 +#define CV_BACK 0 +/* Adds several new elements to the end of sequence */ +CVAPI(void) cvSeqPushMulti( CvSeq* seq, const void* elements, + int count, int in_front CV_DEFAULT(0) ); + +/* Removes several elements from the end of sequence and optionally saves them */ +CVAPI(void) cvSeqPopMulti( CvSeq* seq, void* elements, + int count, int in_front CV_DEFAULT(0) ); + +/* Inserts a new element in the middle of sequence. + cvSeqInsert(seq,0,elem) == cvSeqPushFront(seq,elem) */ +CVAPI(schar*) cvSeqInsert( CvSeq* seq, int before_index, + const void* element CV_DEFAULT(NULL)); + +/* Removes specified sequence element */ +CVAPI(void) cvSeqRemove( CvSeq* seq, int index ); + + +/* Removes all the elements from the sequence. The freed memory + can be reused later only by the same sequence unless cvClearMemStorage + or cvRestoreMemStoragePos is called */ +CVAPI(void) cvClearSeq( CvSeq* seq ); + + +/* Retrieves pointer to specified sequence element. + Negative indices are supported and mean counting from the end + (e.g -1 means the last sequence element) */ +CVAPI(schar*) cvGetSeqElem( const CvSeq* seq, int index ); + +/* Calculates index of the specified sequence element. + Returns -1 if element does not belong to the sequence */ +CVAPI(int) cvSeqElemIdx( const CvSeq* seq, const void* element, + CvSeqBlock** block CV_DEFAULT(NULL) ); + +/* Initializes sequence writer. 
The new elements will be added to the end of sequence */ +CVAPI(void) cvStartAppendToSeq( CvSeq* seq, CvSeqWriter* writer ); + + +/* Combination of cvCreateSeq and cvStartAppendToSeq */ +CVAPI(void) cvStartWriteSeq( int seq_flags, int header_size, + int elem_size, CvMemStorage* storage, + CvSeqWriter* writer ); + +/* Closes sequence writer, updates sequence header and returns pointer + to the resultant sequence + (which may be useful if the sequence was created using cvStartWriteSeq)) +*/ +CVAPI(CvSeq*) cvEndWriteSeq( CvSeqWriter* writer ); + + +/* Updates sequence header. May be useful to get access to some of previously + written elements via cvGetSeqElem or sequence reader */ +CVAPI(void) cvFlushSeqWriter( CvSeqWriter* writer ); + + +/* Initializes sequence reader. + The sequence can be read in forward or backward direction */ +CVAPI(void) cvStartReadSeq( const CvSeq* seq, CvSeqReader* reader, + int reverse CV_DEFAULT(0) ); + + +/* Returns current sequence reader position (currently observed sequence element) */ +CVAPI(int) cvGetSeqReaderPos( CvSeqReader* reader ); + + +/* Changes sequence reader position. It may seek to an absolute or + to relative to the current position */ +CVAPI(void) cvSetSeqReaderPos( CvSeqReader* reader, int index, + int is_relative CV_DEFAULT(0)); + +/* Copies sequence content to a continuous piece of memory */ +CVAPI(void*) cvCvtSeqToArray( const CvSeq* seq, void* elements, + CvSlice slice CV_DEFAULT(CV_WHOLE_SEQ) ); + +/* Creates sequence header for array. 
+ After that all the operations on sequences that do not alter the content + can be applied to the resultant sequence */ +CVAPI(CvSeq*) cvMakeSeqHeaderForArray( int seq_type, int header_size, + int elem_size, void* elements, int total, + CvSeq* seq, CvSeqBlock* block ); + +/* Extracts sequence slice (with or without copying sequence elements) */ +CVAPI(CvSeq*) cvSeqSlice( const CvSeq* seq, CvSlice slice, + CvMemStorage* storage CV_DEFAULT(NULL), + int copy_data CV_DEFAULT(0)); + +CV_INLINE CvSeq* cvCloneSeq( const CvSeq* seq, CvMemStorage* storage CV_DEFAULT(NULL)) +{ + return cvSeqSlice( seq, CV_WHOLE_SEQ, storage, 1 ); +} + +/* Removes sequence slice */ +CVAPI(void) cvSeqRemoveSlice( CvSeq* seq, CvSlice slice ); + +/* Inserts a sequence or array into another sequence */ +CVAPI(void) cvSeqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr ); + +/* a < b ? -1 : a > b ? 1 : 0 */ +typedef int (CV_CDECL* CvCmpFunc)(const void* a, const void* b, void* userdata ); + +/* Sorts sequence in-place given element comparison function */ +CVAPI(void) cvSeqSort( CvSeq* seq, CvCmpFunc func, void* userdata CV_DEFAULT(NULL) ); + +/* Finds element in a [sorted] sequence */ +CVAPI(schar*) cvSeqSearch( CvSeq* seq, const void* elem, CvCmpFunc func, + int is_sorted, int* elem_idx, + void* userdata CV_DEFAULT(NULL) ); + +/* Reverses order of sequence elements in-place */ +CVAPI(void) cvSeqInvert( CvSeq* seq ); + +/* Splits sequence into one or more equivalence classes using the specified criteria */ +CVAPI(int) cvSeqPartition( const CvSeq* seq, CvMemStorage* storage, + CvSeq** labels, CvCmpFunc is_equal, void* userdata ); + +/************ Internal sequence functions ************/ +CVAPI(void) cvChangeSeqBlock( void* reader, int direction ); +CVAPI(void) cvCreateSeqBlock( CvSeqWriter* writer ); + + +/* Creates a new set */ +CVAPI(CvSet*) cvCreateSet( int set_flags, int header_size, + int elem_size, CvMemStorage* storage ); + +/* Adds new element to the set and returns 
pointer to it */ +CVAPI(int) cvSetAdd( CvSet* set_header, CvSetElem* elem CV_DEFAULT(NULL), + CvSetElem** inserted_elem CV_DEFAULT(NULL) ); + +/* Fast variant of cvSetAdd */ +CV_INLINE CvSetElem* cvSetNew( CvSet* set_header ) +{ + CvSetElem* elem = set_header->free_elems; + if( elem ) + { + set_header->free_elems = elem->next_free; + elem->flags = elem->flags & CV_SET_ELEM_IDX_MASK; + set_header->active_count++; + } + else + cvSetAdd( set_header, NULL, (CvSetElem**)&elem ); + return elem; +} + +/* Removes set element given its pointer */ +CV_INLINE void cvSetRemoveByPtr( CvSet* set_header, void* elem ) +{ + CvSetElem* _elem = (CvSetElem*)elem; + assert( _elem->flags >= 0 /*&& (elem->flags & CV_SET_ELEM_IDX_MASK) < set_header->total*/ ); + _elem->next_free = set_header->free_elems; + _elem->flags = (_elem->flags & CV_SET_ELEM_IDX_MASK) | CV_SET_ELEM_FREE_FLAG; + set_header->free_elems = _elem; + set_header->active_count--; +} + +/* Removes element from the set by its index */ +CVAPI(void) cvSetRemove( CvSet* set_header, int index ); + +/* Returns a set element by index. If the element doesn't belong to the set, + NULL is returned */ +CV_INLINE CvSetElem* cvGetSetElem( const CvSet* set_header, int index ) +{ + CvSetElem* elem = (CvSetElem*)cvGetSeqElem( (CvSeq*)set_header, index ); + return elem && CV_IS_SET_ELEM( elem ) ? 
elem : 0; +} + +/* Removes all the elements from the set */ +CVAPI(void) cvClearSet( CvSet* set_header ); + +/* Creates new graph */ +CVAPI(CvGraph*) cvCreateGraph( int graph_flags, int header_size, + int vtx_size, int edge_size, + CvMemStorage* storage ); + +/* Adds new vertex to the graph */ +CVAPI(int) cvGraphAddVtx( CvGraph* graph, const CvGraphVtx* vtx CV_DEFAULT(NULL), + CvGraphVtx** inserted_vtx CV_DEFAULT(NULL) ); + + +/* Removes vertex from the graph together with all incident edges */ +CVAPI(int) cvGraphRemoveVtx( CvGraph* graph, int index ); +CVAPI(int) cvGraphRemoveVtxByPtr( CvGraph* graph, CvGraphVtx* vtx ); + + +/* Link two vertices specifed by indices or pointers if they + are not connected or return pointer to already existing edge + connecting the vertices. + Functions return 1 if a new edge was created, 0 otherwise */ +CVAPI(int) cvGraphAddEdge( CvGraph* graph, + int start_idx, int end_idx, + const CvGraphEdge* edge CV_DEFAULT(NULL), + CvGraphEdge** inserted_edge CV_DEFAULT(NULL) ); + +CVAPI(int) cvGraphAddEdgeByPtr( CvGraph* graph, + CvGraphVtx* start_vtx, CvGraphVtx* end_vtx, + const CvGraphEdge* edge CV_DEFAULT(NULL), + CvGraphEdge** inserted_edge CV_DEFAULT(NULL) ); + +/* Remove edge connecting two vertices */ +CVAPI(void) cvGraphRemoveEdge( CvGraph* graph, int start_idx, int end_idx ); +CVAPI(void) cvGraphRemoveEdgeByPtr( CvGraph* graph, CvGraphVtx* start_vtx, + CvGraphVtx* end_vtx ); + +/* Find edge connecting two vertices */ +CVAPI(CvGraphEdge*) cvFindGraphEdge( const CvGraph* graph, int start_idx, int end_idx ); +CVAPI(CvGraphEdge*) cvFindGraphEdgeByPtr( const CvGraph* graph, + const CvGraphVtx* start_vtx, + const CvGraphVtx* end_vtx ); +#define cvGraphFindEdge cvFindGraphEdge +#define cvGraphFindEdgeByPtr cvFindGraphEdgeByPtr + +/* Remove all vertices and edges from the graph */ +CVAPI(void) cvClearGraph( CvGraph* graph ); + + +/* Count number of edges incident to the vertex */ +CVAPI(int) cvGraphVtxDegree( const CvGraph* graph, int 
vtx_idx ); +CVAPI(int) cvGraphVtxDegreeByPtr( const CvGraph* graph, const CvGraphVtx* vtx ); + + +/* Retrieves graph vertex by given index */ +#define cvGetGraphVtx( graph, idx ) (CvGraphVtx*)cvGetSetElem((CvSet*)(graph), (idx)) + +/* Retrieves index of a graph vertex given its pointer */ +#define cvGraphVtxIdx( graph, vtx ) ((vtx)->flags & CV_SET_ELEM_IDX_MASK) + +/* Retrieves index of a graph edge given its pointer */ +#define cvGraphEdgeIdx( graph, edge ) ((edge)->flags & CV_SET_ELEM_IDX_MASK) + +#define cvGraphGetVtxCount( graph ) ((graph)->active_count) +#define cvGraphGetEdgeCount( graph ) ((graph)->edges->active_count) + +#define CV_GRAPH_VERTEX 1 +#define CV_GRAPH_TREE_EDGE 2 +#define CV_GRAPH_BACK_EDGE 4 +#define CV_GRAPH_FORWARD_EDGE 8 +#define CV_GRAPH_CROSS_EDGE 16 +#define CV_GRAPH_ANY_EDGE 30 +#define CV_GRAPH_NEW_TREE 32 +#define CV_GRAPH_BACKTRACKING 64 +#define CV_GRAPH_OVER -1 + +#define CV_GRAPH_ALL_ITEMS -1 + +/* flags for graph vertices and edges */ +#define CV_GRAPH_ITEM_VISITED_FLAG (1 << 30) +#define CV_IS_GRAPH_VERTEX_VISITED(vtx) \ + (((CvGraphVtx*)(vtx))->flags & CV_GRAPH_ITEM_VISITED_FLAG) +#define CV_IS_GRAPH_EDGE_VISITED(edge) \ + (((CvGraphEdge*)(edge))->flags & CV_GRAPH_ITEM_VISITED_FLAG) +#define CV_GRAPH_SEARCH_TREE_NODE_FLAG (1 << 29) +#define CV_GRAPH_FORWARD_EDGE_FLAG (1 << 28) + +typedef struct CvGraphScanner +{ + CvGraphVtx* vtx; /* current graph vertex (or current edge origin) */ + CvGraphVtx* dst; /* current graph edge destination vertex */ + CvGraphEdge* edge; /* current edge */ + + CvGraph* graph; /* the graph */ + CvSeq* stack; /* the graph vertex stack */ + int index; /* the lower bound of certainly visited vertices */ + int mask; /* event mask */ +} +CvGraphScanner; + +/* Creates new graph scanner. */ +CVAPI(CvGraphScanner*) cvCreateGraphScanner( CvGraph* graph, + CvGraphVtx* vtx CV_DEFAULT(NULL), + int mask CV_DEFAULT(CV_GRAPH_ALL_ITEMS)); + +/* Releases graph scanner. 
*/ +CVAPI(void) cvReleaseGraphScanner( CvGraphScanner** scanner ); + +/* Get next graph element */ +CVAPI(int) cvNextGraphItem( CvGraphScanner* scanner ); + +/* Creates a copy of graph */ +CVAPI(CvGraph*) cvCloneGraph( const CvGraph* graph, CvMemStorage* storage ); + +/****************************************************************************************\ +* Drawing * +\****************************************************************************************/ + +/****************************************************************************************\ +* Drawing functions work with images/matrices of arbitrary type. * +* For color images the channel order is BGR[A] * +* Antialiasing is supported only for 8-bit image now. * +* All the functions include parameter color that means rgb value (that may be * +* constructed with CV_RGB macro) for color images and brightness * +* for grayscale images. * +* If a drawn figure is partially or completely outside of the image, it is clipped.* +\****************************************************************************************/ + +#define CV_RGB( r, g, b ) cvScalar( (b), (g), (r), 0 ) +#define CV_FILLED -1 + +#define CV_AA 16 + +/* Draws 4-connected, 8-connected or antialiased line segment connecting two points */ +CVAPI(void) cvLine( CvArr* img, CvPoint pt1, CvPoint pt2, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ); + +/* Draws a rectangle given two opposite corners of the rectangle (pt1 & pt2), + if thickness<0 (e.g. 
thickness == CV_FILLED), the filled box is drawn */ +CVAPI(void) cvRectangle( CvArr* img, CvPoint pt1, CvPoint pt2, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), + int shift CV_DEFAULT(0)); + +/* Draws a rectangle specified by a CvRect structure */ +CVAPI(void) cvRectangleR( CvArr* img, CvRect r, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), + int shift CV_DEFAULT(0)); + + +/* Draws a circle with specified center and radius. + Thickness works in the same way as with cvRectangle */ +CVAPI(void) cvCircle( CvArr* img, CvPoint center, int radius, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0)); + +/* Draws ellipse outline, filled ellipse, elliptic arc or filled elliptic sector, + depending on , and parameters. The resultant figure + is rotated by . All the angles are in degrees */ +CVAPI(void) cvEllipse( CvArr* img, CvPoint center, CvSize axes, + double angle, double start_angle, double end_angle, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0)); + +CV_INLINE void cvEllipseBox( CvArr* img, CvBox2D box, CvScalar color, + int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ) +{ + CvSize axes; + axes.width = cvRound(box.size.width*0.5); + axes.height = cvRound(box.size.height*0.5); + + cvEllipse( img, cvPointFrom32f( box.center ), axes, box.angle, + 0, 360, color, thickness, line_type, shift ); +} + +/* Fills convex or monotonous polygon. 
*/ +CVAPI(void) cvFillConvexPoly( CvArr* img, const CvPoint* pts, int npts, CvScalar color, + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0)); + +/* Fills an area bounded by one or more arbitrary polygons */ +CVAPI(void) cvFillPoly( CvArr* img, CvPoint** pts, const int* npts, + int contours, CvScalar color, + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ); + +/* Draws one or more polygonal curves */ +CVAPI(void) cvPolyLine( CvArr* img, CvPoint** pts, const int* npts, int contours, + int is_closed, CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ); + +#define cvDrawRect cvRectangle +#define cvDrawLine cvLine +#define cvDrawCircle cvCircle +#define cvDrawEllipse cvEllipse +#define cvDrawPolyLine cvPolyLine + +/* Clips the line segment connecting *pt1 and *pt2 + by the rectangular window + (0<=xptr will point + to pt1 (or pt2, see left_to_right description) location in the image. + Returns the number of pixels on the line between the ending points. */ +CVAPI(int) cvInitLineIterator( const CvArr* image, CvPoint pt1, CvPoint pt2, + CvLineIterator* line_iterator, + int connectivity CV_DEFAULT(8), + int left_to_right CV_DEFAULT(0)); + +/* Moves iterator to the next line point */ +#define CV_NEXT_LINE_POINT( line_iterator ) \ +{ \ + int _line_iterator_mask = (line_iterator).err < 0 ? 
-1 : 0; \ + (line_iterator).err += (line_iterator).minus_delta + \ + ((line_iterator).plus_delta & _line_iterator_mask); \ + (line_iterator).ptr += (line_iterator).minus_step + \ + ((line_iterator).plus_step & _line_iterator_mask); \ +} + + +/* basic font types */ +#define CV_FONT_HERSHEY_SIMPLEX 0 +#define CV_FONT_HERSHEY_PLAIN 1 +#define CV_FONT_HERSHEY_DUPLEX 2 +#define CV_FONT_HERSHEY_COMPLEX 3 +#define CV_FONT_HERSHEY_TRIPLEX 4 +#define CV_FONT_HERSHEY_COMPLEX_SMALL 5 +#define CV_FONT_HERSHEY_SCRIPT_SIMPLEX 6 +#define CV_FONT_HERSHEY_SCRIPT_COMPLEX 7 + +/* font flags */ +#define CV_FONT_ITALIC 16 + +#define CV_FONT_VECTOR0 CV_FONT_HERSHEY_SIMPLEX + + +/* Font structure */ +typedef struct CvFont +{ + const char* nameFont; //Qt:nameFont + CvScalar color; //Qt:ColorFont -> cvScalar(blue_component, green_component, red\_component[, alpha_component]) + int font_face; //Qt: bool italic /* =CV_FONT_* */ + const int* ascii; /* font data and metrics */ + const int* greek; + const int* cyrillic; + float hscale, vscale; + float shear; /* slope coefficient: 0 - normal, >0 - italic */ + int thickness; //Qt: weight /* letters thickness */ + float dx; /* horizontal interval between letters */ + int line_type; //Qt: PointSize +} +CvFont; + +/* Initializes font structure used further in cvPutText */ +CVAPI(void) cvInitFont( CvFont* font, int font_face, + double hscale, double vscale, + double shear CV_DEFAULT(0), + int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8)); + +CV_INLINE CvFont cvFont( double scale, int thickness CV_DEFAULT(1) ) +{ + CvFont font; + cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, scale, scale, 0, thickness, CV_AA ); + return font; +} + +/* Renders text stroke with specified font and color at specified location. 
+ CvFont should be initialized with cvInitFont */ +CVAPI(void) cvPutText( CvArr* img, const char* text, CvPoint org, + const CvFont* font, CvScalar color ); + +/* Calculates bounding box of text stroke (useful for alignment) */ +CVAPI(void) cvGetTextSize( const char* text_string, const CvFont* font, + CvSize* text_size, int* baseline ); + + + +/* Unpacks color value, if arrtype is CV_8UC?, is treated as + packed color value, otherwise the first channels (depending on arrtype) + of destination scalar are set to the same value = */ +CVAPI(CvScalar) cvColorToScalar( double packed_color, int arrtype ); + +/* Returns the polygon points which make up the given ellipse. The ellipse is define by + the box of size 'axes' rotated 'angle' around the 'center'. A partial sweep + of the ellipse arc can be done by spcifying arc_start and arc_end to be something + other than 0 and 360, respectively. The input array 'pts' must be large enough to + hold the result. The total number of points stored into 'pts' is returned by this + function. */ +CVAPI(int) cvEllipse2Poly( CvPoint center, CvSize axes, + int angle, int arc_start, int arc_end, CvPoint * pts, int delta ); + +/* Draws contour outlines or filled interiors on the image */ +CVAPI(void) cvDrawContours( CvArr *img, CvSeq* contour, + CvScalar external_color, CvScalar hole_color, + int max_level, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), + CvPoint offset CV_DEFAULT(cvPoint(0,0))); + +/* Does look-up transformation. 
Elements of the source array + (that should be 8uC1 or 8sC1) are used as indexes in lutarr 256-element table */ +CVAPI(void) cvLUT( const CvArr* src, CvArr* dst, const CvArr* lut ); + + +/******************* Iteration through the sequence tree *****************/ +typedef struct CvTreeNodeIterator +{ + const void* node; + int level; + int max_level; +} +CvTreeNodeIterator; + +CVAPI(void) cvInitTreeNodeIterator( CvTreeNodeIterator* tree_iterator, + const void* first, int max_level ); +CVAPI(void*) cvNextTreeNode( CvTreeNodeIterator* tree_iterator ); +CVAPI(void*) cvPrevTreeNode( CvTreeNodeIterator* tree_iterator ); + +/* Inserts sequence into tree with specified "parent" sequence. + If parent is equal to frame (e.g. the most external contour), + then added contour will have null pointer to parent. */ +CVAPI(void) cvInsertNodeIntoTree( void* node, void* parent, void* frame ); + +/* Removes contour from tree (together with the contour children). */ +CVAPI(void) cvRemoveNodeFromTree( void* node, void* frame ); + +/* Gathers pointers to all the sequences, + accessible from the , to the single sequence */ +CVAPI(CvSeq*) cvTreeToNodeSeq( const void* first, int header_size, + CvMemStorage* storage ); + +/* The function implements the K-means algorithm for clustering an array of sample + vectors in a specified number of classes */ +#define CV_KMEANS_USE_INITIAL_LABELS 1 +CVAPI(int) cvKMeans2( const CvArr* samples, int cluster_count, CvArr* labels, + CvTermCriteria termcrit, int attempts CV_DEFAULT(1), + CvRNG* rng CV_DEFAULT(0), int flags CV_DEFAULT(0), + CvArr* _centers CV_DEFAULT(0), double* compactness CV_DEFAULT(0) ); + +/****************************************************************************************\ +* System functions * +\****************************************************************************************/ + +/* Add the function pointers table with associated information to the IPP primitives list */ +CVAPI(int) cvRegisterModule( const CvModuleInfo* 
module_info ); + +/* Loads optimized functions from IPP, MKL etc. or switches back to pure C code */ +CVAPI(int) cvUseOptimized( int on_off ); + +/* Retrieves information about the registered modules and loaded optimized plugins */ +CVAPI(void) cvGetModuleInfo( const char* module_name, + const char** version, + const char** loaded_addon_plugins ); + +typedef void* (CV_CDECL *CvAllocFunc)(size_t size, void* userdata); +typedef int (CV_CDECL *CvFreeFunc)(void* pptr, void* userdata); + +/* Set user-defined memory managment functions (substitutors for malloc and free) that + will be called by cvAlloc, cvFree and higher-level functions (e.g. cvCreateImage) */ +CVAPI(void) cvSetMemoryManager( CvAllocFunc alloc_func CV_DEFAULT(NULL), + CvFreeFunc free_func CV_DEFAULT(NULL), + void* userdata CV_DEFAULT(NULL)); + + +typedef IplImage* (CV_STDCALL* Cv_iplCreateImageHeader) + (int,int,int,char*,char*,int,int,int,int,int, + IplROI*,IplImage*,void*,IplTileInfo*); +typedef void (CV_STDCALL* Cv_iplAllocateImageData)(IplImage*,int,int); +typedef void (CV_STDCALL* Cv_iplDeallocate)(IplImage*,int); +typedef IplROI* (CV_STDCALL* Cv_iplCreateROI)(int,int,int,int,int); +typedef IplImage* (CV_STDCALL* Cv_iplCloneImage)(const IplImage*); + +/* Makes OpenCV use IPL functions for IplImage allocation/deallocation */ +CVAPI(void) cvSetIPLAllocators( Cv_iplCreateImageHeader create_header, + Cv_iplAllocateImageData allocate_data, + Cv_iplDeallocate deallocate, + Cv_iplCreateROI create_roi, + Cv_iplCloneImage clone_image ); + +#define CV_TURN_ON_IPL_COMPATIBILITY() \ + cvSetIPLAllocators( iplCreateImageHeader, iplAllocateImage, \ + iplDeallocate, iplCreateROI, iplCloneImage ) + +/****************************************************************************************\ +* Data Persistence * +\****************************************************************************************/ + +/********************************** High-level functions ********************************/ + +/* opens existing or 
creates new file storage */ +CVAPI(CvFileStorage*) cvOpenFileStorage( const char* filename, CvMemStorage* memstorage, + int flags, const char* encoding CV_DEFAULT(NULL) ); + +/* closes file storage and deallocates buffers */ +CVAPI(void) cvReleaseFileStorage( CvFileStorage** fs ); + +/* returns attribute value or 0 (NULL) if there is no such attribute */ +CVAPI(const char*) cvAttrValue( const CvAttrList* attr, const char* attr_name ); + +/* starts writing compound structure (map or sequence) */ +CVAPI(void) cvStartWriteStruct( CvFileStorage* fs, const char* name, + int struct_flags, const char* type_name CV_DEFAULT(NULL), + CvAttrList attributes CV_DEFAULT(cvAttrList())); + +/* finishes writing compound structure */ +CVAPI(void) cvEndWriteStruct( CvFileStorage* fs ); + +/* writes an integer */ +CVAPI(void) cvWriteInt( CvFileStorage* fs, const char* name, int value ); + +/* writes a floating-point number */ +CVAPI(void) cvWriteReal( CvFileStorage* fs, const char* name, double value ); + +/* writes a string */ +CVAPI(void) cvWriteString( CvFileStorage* fs, const char* name, + const char* str, int quote CV_DEFAULT(0) ); + +/* writes a comment */ +CVAPI(void) cvWriteComment( CvFileStorage* fs, const char* comment, + int eol_comment ); + +/* writes instance of a standard type (matrix, image, sequence, graph etc.) 
+ or user-defined type */ +CVAPI(void) cvWrite( CvFileStorage* fs, const char* name, const void* ptr, + CvAttrList attributes CV_DEFAULT(cvAttrList())); + +/* starts the next stream */ +CVAPI(void) cvStartNextStream( CvFileStorage* fs ); + +/* helper function: writes multiple integer or floating-point numbers */ +CVAPI(void) cvWriteRawData( CvFileStorage* fs, const void* src, + int len, const char* dt ); + +/* returns the hash entry corresponding to the specified literal key string or 0 + if there is no such a key in the storage */ +CVAPI(CvStringHashNode*) cvGetHashedKey( CvFileStorage* fs, const char* name, + int len CV_DEFAULT(-1), + int create_missing CV_DEFAULT(0)); + +/* returns file node with the specified key within the specified map + (collection of named nodes) */ +CVAPI(CvFileNode*) cvGetRootFileNode( const CvFileStorage* fs, + int stream_index CV_DEFAULT(0) ); + +/* returns file node with the specified key within the specified map + (collection of named nodes) */ +CVAPI(CvFileNode*) cvGetFileNode( CvFileStorage* fs, CvFileNode* map, + const CvStringHashNode* key, + int create_missing CV_DEFAULT(0) ); + +/* this is a slower version of cvGetFileNode that takes the key as a literal string */ +CVAPI(CvFileNode*) cvGetFileNodeByName( const CvFileStorage* fs, + const CvFileNode* map, + const char* name ); + +CV_INLINE int cvReadInt( const CvFileNode* node, int default_value CV_DEFAULT(0) ) +{ + return !node ? default_value : + CV_NODE_IS_INT(node->tag) ? node->data.i : + CV_NODE_IS_REAL(node->tag) ? cvRound(node->data.f) : 0x7fffffff; +} + + +CV_INLINE int cvReadIntByName( const CvFileStorage* fs, const CvFileNode* map, + const char* name, int default_value CV_DEFAULT(0) ) +{ + return cvReadInt( cvGetFileNodeByName( fs, map, name ), default_value ); +} + + +CV_INLINE double cvReadReal( const CvFileNode* node, double default_value CV_DEFAULT(0.) ) +{ + return !node ? default_value : + CV_NODE_IS_INT(node->tag) ? 
(double)node->data.i : + CV_NODE_IS_REAL(node->tag) ? node->data.f : 1e300; +} + + +CV_INLINE double cvReadRealByName( const CvFileStorage* fs, const CvFileNode* map, + const char* name, double default_value CV_DEFAULT(0.) ) +{ + return cvReadReal( cvGetFileNodeByName( fs, map, name ), default_value ); +} + + +CV_INLINE const char* cvReadString( const CvFileNode* node, + const char* default_value CV_DEFAULT(NULL) ) +{ + return !node ? default_value : CV_NODE_IS_STRING(node->tag) ? node->data.str.ptr : 0; +} + + +CV_INLINE const char* cvReadStringByName( const CvFileStorage* fs, const CvFileNode* map, + const char* name, const char* default_value CV_DEFAULT(NULL) ) +{ + return cvReadString( cvGetFileNodeByName( fs, map, name ), default_value ); +} + + +/* decodes standard or user-defined object and returns it */ +CVAPI(void*) cvRead( CvFileStorage* fs, CvFileNode* node, + CvAttrList* attributes CV_DEFAULT(NULL)); + +/* decodes standard or user-defined object and returns it */ +CV_INLINE void* cvReadByName( CvFileStorage* fs, const CvFileNode* map, + const char* name, CvAttrList* attributes CV_DEFAULT(NULL) ) +{ + return cvRead( fs, cvGetFileNodeByName( fs, map, name ), attributes ); +} + + +/* starts reading data from sequence or scalar numeric node */ +CVAPI(void) cvStartReadRawData( const CvFileStorage* fs, const CvFileNode* src, + CvSeqReader* reader ); + +/* reads multiple numbers and stores them to array */ +CVAPI(void) cvReadRawDataSlice( const CvFileStorage* fs, CvSeqReader* reader, + int count, void* dst, const char* dt ); + +/* combination of two previous functions for easier reading of whole sequences */ +CVAPI(void) cvReadRawData( const CvFileStorage* fs, const CvFileNode* src, + void* dst, const char* dt ); + +/* writes a copy of file node to file storage */ +CVAPI(void) cvWriteFileNode( CvFileStorage* fs, const char* new_node_name, + const CvFileNode* node, int embed ); + +/* returns name of file node */ +CVAPI(const char*) cvGetFileNodeName( const 
CvFileNode* node ); + +/*********************************** Adding own types ***********************************/ + +CVAPI(void) cvRegisterType( const CvTypeInfo* info ); +CVAPI(void) cvUnregisterType( const char* type_name ); +CVAPI(CvTypeInfo*) cvFirstType(void); +CVAPI(CvTypeInfo*) cvFindType( const char* type_name ); +CVAPI(CvTypeInfo*) cvTypeOf( const void* struct_ptr ); + +/* universal functions */ +CVAPI(void) cvRelease( void** struct_ptr ); +CVAPI(void*) cvClone( const void* struct_ptr ); + +/* simple API for reading/writing data */ +CVAPI(void) cvSave( const char* filename, const void* struct_ptr, + const char* name CV_DEFAULT(NULL), + const char* comment CV_DEFAULT(NULL), + CvAttrList attributes CV_DEFAULT(cvAttrList())); +CVAPI(void*) cvLoad( const char* filename, + CvMemStorage* memstorage CV_DEFAULT(NULL), + const char* name CV_DEFAULT(NULL), + const char** real_name CV_DEFAULT(NULL) ); + +/*********************************** Measuring Execution Time ***************************/ + +/* helper functions for RNG initialization and accurate time measurement: + uses internal clock counter on x86 */ +CVAPI(int64) cvGetTickCount( void ); +CVAPI(double) cvGetTickFrequency( void ); + +/*********************************** CPU capabilities ***********************************/ + +#define CV_CPU_NONE 0 +#define CV_CPU_MMX 1 +#define CV_CPU_SSE 2 +#define CV_CPU_SSE2 3 +#define CV_CPU_SSE3 4 +#define CV_CPU_SSSE3 5 +#define CV_CPU_SSE4_1 6 +#define CV_CPU_SSE4_2 7 +#define CV_CPU_POPCNT 8 +#define CV_CPU_AVX 10 +#define CV_HARDWARE_MAX_FEATURE 255 + +CVAPI(int) cvCheckHardwareSupport(int feature); + +/*********************************** Multi-Threading ************************************/ + +/* retrieve/set the number of threads used in OpenMP implementations */ +CVAPI(int) cvGetNumThreads( void ); +CVAPI(void) cvSetNumThreads( int threads CV_DEFAULT(0) ); +/* get index of the thread being executed */ +CVAPI(int) cvGetThreadNum( void ); + + 
+/********************************** Error Handling **************************************/ + +/* Get current OpenCV error status */ +CVAPI(int) cvGetErrStatus( void ); + +/* Sets error status silently */ +CVAPI(void) cvSetErrStatus( int status ); + +#define CV_ErrModeLeaf 0 /* Print error and exit program */ +#define CV_ErrModeParent 1 /* Print error and continue */ +#define CV_ErrModeSilent 2 /* Don't print and continue */ + +/* Retrives current error processing mode */ +CVAPI(int) cvGetErrMode( void ); + +/* Sets error processing mode, returns previously used mode */ +CVAPI(int) cvSetErrMode( int mode ); + +/* Sets error status and performs some additonal actions (displaying message box, + writing message to stderr, terminating application etc.) + depending on the current error mode */ +CVAPI(void) cvError( int status, const char* func_name, + const char* err_msg, const char* file_name, int line ); + +/* Retrieves textual description of the error given its code */ +CVAPI(const char*) cvErrorStr( int status ); + +/* Retrieves detailed information about the last error occured */ +CVAPI(int) cvGetErrInfo( const char** errcode_desc, const char** description, + const char** filename, int* line ); + +/* Maps IPP error codes to the counterparts from OpenCV */ +CVAPI(int) cvErrorFromIppStatus( int ipp_status ); + +typedef int (CV_CDECL *CvErrorCallback)( int status, const char* func_name, + const char* err_msg, const char* file_name, int line, void* userdata ); + +/* Assigns a new error-handling function */ +CVAPI(CvErrorCallback) cvRedirectError( CvErrorCallback error_handler, + void* userdata CV_DEFAULT(NULL), + void** prev_userdata CV_DEFAULT(NULL) ); + +/* + Output to: + cvNulDevReport - nothing + cvStdErrReport - console(fprintf(stderr,...)) + cvGuiBoxReport - MessageBox(WIN32) + */ +CVAPI(int) cvNulDevReport( int status, const char* func_name, const char* err_msg, + const char* file_name, int line, void* userdata ); + +CVAPI(int) cvStdErrReport( int status, const 
char* func_name, const char* err_msg, + const char* file_name, int line, void* userdata ); + +CVAPI(int) cvGuiBoxReport( int status, const char* func_name, const char* err_msg, + const char* file_name, int line, void* userdata ); + +#define OPENCV_ERROR(status,func,context) \ +cvError((status),(func),(context),__FILE__,__LINE__) + +#define OPENCV_ERRCHK(func,context) \ +{if (cvGetErrStatus() >= 0) \ +{OPENCV_ERROR(CV_StsBackTrace,(func),(context));}} + +#define OPENCV_ASSERT(expr,func,context) \ +{if (! (expr)) \ +{OPENCV_ERROR(CV_StsInternal,(func),(context));}} + +#define OPENCV_RSTERR() (cvSetErrStatus(CV_StsOk)) + +#define OPENCV_CALL( Func ) \ +{ \ +Func; \ +} + + +/* CV_FUNCNAME macro defines icvFuncName constant which is used by CV_ERROR macro */ +#ifdef CV_NO_FUNC_NAMES +#define CV_FUNCNAME( Name ) +#define cvFuncName "" +#else +#define CV_FUNCNAME( Name ) \ +static char cvFuncName[] = Name +#endif + + +/* + CV_ERROR macro unconditionally raises error with passed code and message. + After raising error, control will be transferred to the exit label. + */ +#define CV_ERROR( Code, Msg ) \ +{ \ + cvError( (Code), cvFuncName, Msg, __FILE__, __LINE__ ); \ + __CV_EXIT__; \ +} + +/* Simplified form of CV_ERROR */ +#define CV_ERROR_FROM_CODE( code ) \ + CV_ERROR( code, "" ) + +/* + CV_CHECK macro checks error status after CV (or IPL) + function call. If error detected, control will be transferred to the exit + label. + */ +#define CV_CHECK() \ +{ \ + if( cvGetErrStatus() < 0 ) \ + CV_ERROR( CV_StsBackTrace, "Inner function failed." ); \ +} + + +/* + CV_CALL macro calls CV (or IPL) function, checks error status and + signals a error if the function failed. 
Useful in "parent node" + error procesing mode + */ +#define CV_CALL( Func ) \ +{ \ + Func; \ + CV_CHECK(); \ +} + + +/* Runtime assertion macro */ +#define CV_ASSERT( Condition ) \ +{ \ + if( !(Condition) ) \ + CV_ERROR( CV_StsInternal, "Assertion: " #Condition " failed" ); \ +} + +#define __CV_BEGIN__ { +#define __CV_END__ goto exit; exit: ; } +#define __CV_EXIT__ goto exit + +#ifdef __cplusplus +} + +// classes for automatic module/RTTI data registration/unregistration +struct CV_EXPORTS CvModule +{ + CvModule( CvModuleInfo* _info ); + ~CvModule(); + CvModuleInfo* info; + + static CvModuleInfo* first; + static CvModuleInfo* last; +}; + +struct CV_EXPORTS CvType +{ + CvType( const char* type_name, + CvIsInstanceFunc is_instance, CvReleaseFunc release=0, + CvReadFunc read=0, CvWriteFunc write=0, CvCloneFunc clone=0 ); + ~CvType(); + CvTypeInfo* info; + + static CvTypeInfo* first; + static CvTypeInfo* last; +}; + +#endif + +#endif diff --git a/opencv/core/opencv2/core/eigen.hpp b/opencv/core/opencv2/core/eigen.hpp new file mode 100644 index 0000000..505652f --- /dev/null +++ b/opencv/core/opencv2/core/eigen.hpp @@ -0,0 +1,186 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_CORE_EIGEN_HPP__ +#define __OPENCV_CORE_EIGEN_HPP__ + +#ifdef __cplusplus + +#include "cxcore.h" + +namespace cv +{ + +template +void eigen2cv( const Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& src, Mat& dst ) +{ + if( !(src.Flags & Eigen::RowMajorBit) ) + { + Mat _src(src.cols(), src.rows(), DataType<_Tp>::type, + (void*)src.data(), src.stride()*sizeof(_Tp)); + transpose(_src, dst); + } + else + { + Mat _src(src.rows(), src.cols(), DataType<_Tp>::type, + (void*)src.data(), src.stride()*sizeof(_Tp)); + _src.copyTo(dst); + } +} + +template +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& dst ) +{ + CV_DbgAssert(src.rows == _rows && src.cols == _cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(src.cols, src.rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else if( src.cols == src.rows ) + { + src.convertTo(_dst, _dst.type()); + transpose(_dst, _dst); + } + else + Mat(src.t()).convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(src.rows, src.cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + +template +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, Eigen::Dynamic, Eigen::Dynamic>& dst ) +{ + dst.resize(src.rows, src.cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(src.cols, src.rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else if( src.cols == src.rows ) + { + src.convertTo(_dst, _dst.type()); + transpose(_dst, _dst); + } + else + Mat(src.t()).convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(src.rows, src.cols, DataType<_Tp>::type, 
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + + +template +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, Eigen::Dynamic, 1>& dst ) +{ + CV_Assert(src.cols == 1); + dst.resize(src.rows); + + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(src.cols, src.rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else + Mat(src.t()).convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(src.rows, src.cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + + +template +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, 1, Eigen::Dynamic>& dst ) +{ + CV_Assert(src.rows == 1); + dst.resize(src.cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(src.cols, src.rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else + Mat(src.t()).convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(src.rows, src.cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + +} + +#endif + +#endif + diff --git a/opencv/core/opencv2/core/internal.hpp b/opencv/core/opencv2/core/internal.hpp new file mode 100644 index 0000000..437aa0a --- /dev/null +++ b/opencv/core/opencv2/core/internal.hpp @@ -0,0 +1,710 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* The header is for internal use and it is likely to change. 
+ It contains some macro definitions that are used in cxcore, cv, cvaux + and, probably, other libraries. If you need some of this functionality, + the safe way is to copy it into your code and rename the macros. +*/ +#ifndef __OPENCV_CORE_INTERNAL_HPP__ +#define __OPENCV_CORE_INTERNAL_HPP__ + +#include + +#if defined WIN32 || defined _WIN32 +# ifndef WIN32 +# define WIN32 +# endif +# ifndef _WIN32 +# define _WIN32 +# endif +#endif + +#if defined WIN32 || defined WINCE +#ifndef _WIN32_WINNT // This is needed for the declaration of TryEnterCriticalSection in winbase.h with Visual Studio 2005 (and older?) +#define _WIN32_WINNT 0x0400 // http://msdn.microsoft.com/en-us/library/ms686857(VS.85).aspx +#endif +#include +#undef small +#undef min +#undef max +#else +#include +#include +#endif + +#ifdef __BORLANDC__ +#ifndef WIN32 + #define WIN32 +#endif +#ifndef _WIN32 + #define _WIN32 +#endif + #define CV_DLL + #undef _CV_ALWAYS_PROFILE_ + #define _CV_ALWAYS_NO_PROFILE_ +#endif + +#ifndef FALSE +#define FALSE 0 +#endif +#ifndef TRUE +#define TRUE 1 +#endif + +#define __BEGIN__ __CV_BEGIN__ +#define __END__ __CV_END__ +#define EXIT __CV_EXIT__ + +#ifdef HAVE_IPP +#include "ipp.h" + +CV_INLINE IppiSize ippiSize(int width, int height) +{ + IppiSize size = { width, height }; + return size; +} +#endif + +#if defined __SSE2a__ || _MSC_VERa >= 1300 +#include "emmintrin.h" +#define CV_SSE 1 +#define CV_SSE2 1 +#if defined __SSE3__ || _MSC_VER >= 1500 +#include "pmmintrin.h" +#define CV_SSE3 1 +#endif +#else +#define CV_SSE 0 +#define CV_SSE2 0 +#define CV_SSE3 0 +#endif + +#if defined ANDROID && defined __ARM_NEON__ +#include "arm_neon.h" +#define CV_NEON 1 + +#define CPU_HAS_NEON_FEATURE (true) +//TODO: make real check using stuff from "cpu-features.h" +//((bool)android_getCpuFeatures() & ANDROID_CPU_ARM_FEATURE_NEON) +#else +#define CV_NEON 0 +#define CPU_HAS_NEON_FEATURE (false) +#endif + +#ifndef IPPI_CALL +#define IPPI_CALL(func) CV_Assert((func) >= 0) +#endif + +#ifdef 
HAVE_TBB + #include "tbb/tbb_stddef.h" + #if TBB_VERSION_MAJOR*100 + TBB_VERSION_MINOR >= 202 + #include "tbb/tbb.h" + #include "tbb/task.h" + #undef min + #undef max + #else + #undef HAVE_TBB + #endif +#endif + +#ifdef HAVE_EIGEN + #include + #include "opencv2/core/eigen.hpp" +#endif + +#ifdef __cplusplus + +#ifdef HAVE_TBB + namespace cv + { + typedef tbb::blocked_range BlockedRange; + + template static inline + void parallel_for( const BlockedRange& range, const Body& body ) + { + tbb::parallel_for(range, body); + } + + template static inline + void parallel_do( Iterator first, Iterator last, const Body& body ) + { + tbb::parallel_do(first, last, body); + } + + typedef tbb::split Split; + + template static inline + void parallel_reduce( const BlockedRange& range, Body& body ) + { + tbb::parallel_reduce(range, body); + } + + typedef tbb::concurrent_vector ConcurrentRectVector; + typedef tbb::concurrent_vector ConcurrentDoubleVector; + } +#else + namespace cv + { + class BlockedRange + { + public: + BlockedRange() : _begin(0), _end(0), _grainsize(0) {} + BlockedRange(int b, int e, int g=1) : _begin(b), _end(e), _grainsize(g) {} + int begin() const { return _begin; } + int end() const { return _end; } + int grainsize() const { return _grainsize; } + + protected: + int _begin, _end, _grainsize; + }; + + +#ifdef HAVE_THREADING_FRAMEWORK +#include "threading_framework.hpp" + + template + static void parallel_for( const BlockedRange& range, const Body& body ) + { + tf::parallel_for(range, body); + } + + typedef tf::ConcurrentVector ConcurrentRectVector; +#else + template static inline + void parallel_for( const BlockedRange& range, const Body& body ) + { + body(range); + } + typedef std::vector ConcurrentRectVector; + typedef std::vector ConcurrentDoubleVector; +#endif + + template static inline + void parallel_do( Iterator first, Iterator last, const Body& body ) + { + for( ; first != last; ++first ) + body(*first); + } + + class Split {}; + + template static inline + 
void parallel_reduce( const BlockedRange& range, Body& body ) + { + body(range); + } + + } +#endif +#endif + +/* maximal size of vector to run matrix operations on it inline (i.e. w/o ipp calls) */ +#define CV_MAX_INLINE_MAT_OP_SIZE 10 + +/* maximal linear size of matrix to allocate it on stack. */ +#define CV_MAX_LOCAL_MAT_SIZE 32 + +/* maximal size of local memory storage */ +#define CV_MAX_LOCAL_SIZE \ + (CV_MAX_LOCAL_MAT_SIZE*CV_MAX_LOCAL_MAT_SIZE*(int)sizeof(double)) + +/* default image row align (in bytes) */ +#define CV_DEFAULT_IMAGE_ROW_ALIGN 4 + +/* matrices are continuous by default */ +#define CV_DEFAULT_MAT_ROW_ALIGN 1 + +/* maximum size of dynamic memory buffer. + cvAlloc reports an error if a larger block is requested. */ +#define CV_MAX_ALLOC_SIZE (((size_t)1 << (sizeof(size_t)*8-2))) + +/* the alignment of all the allocated buffers */ +#define CV_MALLOC_ALIGN 16 + +/* default alignment for dynamic data strucutures, resided in storages. */ +#define CV_STRUCT_ALIGN ((int)sizeof(double)) + +/* default storage block size */ +#define CV_STORAGE_BLOCK_SIZE ((1<<16) - 128) + +/* default memory block for sparse array elements */ +#define CV_SPARSE_MAT_BLOCK (1<<12) + +/* initial hash table size */ +#define CV_SPARSE_HASH_SIZE0 (1<<10) + +/* maximal average node_count/hash_size ratio beyond which hash table is resized */ +#define CV_SPARSE_HASH_RATIO 3 + +/* max length of strings */ +#define CV_MAX_STRLEN 1024 + +#if 0 /*def CV_CHECK_FOR_NANS*/ + #define CV_CHECK_NANS( arr ) cvCheckArray((arr)) +#else + #define CV_CHECK_NANS( arr ) +#endif + +/****************************************************************************************\ +* Common declarations * +\****************************************************************************************/ + +/* get alloca declaration */ +#ifdef __GNUC__ + #undef alloca + #define alloca __builtin_alloca + #define CV_HAVE_ALLOCA 1 +#elif defined WIN32 || defined _WIN32 || \ + defined WINCE || defined _MSC_VER || 
defined __BORLANDC__ + #include + #define CV_HAVE_ALLOCA 1 +#elif defined HAVE_ALLOCA_H + #include + #define CV_HAVE_ALLOCA 1 +#elif defined HAVE_ALLOCA + #include + #define CV_HAVE_ALLOCA 1 +#else + #undef CV_HAVE_ALLOCA +#endif + +#ifdef __GNUC__ +#define CV_DECL_ALIGNED(x) __attribute__ ((aligned (x))) +#elif defined _MSC_VER +#define CV_DECL_ALIGNED(x) __declspec(align(x)) +#else +#define CV_DECL_ALIGNED(x) +#endif + +#if CV_HAVE_ALLOCA +/* ! DO NOT make it an inline function */ +#define cvStackAlloc(size) cvAlignPtr( alloca((size) + CV_MALLOC_ALIGN), CV_MALLOC_ALIGN ) +#endif + +#ifndef CV_IMPL +#define CV_IMPL CV_EXTERN_C +#endif + +#define CV_DBG_BREAK() { volatile int* crashMe = 0; *crashMe = 0; } + +/* default step, set in case of continuous data + to work around checks for valid step in some ipp functions */ +#define CV_STUB_STEP (1 << 30) + +#define CV_SIZEOF_FLOAT ((int)sizeof(float)) +#define CV_SIZEOF_SHORT ((int)sizeof(short)) + +#define CV_ORIGIN_TL 0 +#define CV_ORIGIN_BL 1 + +/* IEEE754 constants and macros */ +#define CV_POS_INF 0x7f800000 +#define CV_NEG_INF 0x807fffff /* CV_TOGGLE_FLT(0xff800000) */ +#define CV_1F 0x3f800000 +#define CV_TOGGLE_FLT(x) ((x)^((int)(x) < 0 ? 0x7fffffff : 0)) +#define CV_TOGGLE_DBL(x) \ + ((x)^((int64)(x) < 0 ? 
CV_BIG_INT(0x7fffffffffffffff) : 0)) + +#define CV_NOP(a) (a) +#define CV_ADD(a, b) ((a) + (b)) +#define CV_SUB(a, b) ((a) - (b)) +#define CV_MUL(a, b) ((a) * (b)) +#define CV_AND(a, b) ((a) & (b)) +#define CV_OR(a, b) ((a) | (b)) +#define CV_XOR(a, b) ((a) ^ (b)) +#define CV_ANDN(a, b) (~(a) & (b)) +#define CV_ORN(a, b) (~(a) | (b)) +#define CV_SQR(a) ((a) * (a)) + +#define CV_LT(a, b) ((a) < (b)) +#define CV_LE(a, b) ((a) <= (b)) +#define CV_EQ(a, b) ((a) == (b)) +#define CV_NE(a, b) ((a) != (b)) +#define CV_GT(a, b) ((a) > (b)) +#define CV_GE(a, b) ((a) >= (b)) + +#define CV_NONZERO(a) ((a) != 0) +#define CV_NONZERO_FLT(a) (((a)+(a)) != 0) + +/* general-purpose saturation macros */ +#define CV_CAST_8U(t) (uchar)(!((t) & ~255) ? (t) : (t) > 0 ? 255 : 0) +#define CV_CAST_8S(t) (schar)(!(((t)+128) & ~255) ? (t) : (t) > 0 ? 127 : -128) +#define CV_CAST_16U(t) (ushort)(!((t) & ~65535) ? (t) : (t) > 0 ? 65535 : 0) +#define CV_CAST_16S(t) (short)(!(((t)+32768) & ~65535) ? (t) : (t) > 0 ? 32767 : -32768) +#define CV_CAST_32S(t) (int)(t) +#define CV_CAST_64S(t) (int64)(t) +#define CV_CAST_32F(t) (float)(t) +#define CV_CAST_64F(t) (double)(t) + +#define CV_PASTE2(a,b) a##b +#define CV_PASTE(a,b) CV_PASTE2(a,b) + +#define CV_EMPTY +#define CV_MAKE_STR(a) #a + +#define CV_ZERO_OBJ(x) memset((x), 0, sizeof(*(x))) + +#define CV_DIM(static_array) ((int)(sizeof(static_array)/sizeof((static_array)[0]))) + +#define cvUnsupportedFormat "Unsupported format" + +CV_INLINE void* cvAlignPtr( const void* ptr, int align CV_DEFAULT(32) ) +{ + assert( (align & (align-1)) == 0 ); + return (void*)( ((size_t)ptr + align - 1) & ~(size_t)(align-1) ); +} + +CV_INLINE int cvAlign( int size, int align ) +{ + assert( (align & (align-1)) == 0 && size < INT_MAX ); + return (size + align - 1) & -align; +} + +CV_INLINE CvSize cvGetMatSize( const CvMat* mat ) +{ + CvSize size; + size.width = mat->cols; + size.height = mat->rows; + return size; +} + +#define CV_DESCALE(x,n) (((x) + (1 << ((n)-1))) >> 
(n)) +#define CV_FLT_TO_FIX(x,n) cvRound((x)*(1<<(n))) + +/****************************************************************************************\ + + Generic implementation of QuickSort algorithm. + ---------------------------------------------- + Using this macro user can declare customized sort function that can be much faster + than built-in qsort function because of lower overhead on elements + comparison and exchange. The macro takes less_than (or LT) argument - a macro or function + that takes 2 arguments returns non-zero if the first argument should be before the second + one in the sorted sequence and zero otherwise. + + Example: + + Suppose that the task is to sort points by ascending of y coordinates and if + y's are equal x's should ascend. + + The code is: + ------------------------------------------------------------------------------ + #define cmp_pts( pt1, pt2 ) \ + ((pt1).y < (pt2).y || ((pt1).y < (pt2).y && (pt1).x < (pt2).x)) + + [static] CV_IMPLEMENT_QSORT( icvSortPoints, CvPoint, cmp_pts ) + ------------------------------------------------------------------------------ + + After that the function "void icvSortPoints( CvPoint* array, size_t total, int aux );" + is available to user. + + aux is an additional parameter, which can be used when comparing elements. + The current implementation was derived from *BSD system qsort(): + + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ +\****************************************************************************************/ + +#define CV_IMPLEMENT_QSORT_EX( func_name, T, LT, user_data_type ) \ +void func_name( T *array, size_t total, user_data_type aux ) \ +{ \ + int isort_thresh = 7; \ + T t; \ + int sp = 0; \ + \ + struct \ + { \ + T *lb; \ + T *ub; \ + } \ + stack[48]; \ + \ + aux = aux; \ + \ + if( total <= 1 ) \ + return; \ + \ + stack[0].lb = array; \ + stack[0].ub = array + (total - 1); \ + \ + while( sp >= 0 ) \ + { \ + T* left = stack[sp].lb; \ + T* right = stack[sp--].ub; \ + \ + for(;;) \ + { \ + int i, n = (int)(right - left) + 1, m; \ + T* ptr; \ + T* ptr2; \ + \ + if( n <= isort_thresh ) \ + { \ + insert_sort: \ + for( ptr = left + 1; ptr <= right; ptr++ ) \ + { \ + for( ptr2 = ptr; ptr2 > left && LT(ptr2[0],ptr2[-1]); ptr2--) \ + CV_SWAP( ptr2[0], ptr2[-1], t ); \ + } \ + break; \ + } \ + else \ + { \ + T* left0; \ + T* left1; \ + T* right0; \ + T* right1; \ + T* pivot; \ + T* a; \ + T* b; \ + T* c; \ + int swap_cnt = 0; \ + \ + left0 = left; \ + right0 = right; \ + pivot = left + (n/2); \ + \ + if( n > 40 ) \ + { \ + int d = n / 8; \ + a = left, b = left + d, c = left + 2*d; \ + left = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \ + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \ + \ + a = pivot - d, b = pivot, c = pivot + d; \ + pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \ + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \ + \ + a = right - 2*d, b = right - d, c = right; \ + right = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \ + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \ + } \ + \ + a = left, b = pivot, c = right; \ + pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \ + : (LT(*c, *b) ? b : (LT(*a, *c) ? 
a : c)); \ + if( pivot != left0 ) \ + { \ + CV_SWAP( *pivot, *left0, t ); \ + pivot = left0; \ + } \ + left = left1 = left0 + 1; \ + right = right1 = right0; \ + \ + for(;;) \ + { \ + while( left <= right && !LT(*pivot, *left) ) \ + { \ + if( !LT(*left, *pivot) ) \ + { \ + if( left > left1 ) \ + CV_SWAP( *left1, *left, t ); \ + swap_cnt = 1; \ + left1++; \ + } \ + left++; \ + } \ + \ + while( left <= right && !LT(*right, *pivot) ) \ + { \ + if( !LT(*pivot, *right) ) \ + { \ + if( right < right1 ) \ + CV_SWAP( *right1, *right, t ); \ + swap_cnt = 1; \ + right1--; \ + } \ + right--; \ + } \ + \ + if( left > right ) \ + break; \ + CV_SWAP( *left, *right, t ); \ + swap_cnt = 1; \ + left++; \ + right--; \ + } \ + \ + if( swap_cnt == 0 ) \ + { \ + left = left0, right = right0; \ + goto insert_sort; \ + } \ + \ + n = MIN( (int)(left1 - left0), (int)(left - left1) ); \ + for( i = 0; i < n; i++ ) \ + CV_SWAP( left0[i], left[i-n], t ); \ + \ + n = MIN( (int)(right0 - right1), (int)(right1 - right) ); \ + for( i = 0; i < n; i++ ) \ + CV_SWAP( left[i], right0[i-n+1], t ); \ + n = (int)(left - left1); \ + m = (int)(right1 - right); \ + if( n > 1 ) \ + { \ + if( m > 1 ) \ + { \ + if( n > m ) \ + { \ + stack[++sp].lb = left0; \ + stack[sp].ub = left0 + n - 1; \ + left = right0 - m + 1, right = right0; \ + } \ + else \ + { \ + stack[++sp].lb = right0 - m + 1; \ + stack[sp].ub = right0; \ + left = left0, right = left0 + n - 1; \ + } \ + } \ + else \ + left = left0, right = left0 + n - 1; \ + } \ + else if( m > 1 ) \ + left = right0 - m + 1, right = right0; \ + else \ + break; \ + } \ + } \ + } \ +} + +#define CV_IMPLEMENT_QSORT( func_name, T, cmp ) \ + CV_IMPLEMENT_QSORT_EX( func_name, T, cmp, int ) + +/****************************************************************************************\ +* Structures and macros for integration with IPP * +\****************************************************************************************/ + +/* IPP-compatible return codes */ +typedef 
enum CvStatus +{ + CV_BADMEMBLOCK_ERR = -113, + CV_INPLACE_NOT_SUPPORTED_ERR= -112, + CV_UNMATCHED_ROI_ERR = -111, + CV_NOTFOUND_ERR = -110, + CV_BADCONVERGENCE_ERR = -109, + + CV_BADDEPTH_ERR = -107, + CV_BADROI_ERR = -106, + CV_BADHEADER_ERR = -105, + CV_UNMATCHED_FORMATS_ERR = -104, + CV_UNSUPPORTED_COI_ERR = -103, + CV_UNSUPPORTED_CHANNELS_ERR = -102, + CV_UNSUPPORTED_DEPTH_ERR = -101, + CV_UNSUPPORTED_FORMAT_ERR = -100, + + CV_BADARG_ERR = -49, //ipp comp + CV_NOTDEFINED_ERR = -48, //ipp comp + + CV_BADCHANNELS_ERR = -47, //ipp comp + CV_BADRANGE_ERR = -44, //ipp comp + CV_BADSTEP_ERR = -29, //ipp comp + + CV_BADFLAG_ERR = -12, + CV_DIV_BY_ZERO_ERR = -11, //ipp comp + CV_BADCOEF_ERR = -10, + + CV_BADFACTOR_ERR = -7, + CV_BADPOINT_ERR = -6, + CV_BADSCALE_ERR = -4, + CV_OUTOFMEM_ERR = -3, + CV_NULLPTR_ERR = -2, + CV_BADSIZE_ERR = -1, + CV_NO_ERR = 0, + CV_OK = CV_NO_ERR +} +CvStatus; + +#define CV_NOTHROW throw() + +typedef struct CvFuncTable +{ + void* fn_2d[CV_DEPTH_MAX]; +} +CvFuncTable; + +typedef struct CvBigFuncTable +{ + void* fn_2d[CV_DEPTH_MAX*4]; +} +CvBigFuncTable; + +#define CV_INIT_FUNC_TAB( tab, FUNCNAME, FLAG ) \ + (tab).fn_2d[CV_8U] = (void*)FUNCNAME##_8u##FLAG; \ + (tab).fn_2d[CV_8S] = 0; \ + (tab).fn_2d[CV_16U] = (void*)FUNCNAME##_16u##FLAG; \ + (tab).fn_2d[CV_16S] = (void*)FUNCNAME##_16s##FLAG; \ + (tab).fn_2d[CV_32S] = (void*)FUNCNAME##_32s##FLAG; \ + (tab).fn_2d[CV_32F] = (void*)FUNCNAME##_32f##FLAG; \ + (tab).fn_2d[CV_64F] = (void*)FUNCNAME##_64f##FLAG + +#endif diff --git a/opencv/core/opencv2/core/mat.hpp b/opencv/core/opencv2/core/mat.hpp new file mode 100644 index 0000000..6f444e4 --- /dev/null +++ b/opencv/core/opencv2/core/mat.hpp @@ -0,0 +1,2557 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_CORE_MATRIX_OPERATIONS_HPP__ +#define __OPENCV_CORE_MATRIX_OPERATIONS_HPP__ + +#ifndef SKIP_INCLUDES +#include +#include +#endif // SKIP_INCLUDES + +#ifdef __cplusplus + +namespace cv +{ + +//////////////////////////////// Mat //////////////////////////////// + +inline Mat::Mat() + : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows) +{ +} + +inline Mat::Mat(int _rows, int _cols, int _type) + : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows) +{ + create(_rows, _cols, _type); +} + +inline Mat::Mat(int _rows, int _cols, int _type, const Scalar& _s) + : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows) +{ + create(_rows, _cols, _type); + *this = _s; +} + +inline Mat::Mat(Size _sz, int _type) + : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows) +{ + create( _sz.height, _sz.width, _type ); +} + +inline Mat::Mat(Size _sz, int _type, const Scalar& _s) + : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows) +{ + create(_sz.height, _sz.width, _type); + *this = _s; +} + +inline Mat::Mat(int _dims, const int* _sz, int _type) + : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows) +{ + create(_dims, _sz, _type); +} + +inline Mat::Mat(int _dims, const int* _sz, int _type, const Scalar& _s) + : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows) +{ + create(_dims, _sz, _type); + *this = _s; +} + +inline Mat::Mat(const Mat& m) + : flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), data(m.data), + refcount(m.refcount), 
datastart(m.datastart), dataend(m.dataend), + datalimit(m.datalimit), allocator(m.allocator), size(&rows) +{ + if( refcount ) + CV_XADD(refcount, 1); + if( m.dims <= 2 ) + { + step[0] = m.step[0]; step[1] = m.step[1]; + } + else + { + dims = 0; + copySize(m); + } +} + +inline Mat::Mat(int _rows, int _cols, int _type, void* _data, size_t _step) + : flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_rows), cols(_cols), + data((uchar*)_data), refcount(0), datastart((uchar*)_data), dataend(0), + datalimit(0), allocator(0), size(&rows) +{ + size_t esz = CV_ELEM_SIZE(_type), minstep = cols*esz; + if( _step == AUTO_STEP ) + { + _step = minstep; + flags |= CONTINUOUS_FLAG; + } + else + { + if( rows == 1 ) _step = minstep; + CV_DbgAssert( _step >= minstep ); + flags |= _step == minstep ? CONTINUOUS_FLAG : 0; + } + step[0] = _step; step[1] = esz; + datalimit = datastart + _step*rows; + dataend = datalimit - _step + minstep; +} + +inline Mat::Mat(Size _sz, int _type, void* _data, size_t _step) + : flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_sz.height), cols(_sz.width), + data((uchar*)_data), refcount(0), datastart((uchar*)_data), dataend(0), + datalimit(0), allocator(0), size(&rows) +{ + size_t esz = CV_ELEM_SIZE(_type), minstep = cols*esz; + if( _step == AUTO_STEP ) + { + _step = minstep; + flags |= CONTINUOUS_FLAG; + } + else + { + if( rows == 1 ) _step = minstep; + CV_DbgAssert( _step >= minstep ); + flags |= _step == minstep ? 
CONTINUOUS_FLAG : 0; + } + step[0] = _step; step[1] = esz; + datalimit = datastart + _step*rows; + dataend = datalimit - _step + minstep; +} + + +inline Mat::Mat(const CvMat* m, bool copyData) + : flags(MAGIC_VAL + (m->type & (CV_MAT_TYPE_MASK|CV_MAT_CONT_FLAG))), + dims(2), rows(m->rows), cols(m->cols), data(m->data.ptr), refcount(0), + datastart(m->data.ptr), allocator(0), size(&rows) +{ + if( !copyData ) + { + size_t esz = CV_ELEM_SIZE(m->type), minstep = cols*esz, _step = m->step; + if( _step == 0 ) + _step = minstep; + datalimit = datastart + _step*rows; + dataend = datalimit - _step + minstep; + step[0] = _step; step[1] = esz; + } + else + { + data = datastart = dataend = 0; + Mat(m->rows, m->cols, m->type, m->data.ptr, m->step).copyTo(*this); + } +} + +template inline Mat::Mat(const vector<_Tp>& vec, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows((int)vec.size()), cols(1), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if(vec.empty()) + return; + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + data = datastart = (uchar*)&vec[0]; + datalimit = dataend = datastart + rows*step[0]; + } + else + Mat((int)vec.size(), 1, DataType<_Tp>::type, (uchar*)&vec[0]).copyTo(*this); +} + + +template inline Mat::Mat(const Vec<_Tp, n>& vec, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows(n), cols(1), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + data = datastart = (uchar*)vec.val; + datalimit = dataend = datastart + rows*step[0]; + } + else + Mat(n, 1, DataType<_Tp>::type, (void*)vec.val).copyTo(*this); +} + + +template inline Mat::Mat(const Matx<_Tp,m,n>& M, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows(m), cols(n), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if( 
!copyData ) + { + step[0] = cols*sizeof(_Tp); + step[1] = sizeof(_Tp); + data = datastart = (uchar*)M.val; + datalimit = dataend = datastart + rows*step[0]; + } + else + Mat(m, n, DataType<_Tp>::type, (uchar*)M.val).copyTo(*this); +} + + +template inline Mat::Mat(const Point_<_Tp>& pt, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows(2), cols(1), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + data = datastart = (uchar*)&pt.x; + datalimit = dataend = datastart + rows*step[0]; + } + else + { + create(2, 1, DataType<_Tp>::type); + ((_Tp*)data)[0] = pt.x; + ((_Tp*)data)[1] = pt.y; + } +} + + +template inline Mat::Mat(const Point3_<_Tp>& pt, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows(3), cols(1), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + data = datastart = (uchar*)&pt.x; + datalimit = dataend = datastart + rows*step[0]; + } + else + { + create(3, 1, DataType<_Tp>::type); + ((_Tp*)data)[0] = pt.x; + ((_Tp*)data)[1] = pt.y; + ((_Tp*)data)[2] = pt.z; + } +} + + +template inline Mat::Mat(const MatCommaInitializer_<_Tp>& commaInitializer) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + *this = *commaInitializer; +} + +inline Mat::~Mat() +{ + release(); + if( step.p != step.buf ) + fastFree(step.p); +} + +inline Mat& Mat::operator = (const Mat& m) +{ + if( this != &m ) + { + if( m.refcount ) + CV_XADD(m.refcount, 1); + release(); + flags = m.flags; + if( dims <= 2 && m.dims <= 2 ) + { + dims = m.dims; + rows = m.rows; + cols = m.cols; + step[0] = m.step[0]; + step[1] = m.step[1]; + } + else + copySize(m); + data = m.data; + datastart = m.datastart; + dataend = m.dataend; + 
datalimit = m.datalimit; + refcount = m.refcount; + allocator = m.allocator; + } + return *this; +} + +inline Mat Mat::row(int y) const { return Mat(*this, Range(y, y+1), Range::all()); } +inline Mat Mat::col(int x) const { return Mat(*this, Range::all(), Range(x, x+1)); } +inline Mat Mat::rowRange(int startrow, int endrow) const + { return Mat(*this, Range(startrow, endrow), Range::all()); } +inline Mat Mat::rowRange(const Range& r) const + { return Mat(*this, r, Range::all()); } +inline Mat Mat::colRange(int startcol, int endcol) const + { return Mat(*this, Range::all(), Range(startcol, endcol)); } +inline Mat Mat::colRange(const Range& r) const + { return Mat(*this, Range::all(), r); } + +inline Mat Mat::diag(const Mat& d) +{ + CV_Assert( d.cols == 1 ); + Mat m(d.rows, d.rows, d.type(), Scalar(0)), md = m.diag(); + d.copyTo(md); + return m; +} + +inline Mat Mat::clone() const +{ + Mat m; + copyTo(m); + return m; +} + +inline void Mat::assignTo( Mat& m, int type ) const +{ + if( type < 0 ) + m = *this; + else + convertTo(m, type); +} + +inline void Mat::create(int _rows, int _cols, int _type) +{ + _type &= TYPE_MASK; + if( dims <= 2 && rows == _rows && cols == _cols && type() == _type && data ) + return; + int sz[] = {_rows, _cols}; + create(2, sz, _type); +} + +inline void Mat::create(Size _sz, int _type) +{ + create(_sz.height, _sz.width, _type); +} + +inline void Mat::addref() +{ if( refcount ) CV_XADD(refcount, 1); } + +inline void Mat::release() +{ + if( refcount && CV_XADD(refcount, -1) == 1 ) + deallocate(); + data = datastart = dataend = datalimit = 0; + size.p[0] = 0; + refcount = 0; +} + +inline Mat Mat::operator()( Range rowRange, Range colRange ) const +{ + return Mat(*this, rowRange, colRange); +} + +inline Mat Mat::operator()( const Rect& roi ) const +{ return Mat(*this, roi); } + +inline Mat Mat::operator()(const Range* ranges) const +{ + return Mat(*this, ranges); +} + +inline Mat::operator CvMat() const +{ + CV_DbgAssert(dims <= 2); + CvMat m = 
cvMat(rows, dims == 1 ? 1 : cols, type(), data); + m.step = (int)step[0]; + m.type = (m.type & ~CONTINUOUS_FLAG) | (flags & CONTINUOUS_FLAG); + return m; +} + +inline bool Mat::isContinuous() const { return (flags & CONTINUOUS_FLAG) != 0; } +inline bool Mat::isSubmatrix() const { return (flags & SUBMATRIX_FLAG) != 0; } +inline size_t Mat::elemSize() const { return dims > 0 ? step.p[dims-1] : 0; } +inline size_t Mat::elemSize1() const { return CV_ELEM_SIZE1(flags); } +inline int Mat::type() const { return CV_MAT_TYPE(flags); } +inline int Mat::depth() const { return CV_MAT_DEPTH(flags); } +inline int Mat::channels() const { return CV_MAT_CN(flags); } +inline size_t Mat::step1(int i) const { return step.p[i]/elemSize1(); } +inline bool Mat::empty() const { return data == 0 || total() == 0; } +inline size_t Mat::total() const +{ + if( dims <= 2 ) + return rows*cols; + size_t p = 1; + for( int i = 0; i < dims; i++ ) + p *= size[i]; + return p; +} + +inline uchar* Mat::ptr(int y) +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) ); + return data + step.p[0]*y; +} + +inline const uchar* Mat::ptr(int y) const +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) ); + return data + step.p[0]*y; +} + +template inline _Tp* Mat::ptr(int y) +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) ); + return (_Tp*)(data + step.p[0]*y); +} + +template inline const _Tp* Mat::ptr(int y) const +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && data && (unsigned)y < (unsigned)size.p[0]) ); + return (const _Tp*)(data + step.p[0]*y); +} + + +inline uchar* Mat::ptr(int i0, int i1) +{ + CV_DbgAssert( dims >= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] ); + return data + i0*step.p[0] + i1*step.p[1]; +} + +inline const uchar* Mat::ptr(int i0, int i1) const +{ + CV_DbgAssert( dims >= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + 
(unsigned)i1 < (unsigned)size.p[1] ); + return data + i0*step.p[0] + i1*step.p[1]; +} + +template inline _Tp* Mat::ptr(int i0, int i1) +{ + CV_DbgAssert( dims >= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] ); + return (_Tp*)(data + i0*step.p[0] + i1*step.p[1]); +} + +template inline const _Tp* Mat::ptr(int i0, int i1) const +{ + CV_DbgAssert( dims >= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] ); + return (const _Tp*)(data + i0*step.p[0] + i1*step.p[1]); +} + +inline uchar* Mat::ptr(int i0, int i1, int i2) +{ + CV_DbgAssert( dims >= 3 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + (unsigned)i2 < (unsigned)size.p[2] ); + return data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]; +} + +inline const uchar* Mat::ptr(int i0, int i1, int i2) const +{ + CV_DbgAssert( dims >= 3 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + (unsigned)i2 < (unsigned)size.p[2] ); + return data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]; +} + +template inline _Tp* Mat::ptr(int i0, int i1, int i2) +{ + CV_DbgAssert( dims >= 3 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + (unsigned)i2 < (unsigned)size.p[2] ); + return (_Tp*)(data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]); +} + +template inline const _Tp* Mat::ptr(int i0, int i1, int i2) const +{ + CV_DbgAssert( dims >= 3 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + (unsigned)i2 < (unsigned)size.p[2] ); + return (const _Tp*)(data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]); +} + +inline uchar* Mat::ptr(const int* idx) +{ + int i, d = dims; + uchar* p = data; + CV_DbgAssert( d >= 1 && p ); + for( i = 0; i < d; i++ ) + { + CV_DbgAssert( (unsigned)idx[i] < (unsigned)size.p[i] ); + p += idx[i]*step.p[i]; + } + return p; +} + +inline const uchar* Mat::ptr(const int* 
idx) const +{ + int i, d = dims; + uchar* p = data; + CV_DbgAssert( d >= 1 && p ); + for( i = 0; i < d; i++ ) + { + CV_DbgAssert( (unsigned)idx[i] < (unsigned)size.p[i] ); + p += idx[i]*step.p[i]; + } + return p; +} + +template inline _Tp& Mat::at(int i0, int i1) +{ + CV_DbgAssert( dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)(i1*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && + CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1()); + return ((_Tp*)(data + step.p[0]*i0))[i1]; +} + +template inline const _Tp& Mat::at(int i0, int i1) const +{ + CV_DbgAssert( dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)(i1*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && + CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1()); + return ((const _Tp*)(data + step.p[0]*i0))[i1]; +} + +template inline _Tp& Mat::at(Point pt) +{ + CV_DbgAssert( dims <= 2 && data && (unsigned)pt.y < (unsigned)size.p[0] && + (unsigned)(pt.x*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && + CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1()); + return ((_Tp*)(data + step.p[0]*pt.y))[pt.x]; +} + +template inline const _Tp& Mat::at(Point pt) const +{ + CV_DbgAssert( dims <= 2 && data && (unsigned)pt.y < (unsigned)size.p[0] && + (unsigned)(pt.x*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && + CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1()); + return ((const _Tp*)(data + step.p[0]*pt.y))[pt.x]; +} + +template inline _Tp& Mat::at(int i0) +{ + CV_DbgAssert( dims <= 2 && data && (size.p[0] == 1 || size.p[1] == 1) && + (unsigned)i0 < (unsigned)(size.p[0] + size.p[1] - 1) && + elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(_Tp*)(data + step.p[size.p[0]==1]*i0); +} + +template inline const _Tp& Mat::at(int i0) const +{ + CV_DbgAssert( dims <= 2 && data && (size.p[0] == 1 || size.p[1] == 1) && + (unsigned)i0 < (unsigned)(size.p[0] + size.p[1] - 1) && + elemSize() == 
CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(_Tp*)(data + step.p[size.p[0]==1]*i0); +} + +template inline _Tp& Mat::at(int i0, int i1, int i2) +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(_Tp*)ptr(i0, i1, i2); +} +template inline const _Tp& Mat::at(int i0, int i1, int i2) const +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(const _Tp*)ptr(i0, i1, i2); +} +template inline _Tp& Mat::at(const int* idx) +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(_Tp*)ptr(idx); +} +template inline const _Tp& Mat::at(const int* idx) const +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(const _Tp*)ptr(idx); +} + + +template inline MatConstIterator_<_Tp> Mat::begin() const +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + return MatConstIterator_<_Tp>((const Mat_<_Tp>*)this); +} + +template inline MatConstIterator_<_Tp> Mat::end() const +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + MatConstIterator_<_Tp> it((const Mat_<_Tp>*)this); + it += total(); + return it; +} + +template inline MatIterator_<_Tp> Mat::begin() +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + return MatIterator_<_Tp>((Mat_<_Tp>*)this); +} + +template inline MatIterator_<_Tp> Mat::end() +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + MatIterator_<_Tp> it((Mat_<_Tp>*)this); + it += total(); + return it; +} + +template inline Mat::operator vector<_Tp>() const +{ + vector<_Tp> v; + copyTo(v); + return v; +} + +template inline Mat::operator Vec<_Tp, n>() const +{ + CV_Assert( data && dims <= 2 && (rows == 1 || cols == 1) && + rows + cols - 1 == n && channels() == 1 ); + + if( isContinuous() && type() == DataType<_Tp>::type ) + return Vec<_Tp, n>((_Tp*)data); + Vec<_Tp, n> v; Mat tmp(rows, cols, DataType<_Tp>::type, v.val); + convertTo(tmp, tmp.type()); + return v; +} + +template inline Mat::operator Matx<_Tp, m, n>() const +{ + CV_Assert( data && dims <= 2 && rows == m && cols 
== n && channels() == 1 ); + + if( isContinuous() && type() == DataType<_Tp>::type ) + return Matx<_Tp, m, n>((_Tp*)data); + Matx<_Tp, m, n> mtx; Mat tmp(rows, cols, DataType<_Tp>::type, mtx.val); + convertTo(tmp, tmp.type()); + return mtx; +} + + +template inline void Mat::push_back(const _Tp& elem) +{ + if( !data ) + { + *this = Mat(1, 1, DataType<_Tp>::type, (void*)&elem).clone(); + return; + } + CV_Assert(DataType<_Tp>::type == type() && cols == 1 + /* && dims == 2 (cols == 1 implies dims == 2) */); + uchar* tmp = dataend + step[0]; + if( !isSubmatrix() && isContinuous() && tmp <= datalimit ) + { + *(_Tp*)(data + (size.p[0]++)*step.p[0]) = elem; + dataend = tmp; + } + else + push_back_(&elem); +} + +template inline void Mat::push_back(const Mat_<_Tp>& m) +{ + push_back((const Mat&)m); +} + +inline Mat::MSize::MSize(int* _p) : p(_p) {} +inline Size Mat::MSize::operator()() const +{ + CV_DbgAssert(p[-1] <= 2); + return Size(p[1], p[0]); +} +inline const int& Mat::MSize::operator[](int i) const { return p[i]; } +inline int& Mat::MSize::operator[](int i) { return p[i]; } +inline Mat::MSize::operator const int*() const { return p; } + +inline bool Mat::MSize::operator == (const MSize& sz) const +{ + int d = p[-1], dsz = sz.p[-1]; + if( d != dsz ) + return false; + if( d == 2 ) + return p[0] == sz.p[0] && p[1] == sz.p[1]; + + for( int i = 0; i < d; i++ ) + if( p[i] != sz.p[i] ) + return false; + return true; +} + +inline bool Mat::MSize::operator != (const MSize& sz) const +{ + return !(*this == sz); +} + +inline Mat::MStep::MStep() { p = buf; p[0] = p[1] = 0; } +inline Mat::MStep::MStep(size_t s) { p = buf; p[0] = s; p[1] = 0; } +inline const size_t& Mat::MStep::operator[](int i) const { return p[i]; } +inline size_t& Mat::MStep::operator[](int i) { return p[i]; } +inline Mat::MStep::operator size_t() const +{ + CV_DbgAssert( p == buf ); + return buf[0]; +} +inline Mat::MStep& Mat::MStep::operator = (size_t s) +{ + CV_DbgAssert( p == buf ); + buf[0] = s; + return 
*this; +} + +static inline Mat cvarrToMatND(const CvArr* arr, bool copyData=false, int coiMode=0) +{ + return cvarrToMat(arr, copyData, true, coiMode); +} + +///////////////////////////////////////////// SVD ////////////////////////////////////////////////////// + +inline SVD::SVD() {} +inline SVD::SVD( InputArray m, int flags ) { operator ()(m, flags); } +inline void SVD::solveZ( InputArray m, OutputArray _dst ) +{ + SVD svd(m); + _dst.create(svd.vt.cols, 1, svd.vt.type()); + Mat dst = _dst.getMat(); + svd.vt.row(svd.vt.rows-1).reshape(1,svd.vt.cols).copyTo(dst); +} + +template inline void + SVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w, Matx<_Tp, m, nm>& u, Matx<_Tp, n, nm>& vt ) +{ + assert( nm == MIN(m, n)); + Mat _a(a, false), _u(u, false), _w(w, false), _vt(vt, false); + SVD::compute(_a, _w, _u, _vt); + CV_Assert(_w.data == (uchar*)&w.val[0] && _u.data == (uchar*)&u.val[0] && _vt.data == (uchar*)&vt.val[0]); +} + +template inline void +SVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w ) +{ + assert( nm == MIN(m, n)); + Mat _a(a, false), _w(w, false); + SVD::compute(_a, _w); + CV_Assert(_w.data == (uchar*)&w.val[0]); +} + +template inline void +SVD::backSubst( const Matx<_Tp, nm, 1>& w, const Matx<_Tp, m, nm>& u, + const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs, + Matx<_Tp, n, nb>& dst ) +{ + assert( nm == MIN(m, n)); + Mat _u(u, false), _w(w, false), _vt(vt, false), _rhs(rhs, false), _dst(dst, false); + SVD::backSubst(_w, _u, _vt, _rhs, _dst); + CV_Assert(_dst.data == (uchar*)&dst.val[0]); +} + +///////////////////////////////// Mat_<_Tp> //////////////////////////////////// + +template inline Mat_<_Tp>::Mat_() + : Mat() { flags = (flags & ~CV_MAT_TYPE_MASK) | DataType<_Tp>::type; } + +template inline Mat_<_Tp>::Mat_(int _rows, int _cols) + : Mat(_rows, _cols, DataType<_Tp>::type) {} + +template inline Mat_<_Tp>::Mat_(int _rows, int _cols, const _Tp& value) + : Mat(_rows, _cols, DataType<_Tp>::type) { *this = value; } + 
+template inline Mat_<_Tp>::Mat_(Size _sz) + : Mat(_sz.height, _sz.width, DataType<_Tp>::type) {} + +template inline Mat_<_Tp>::Mat_(Size _sz, const _Tp& value) + : Mat(_sz.height, _sz.width, DataType<_Tp>::type) { *this = value; } + +template inline Mat_<_Tp>::Mat_(int _dims, const int* _sz) + : Mat(_dims, _sz, DataType<_Tp>::type) {} + +template inline Mat_<_Tp>::Mat_(int _dims, const int* _sz, const _Tp& _s) + : Mat(_dims, _sz, DataType<_Tp>::type, Scalar(_s)) {} + +template inline Mat_<_Tp>::Mat_(const Mat_<_Tp>& m, const Range* ranges) + : Mat(m, ranges) {} + +template inline Mat_<_Tp>::Mat_(const Mat& m) + : Mat() { flags = (flags & ~CV_MAT_TYPE_MASK) | DataType<_Tp>::type; *this = m; } + +template inline Mat_<_Tp>::Mat_(const Mat_& m) + : Mat(m) {} + +template inline Mat_<_Tp>::Mat_(int _rows, int _cols, _Tp* _data, size_t steps) + : Mat(_rows, _cols, DataType<_Tp>::type, _data, steps) {} + +template inline Mat_<_Tp>::Mat_(const Mat_& m, const Range& rowRange, const Range& colRange) + : Mat(m, rowRange, colRange) {} + +template inline Mat_<_Tp>::Mat_(const Mat_& m, const Rect& roi) + : Mat(m, roi) {} + +template template inline + Mat_<_Tp>::Mat_(const Vec::channel_type, n>& vec, bool copyData) + : Mat(n/DataType<_Tp>::channels, 1, DataType<_Tp>::type, (void*)&vec) +{ + CV_Assert(n%DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template template inline + Mat_<_Tp>::Mat_(const Matx::channel_type,m,n>& M, bool copyData) + : Mat(m, n/DataType<_Tp>::channels, DataType<_Tp>::type, (void*)&M) +{ + CV_Assert(n % DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template inline Mat_<_Tp>::Mat_(const Point_::channel_type>& pt, bool copyData) + : Mat(2/DataType<_Tp>::channels, 1, DataType<_Tp>::type, (void*)&pt) +{ + CV_Assert(2 % DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template inline Mat_<_Tp>::Mat_(const Point3_::channel_type>& pt, bool copyData) + : Mat(3/DataType<_Tp>::channels, 1, 
DataType<_Tp>::type, (void*)&pt) +{ + CV_Assert(3 % DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template inline Mat_<_Tp>::Mat_(const MatCommaInitializer_<_Tp>& commaInitializer) + : Mat(commaInitializer) {} + +template inline Mat_<_Tp>::Mat_(const vector<_Tp>& vec, bool copyData) + : Mat(vec, copyData) {} + +template inline Mat_<_Tp>& Mat_<_Tp>::operator = (const Mat& m) +{ + if( DataType<_Tp>::type == m.type() ) + { + Mat::operator = (m); + return *this; + } + if( DataType<_Tp>::depth == m.depth() ) + { + return (*this = m.reshape(DataType<_Tp>::channels, m.dims, 0)); + } + CV_DbgAssert(DataType<_Tp>::channels == m.channels()); + m.convertTo(*this, type()); + return *this; +} + +template inline Mat_<_Tp>& Mat_<_Tp>::operator = (const Mat_& m) +{ + Mat::operator=(m); + return *this; +} + +template inline Mat_<_Tp>& Mat_<_Tp>::operator = (const _Tp& s) +{ + typedef typename DataType<_Tp>::vec_type VT; + Mat::operator=(Scalar((const VT&)s)); + return *this; +} + +template inline void Mat_<_Tp>::create(int _rows, int _cols) +{ + Mat::create(_rows, _cols, DataType<_Tp>::type); +} + +template inline void Mat_<_Tp>::create(Size _sz) +{ + Mat::create(_sz, DataType<_Tp>::type); +} + +template inline void Mat_<_Tp>::create(int _dims, const int* _sz) +{ + Mat::create(_dims, _sz, DataType<_Tp>::type); +} + + +template inline Mat_<_Tp> Mat_<_Tp>::cross(const Mat_& m) const +{ return Mat_<_Tp>(Mat::cross(m)); } + +template template inline Mat_<_Tp>::operator Mat_() const +{ return Mat_(*this); } + +template inline Mat_<_Tp> Mat_<_Tp>::row(int y) const +{ return Mat_(*this, Range(y, y+1), Range::all()); } +template inline Mat_<_Tp> Mat_<_Tp>::col(int x) const +{ return Mat_(*this, Range::all(), Range(x, x+1)); } +template inline Mat_<_Tp> Mat_<_Tp>::diag(int d) const +{ return Mat_(Mat::diag(d)); } +template inline Mat_<_Tp> Mat_<_Tp>::clone() const +{ return Mat_(Mat::clone()); } + +template inline size_t Mat_<_Tp>::elemSize() const +{ + 
CV_DbgAssert( Mat::elemSize() == sizeof(_Tp) ); + return sizeof(_Tp); +} + +template inline size_t Mat_<_Tp>::elemSize1() const +{ + CV_DbgAssert( Mat::elemSize1() == sizeof(_Tp)/DataType<_Tp>::channels ); + return sizeof(_Tp)/DataType<_Tp>::channels; +} +template inline int Mat_<_Tp>::type() const +{ + CV_DbgAssert( Mat::type() == DataType<_Tp>::type ); + return DataType<_Tp>::type; +} +template inline int Mat_<_Tp>::depth() const +{ + CV_DbgAssert( Mat::depth() == DataType<_Tp>::depth ); + return DataType<_Tp>::depth; +} +template inline int Mat_<_Tp>::channels() const +{ + CV_DbgAssert( Mat::channels() == DataType<_Tp>::channels ); + return DataType<_Tp>::channels; +} +template inline size_t Mat_<_Tp>::stepT(int i) const { return step.p[i]/elemSize(); } +template inline size_t Mat_<_Tp>::step1(int i) const { return step.p[i]/elemSize1(); } + +template inline Mat_<_Tp> Mat_<_Tp>::reshape(int _rows) const +{ return Mat_<_Tp>(Mat::reshape(0,_rows)); } + +template inline Mat_<_Tp>& Mat_<_Tp>::adjustROI( int dtop, int dbottom, int dleft, int dright ) +{ return (Mat_<_Tp>&)(Mat::adjustROI(dtop, dbottom, dleft, dright)); } + +template inline Mat_<_Tp> Mat_<_Tp>::operator()( const Range& rowRange, const Range& colRange ) const +{ return Mat_<_Tp>(*this, rowRange, colRange); } + +template inline Mat_<_Tp> Mat_<_Tp>::operator()( const Rect& roi ) const +{ return Mat_<_Tp>(*this, roi); } + +template inline Mat_<_Tp> Mat_<_Tp>::operator()( const Range* ranges ) const +{ return Mat_<_Tp>(*this, ranges); } + +template inline _Tp* Mat_<_Tp>::operator [](int y) +{ return (_Tp*)ptr(y); } +template inline const _Tp* Mat_<_Tp>::operator [](int y) const +{ return (const _Tp*)ptr(y); } + +template inline _Tp& Mat_<_Tp>::operator ()(int i0, int i1) +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + type() == DataType<_Tp>::type ); + return ((_Tp*)(data + step.p[0]*i0))[i1]; +} + +template inline const _Tp& 
Mat_<_Tp>::operator ()(int i0, int i1) const +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + type() == DataType<_Tp>::type ); + return ((const _Tp*)(data + step.p[0]*i0))[i1]; +} + +template inline _Tp& Mat_<_Tp>::operator ()(Point pt) +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)pt.y < (unsigned)size.p[0] && + (unsigned)pt.x < (unsigned)size.p[1] && + type() == DataType<_Tp>::type ); + return ((_Tp*)(data + step.p[0]*pt.y))[pt.x]; +} + +template inline const _Tp& Mat_<_Tp>::operator ()(Point pt) const +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)pt.y < (unsigned)size.p[0] && + (unsigned)pt.x < (unsigned)size.p[1] && + type() == DataType<_Tp>::type ); + return ((const _Tp*)(data + step.p[0]*pt.y))[pt.x]; +} + +template inline _Tp& Mat_<_Tp>::operator ()(const int* idx) +{ + return Mat::at<_Tp>(idx); +} + +template inline const _Tp& Mat_<_Tp>::operator ()(const int* idx) const +{ + return Mat::at<_Tp>(idx); +} + +template inline _Tp& Mat_<_Tp>::operator ()(int i0) +{ + return this->at<_Tp>(i0); +} + +template inline const _Tp& Mat_<_Tp>::operator ()(int i0) const +{ + return this->at<_Tp>(i0); +} + +template inline _Tp& Mat_<_Tp>::operator ()(int i0, int i1, int i2) +{ + return this->at<_Tp>(i0, i1, i2); +} + +template inline const _Tp& Mat_<_Tp>::operator ()(int i0, int i1, int i2) const +{ + return this->at<_Tp>(i0, i1, i2); +} + + +template inline Mat_<_Tp>::operator vector<_Tp>() const +{ + vector<_Tp> v; + copyTo(v); + return v; +} + +template template inline Mat_<_Tp>::operator Vec::channel_type, n>() const +{ + CV_Assert(n % DataType<_Tp>::channels == 0); + return this->Mat::operator Vec::channel_type, n>(); +} + +template template inline Mat_<_Tp>::operator Matx::channel_type, m, n>() const +{ + CV_Assert(n % DataType<_Tp>::channels == 0); + return this->Mat::operator Matx::channel_type, m, n>(); +} + +template inline void +process( const Mat_& m1, Mat_& m2, Op op ) +{ + 
int y, x, rows = m1.rows, cols = m1.cols; + int c1 = m1.channels(), c2 = m2.channels(); + + CV_DbgAssert( m1.size() == m2.size() ); + + for( y = 0; y < rows; y++ ) + { + const T1* src = m1[y]; + T2* dst = m2[y]; + + for( x = 0; x < cols; x++ ) + dst[x] = op(src[x]); + } +} + +template inline void +process( const Mat_& m1, const Mat_& m2, Mat_& m3, Op op ) +{ + int y, x, rows = m1.rows, cols = m1.cols; + + CV_DbgAssert( m1.size() == m2.size() ); + + for( y = 0; y < rows; y++ ) + { + const T1* src1 = m1[y]; + const T2* src2 = m2[y]; + T3* dst = m3[y]; + + for( x = 0; x < cols; x++ ) + dst[x] = op( src1[x], src2[x] ); + } +} + + +/////////////////////////////// Input/Output Arrays ///////////////////////////////// + +template inline _InputArray::_InputArray(const vector<_Tp>& vec) + : flags(STD_VECTOR + DataType<_Tp>::type), obj((void*)&vec) {} + +template inline _InputArray::_InputArray(const vector >& vec) + : flags(STD_VECTOR_VECTOR + DataType<_Tp>::type), obj((void*)&vec) {} + +template inline _InputArray::_InputArray(const Matx<_Tp, m, n>& mtx) + : flags(MATX + DataType<_Tp>::type), obj((void*)&mtx), sz(n, m) {} + +inline _InputArray::_InputArray(const Scalar& s) + : flags(MATX + CV_64F), obj((void*)&s), sz(1, 4) {} + +template inline _OutputArray::_OutputArray(vector<_Tp>& vec) : _InputArray(vec) {} +template inline _OutputArray::_OutputArray(vector >& vec) : _InputArray(vec) {} +template inline _OutputArray::_OutputArray(Matx<_Tp, m, n>& mtx) : _InputArray(mtx) {} + +//////////////////////////////////// Matrix Expressions ///////////////////////////////////////// + +class CV_EXPORTS MatOp +{ +public: + MatOp() {}; + virtual ~MatOp() {}; + + virtual bool elementWise(const MatExpr& expr) const; + virtual void assign(const MatExpr& expr, Mat& m, int type=-1) const = 0; + virtual void roi(const MatExpr& expr, const Range& rowRange, + const Range& colRange, MatExpr& res) const; + virtual void diag(const MatExpr& expr, int d, MatExpr& res) const; + virtual void 
augAssignAdd(const MatExpr& expr, Mat& m) const; + virtual void augAssignSubtract(const MatExpr& expr, Mat& m) const; + virtual void augAssignMultiply(const MatExpr& expr, Mat& m) const; + virtual void augAssignDivide(const MatExpr& expr, Mat& m) const; + virtual void augAssignAnd(const MatExpr& expr, Mat& m) const; + virtual void augAssignOr(const MatExpr& expr, Mat& m) const; + virtual void augAssignXor(const MatExpr& expr, Mat& m) const; + + virtual void add(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const; + virtual void add(const MatExpr& expr1, const Scalar& s, MatExpr& res) const; + + virtual void subtract(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const; + virtual void subtract(const Scalar& s, const MatExpr& expr, MatExpr& res) const; + + virtual void multiply(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res, double scale=1) const; + virtual void multiply(const MatExpr& expr1, double s, MatExpr& res) const; + + virtual void divide(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res, double scale=1) const; + virtual void divide(double s, const MatExpr& expr, MatExpr& res) const; + + virtual void abs(const MatExpr& expr, MatExpr& res) const; + + virtual void transpose(const MatExpr& expr, MatExpr& res) const; + virtual void matmul(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const; + virtual void invert(const MatExpr& expr, int method, MatExpr& res) const; + + virtual Size size(const MatExpr& expr) const; + virtual int type(const MatExpr& expr) const; +}; + + +class CV_EXPORTS MatExpr +{ +public: + MatExpr() : op(0), flags(0), a(Mat()), b(Mat()), c(Mat()), alpha(0), beta(0), s(Scalar()) {} + MatExpr(const MatOp* _op, int _flags, const Mat& _a=Mat(), const Mat& _b=Mat(), + const Mat& _c=Mat(), double _alpha=1, double _beta=1, const Scalar& _s=Scalar()) + : op(_op), flags(_flags), a(_a), b(_b), c(_c), alpha(_alpha), beta(_beta), s(_s) {} + explicit MatExpr(const Mat& m); + operator Mat() const + { + Mat 
m; + op->assign(*this, m); + return m; + } + + template operator Mat_<_Tp>() const + { + Mat_<_Tp> m; + op->assign(*this, m, DataType<_Tp>::type); + return m; + } + + MatExpr row(int y) const; + MatExpr col(int x) const; + MatExpr diag(int d=0) const; + MatExpr operator()( const Range& rowRange, const Range& colRange ) const; + MatExpr operator()( const Rect& roi ) const; + + Mat cross(const Mat& m) const; + double dot(const Mat& m) const; + + MatExpr t() const; + MatExpr inv(int method = DECOMP_LU) const; + MatExpr mul(const MatExpr& e, double scale=1) const; + MatExpr mul(const Mat& m, double scale=1) const; + + Size size() const; + int type() const; + + const MatOp* op; + int flags; + + Mat a, b, c; + double alpha, beta; + Scalar s; +}; + + +CV_EXPORTS MatExpr operator + (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator + (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator + (const Scalar& s, const Mat& a); +CV_EXPORTS MatExpr operator + (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator + (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator + (const MatExpr& e, const Scalar& s); +CV_EXPORTS MatExpr operator + (const Scalar& s, const MatExpr& e); +CV_EXPORTS MatExpr operator + (const MatExpr& e1, const MatExpr& e2); + +CV_EXPORTS MatExpr operator - (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator - (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator - (const Scalar& s, const Mat& a); +CV_EXPORTS MatExpr operator - (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator - (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator - (const MatExpr& e, const Scalar& s); +CV_EXPORTS MatExpr operator - (const Scalar& s, const MatExpr& e); +CV_EXPORTS MatExpr operator - (const MatExpr& e1, const MatExpr& e2); + +CV_EXPORTS MatExpr operator - (const Mat& m); +CV_EXPORTS MatExpr operator - (const MatExpr& e); + +CV_EXPORTS MatExpr operator * (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator 
* (const Mat& a, double s); +CV_EXPORTS MatExpr operator * (double s, const Mat& a); +CV_EXPORTS MatExpr operator * (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator * (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator * (const MatExpr& e, double s); +CV_EXPORTS MatExpr operator * (double s, const MatExpr& e); +CV_EXPORTS MatExpr operator * (const MatExpr& e1, const MatExpr& e2); + +CV_EXPORTS MatExpr operator / (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator / (const Mat& a, double s); +CV_EXPORTS MatExpr operator / (double s, const Mat& a); +CV_EXPORTS MatExpr operator / (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator / (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator / (const MatExpr& e, double s); +CV_EXPORTS MatExpr operator / (double s, const MatExpr& e); +CV_EXPORTS MatExpr operator / (const MatExpr& e1, const MatExpr& e2); + +CV_EXPORTS MatExpr operator < (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator < (const Mat& a, double s); +CV_EXPORTS MatExpr operator < (double s, const Mat& a); + +CV_EXPORTS MatExpr operator <= (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator <= (const Mat& a, double s); +CV_EXPORTS MatExpr operator <= (double s, const Mat& a); + +CV_EXPORTS MatExpr operator == (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator == (const Mat& a, double s); +CV_EXPORTS MatExpr operator == (double s, const Mat& a); + +CV_EXPORTS MatExpr operator != (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator != (const Mat& a, double s); +CV_EXPORTS MatExpr operator != (double s, const Mat& a); + +CV_EXPORTS MatExpr operator >= (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator >= (const Mat& a, double s); +CV_EXPORTS MatExpr operator >= (double s, const Mat& a); + +CV_EXPORTS MatExpr operator > (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator > (const Mat& a, double s); +CV_EXPORTS MatExpr operator > (double s, const Mat& a); + +CV_EXPORTS 
MatExpr min(const Mat& a, const Mat& b); +CV_EXPORTS MatExpr min(const Mat& a, double s); +CV_EXPORTS MatExpr min(double s, const Mat& a); + +CV_EXPORTS MatExpr max(const Mat& a, const Mat& b); +CV_EXPORTS MatExpr max(const Mat& a, double s); +CV_EXPORTS MatExpr max(double s, const Mat& a); + +template static inline MatExpr min(const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + return cv::min((const Mat&)a, (const Mat&)b); +} + +template static inline MatExpr min(const Mat_<_Tp>& a, double s) +{ + return cv::min((const Mat&)a, s); +} + +template static inline MatExpr min(double s, const Mat_<_Tp>& a) +{ + return cv::min((const Mat&)a, s); +} + +template static inline MatExpr max(const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + return cv::max((const Mat&)a, (const Mat&)b); +} + +template static inline MatExpr max(const Mat_<_Tp>& a, double s) +{ + return cv::max((const Mat&)a, s); +} + +template static inline MatExpr max(double s, const Mat_<_Tp>& a) +{ + return cv::max((const Mat&)a, s); +} + +template static inline void min(const Mat_<_Tp>& a, const Mat_<_Tp>& b, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, (const Mat&)b, (Mat&)c); +} + +template static inline void min(const Mat_<_Tp>& a, double s, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, s, (Mat&)c); +} + +template static inline void min(double s, const Mat_<_Tp>& a, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, s, (Mat&)c); +} + +template static inline void max(const Mat_<_Tp>& a, const Mat_<_Tp>& b, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, (const Mat&)b, (Mat&)c); +} + +template static inline void max(const Mat_<_Tp>& a, double s, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, s, (Mat&)c); +} + +template static inline void max(double s, const Mat_<_Tp>& a, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, s, (Mat&)c); +} + + +CV_EXPORTS MatExpr operator & (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator & (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator & (const Scalar& s, const Mat& a); + +CV_EXPORTS MatExpr 
operator | (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator | (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator | (const Scalar& s, const Mat& a); + +CV_EXPORTS MatExpr operator ^ (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator ^ (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator ^ (const Scalar& s, const Mat& a); + +CV_EXPORTS MatExpr operator ~(const Mat& m); + +CV_EXPORTS MatExpr abs(const Mat& m); +CV_EXPORTS MatExpr abs(const MatExpr& e); + +template static inline MatExpr abs(const Mat_<_Tp>& m) +{ + return cv::abs((const Mat&)m); +} + +////////////////////////////// Augmenting algebraic operations ////////////////////////////////// + +inline Mat& Mat::operator = (const MatExpr& e) +{ + e.op->assign(e, *this); + return *this; +} + +template inline Mat_<_Tp>::Mat_(const MatExpr& e) +{ + e.op->assign(e, *this, DataType<_Tp>::type); +} + +template Mat_<_Tp>& Mat_<_Tp>::operator = (const MatExpr& e) +{ + e.op->assign(e, *this, DataType<_Tp>::type); + return *this; +} + +static inline Mat& operator += (const Mat& a, const Mat& b) +{ + add(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator += (const Mat& a, const Scalar& s) +{ + add(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator += (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + add(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline +Mat_<_Tp>& operator += (const Mat_<_Tp>& a, const Scalar& s) +{ + add(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator += (const Mat& a, const MatExpr& b) +{ + b.op->augAssignAdd(b, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator += (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignAdd(b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator -= (const Mat& a, const Mat& b) +{ + subtract(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator -= (const Mat& a, const Scalar& s) +{ + 
subtract(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator -= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + subtract(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline +Mat_<_Tp>& operator -= (const Mat_<_Tp>& a, const Scalar& s) +{ + subtract(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator -= (const Mat& a, const MatExpr& b) +{ + b.op->augAssignSubtract(b, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator -= (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignSubtract(b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator *= (const Mat& a, const Mat& b) +{ + gemm(a, b, 1, Mat(), 0, (Mat&)a, 0); + return (Mat&)a; +} + +static inline Mat& operator *= (const Mat& a, double s) +{ + a.convertTo((Mat&)a, -1, s); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator *= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + gemm(a, b, 1, Mat(), 0, (Mat&)a, 0); + return (Mat_<_Tp>&)a; +} + +template static inline +Mat_<_Tp>& operator *= (const Mat_<_Tp>& a, double s) +{ + a.convertTo((Mat&)a, -1, s); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator *= (const Mat& a, const MatExpr& b) +{ + b.op->augAssignMultiply(b, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator *= (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignMultiply(b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator /= (const Mat& a, const Mat& b) +{ + divide(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator /= (const Mat& a, double s) +{ + a.convertTo((Mat&)a, -1, 1./s); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + divide(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline +Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, double s) +{ + a.convertTo((Mat&)a, -1, 1./s); + return (Mat_<_Tp>&)a; +} + +static inline Mat& 
operator /= (const Mat& a, const MatExpr& b) +{ + b.op->augAssignDivide(b, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignDivide(b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +////////////////////////////// Logical operations /////////////////////////////// + +static inline Mat& operator &= (const Mat& a, const Mat& b) +{ + bitwise_and(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator &= (const Mat& a, const Scalar& s) +{ + bitwise_and(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline Mat_<_Tp>& +operator &= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + bitwise_and(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline Mat_<_Tp>& +operator &= (const Mat_<_Tp>& a, const Scalar& s) +{ + bitwise_and(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator |= (const Mat& a, const Mat& b) +{ + bitwise_or(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator |= (const Mat& a, const Scalar& s) +{ + bitwise_or(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline Mat_<_Tp>& +operator |= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + bitwise_or(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline Mat_<_Tp>& +operator |= (const Mat_<_Tp>& a, const Scalar& s) +{ + bitwise_or(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator ^= (const Mat& a, const Mat& b) +{ + bitwise_xor(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator ^= (const Mat& a, const Scalar& s) +{ + bitwise_xor(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline Mat_<_Tp>& +operator ^= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + bitwise_xor(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline Mat_<_Tp>& +operator ^= (const Mat_<_Tp>& a, const Scalar& s) +{ + bitwise_xor(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +/////////////////////////////// Miscellaneous 
operations ////////////////////////////// + +template void split(const Mat& src, vector >& mv) +{ split(src, (vector&)mv ); } + +////////////////////////////////////////////////////////////// + +template inline MatExpr Mat_<_Tp>::zeros(int rows, int cols) +{ + return Mat::zeros(rows, cols, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::zeros(Size sz) +{ + return Mat::zeros(sz, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::ones(int rows, int cols) +{ + return Mat::ones(rows, cols, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::ones(Size sz) +{ + return Mat::ones(sz, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::eye(int rows, int cols) +{ + return Mat::eye(rows, cols, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::eye(Size sz) +{ + return Mat::eye(sz, DataType<_Tp>::type); +} + +//////////////////////////////// Iterators & Comma initializers ////////////////////////////////// + +inline MatConstIterator::MatConstIterator() + : m(0), elemSize(0), ptr(0), sliceStart(0), sliceEnd(0) {} + +inline MatConstIterator::MatConstIterator(const Mat* _m) + : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0) +{ + if( m && m->isContinuous() ) + { + sliceStart = m->data; + sliceEnd = sliceStart + m->total()*elemSize; + } + seek((const int*)0); +} + +inline MatConstIterator::MatConstIterator(const Mat* _m, int _row, int _col) + : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0) +{ + CV_Assert(m && m->dims <= 2); + if( m->isContinuous() ) + { + sliceStart = m->data; + sliceEnd = sliceStart + m->total()*elemSize; + } + int idx[]={_row, _col}; + seek(idx); +} + +inline MatConstIterator::MatConstIterator(const Mat* _m, Point _pt) + : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0) +{ + CV_Assert(m && m->dims <= 2); + if( m->isContinuous() ) + { + sliceStart = m->data; + sliceEnd = sliceStart + m->total()*elemSize; + } + int idx[]={_pt.y, _pt.x}; + 
seek(idx); +} + +inline MatConstIterator::MatConstIterator(const MatConstIterator& it) + : m(it.m), elemSize(it.elemSize), ptr(it.ptr), sliceStart(it.sliceStart), sliceEnd(it.sliceEnd) +{} + +inline MatConstIterator& MatConstIterator::operator = (const MatConstIterator& it ) +{ + m = it.m; elemSize = it.elemSize; ptr = it.ptr; + sliceStart = it.sliceStart; sliceEnd = it.sliceEnd; + return *this; +} + +inline uchar* MatConstIterator::operator *() const { return ptr; } + +inline MatConstIterator& MatConstIterator::operator += (ptrdiff_t ofs) +{ + if( !m || ofs == 0 ) + return *this; + ptrdiff_t ofsb = ofs*elemSize; + ptr += ofsb; + if( ptr < sliceStart || sliceEnd <= ptr ) + { + ptr -= ofsb; + seek(ofs, true); + } + return *this; +} + +inline MatConstIterator& MatConstIterator::operator -= (ptrdiff_t ofs) +{ return (*this += -ofs); } + +inline MatConstIterator& MatConstIterator::operator --() +{ + if( m && (ptr -= elemSize) < sliceStart ) + { + ptr += elemSize; + seek(-1, true); + } + return *this; +} + +inline MatConstIterator MatConstIterator::operator --(int) +{ + MatConstIterator b = *this; + *this += -1; + return b; +} + +inline MatConstIterator& MatConstIterator::operator ++() +{ + if( m && (ptr += elemSize) >= sliceEnd ) + { + ptr -= elemSize; + seek(1, true); + } + return *this; +} + +inline MatConstIterator MatConstIterator::operator ++(int) +{ + MatConstIterator b = *this; + *this += 1; + return b; +} + +template inline MatConstIterator_<_Tp>::MatConstIterator_() {} + +template inline MatConstIterator_<_Tp>::MatConstIterator_(const Mat_<_Tp>* _m) + : MatConstIterator(_m) {} + +template inline MatConstIterator_<_Tp>:: + MatConstIterator_(const Mat_<_Tp>* _m, int _row, int _col) + : MatConstIterator(_m, _row, _col) {} + +template inline MatConstIterator_<_Tp>:: + MatConstIterator_(const Mat_<_Tp>* _m, Point _pt) + : MatConstIterator(_m, _pt) {} + +template inline MatConstIterator_<_Tp>:: + MatConstIterator_(const MatConstIterator_& it) + : 
MatConstIterator(it) {} + +template inline MatConstIterator_<_Tp>& + MatConstIterator_<_Tp>::operator = (const MatConstIterator_& it ) +{ + MatConstIterator::operator = (it); + return *this; +} + +template inline _Tp MatConstIterator_<_Tp>::operator *() const { return *(_Tp*)(this->ptr); } + +template inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator += (ptrdiff_t ofs) +{ + MatConstIterator::operator += (ofs); + return *this; +} + +template inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator -= (ptrdiff_t ofs) +{ return (*this += -ofs); } + +template inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator --() +{ + MatConstIterator::operator --(); + return *this; +} + +template inline MatConstIterator_<_Tp> MatConstIterator_<_Tp>::operator --(int) +{ + MatConstIterator_ b = *this; + MatConstIterator::operator --(); + return b; +} + +template inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator ++() +{ + MatConstIterator::operator ++(); + return *this; +} + +template inline MatConstIterator_<_Tp> MatConstIterator_<_Tp>::operator ++(int) +{ + MatConstIterator_ b = *this; + MatConstIterator::operator ++(); + return b; +} + +template inline MatIterator_<_Tp>::MatIterator_() : MatConstIterator_<_Tp>() {} + +template inline MatIterator_<_Tp>::MatIterator_(Mat_<_Tp>* _m) + : MatConstIterator_<_Tp>(_m) {} + +template inline MatIterator_<_Tp>::MatIterator_(Mat_<_Tp>* _m, int _row, int _col) + : MatConstIterator_<_Tp>(_m, _row, _col) {} + +template inline MatIterator_<_Tp>::MatIterator_(const Mat_<_Tp>* _m, Point _pt) + : MatConstIterator_<_Tp>(_m, _pt) {} + +template inline MatIterator_<_Tp>::MatIterator_(const Mat_<_Tp>* _m, const int* _idx) + : MatConstIterator_<_Tp>(_m, _idx) {} + +template inline MatIterator_<_Tp>::MatIterator_(const MatIterator_& it) + : MatConstIterator_<_Tp>(it) {} + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator = (const MatIterator_<_Tp>& it ) +{ + MatConstIterator::operator = (it); + 
return *this; +} + +template inline _Tp& MatIterator_<_Tp>::operator *() const { return *(_Tp*)(this->ptr); } + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator += (ptrdiff_t ofs) +{ + MatConstIterator::operator += (ofs); + return *this; +} + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator -= (ptrdiff_t ofs) +{ + MatConstIterator::operator += (-ofs); + return *this; +} + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator --() +{ + MatConstIterator::operator --(); + return *this; +} + +template inline MatIterator_<_Tp> MatIterator_<_Tp>::operator --(int) +{ + MatIterator_ b = *this; + MatConstIterator::operator --(); + return b; +} + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator ++() +{ + MatConstIterator::operator ++(); + return *this; +} + +template inline MatIterator_<_Tp> MatIterator_<_Tp>::operator ++(int) +{ + MatIterator_ b = *this; + MatConstIterator::operator ++(); + return b; +} + +template inline Point MatConstIterator_<_Tp>::pos() const +{ + if( !m ) + return Point(); + CV_DbgAssert( m->dims <= 2 ); + if( m->isContinuous() ) + { + ptrdiff_t ofs = (const _Tp*)ptr - (const _Tp*)m->data; + int y = (int)(ofs / m->cols), x = (int)(ofs - (ptrdiff_t)y*m->cols); + return Point(x, y); + } + else + { + ptrdiff_t ofs = (uchar*)ptr - m->data; + int y = (int)(ofs / m->step), x = (int)((ofs - y*m->step)/sizeof(_Tp)); + return Point(x, y); + } +} + +static inline bool +operator == (const MatConstIterator& a, const MatConstIterator& b) +{ return a.m == b.m && a.ptr == b.ptr; } + +template static inline bool +operator != (const MatConstIterator& a, const MatConstIterator& b) +{ return !(a == b); } + +template static inline bool +operator == (const MatConstIterator_<_Tp>& a, const MatConstIterator_<_Tp>& b) +{ return a.m == b.m && a.ptr == b.ptr; } + +template static inline bool +operator != (const MatConstIterator_<_Tp>& a, const MatConstIterator_<_Tp>& b) +{ return a.m != b.m || a.ptr != b.ptr; } + +template 
static inline bool +operator == (const MatIterator_<_Tp>& a, const MatIterator_<_Tp>& b) +{ return a.m == b.m && a.ptr == b.ptr; } + +template static inline bool +operator != (const MatIterator_<_Tp>& a, const MatIterator_<_Tp>& b) +{ return a.m != b.m || a.ptr != b.ptr; } + +static inline bool +operator < (const MatConstIterator& a, const MatConstIterator& b) +{ return a.ptr < b.ptr; } + +static inline bool +operator > (const MatConstIterator& a, const MatConstIterator& b) +{ return a.ptr > b.ptr; } + +static inline bool +operator <= (const MatConstIterator& a, const MatConstIterator& b) +{ return a.ptr <= b.ptr; } + +static inline bool +operator >= (const MatConstIterator& a, const MatConstIterator& b) +{ return a.ptr >= b.ptr; } + +CV_EXPORTS ptrdiff_t operator - (const MatConstIterator& b, const MatConstIterator& a); + +static inline MatConstIterator operator + (const MatConstIterator& a, ptrdiff_t ofs) +{ MatConstIterator b = a; return b += ofs; } + +static inline MatConstIterator operator + (ptrdiff_t ofs, const MatConstIterator& a) +{ MatConstIterator b = a; return b += ofs; } + +static inline MatConstIterator operator - (const MatConstIterator& a, ptrdiff_t ofs) +{ MatConstIterator b = a; return b += -ofs; } + +template static inline MatConstIterator_<_Tp> +operator + (const MatConstIterator_<_Tp>& a, ptrdiff_t ofs) +{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatConstIterator_<_Tp>&)t; } + +template static inline MatConstIterator_<_Tp> +operator + (ptrdiff_t ofs, const MatConstIterator_<_Tp>& a) +{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatConstIterator_<_Tp>&)t; } + +template static inline MatConstIterator_<_Tp> +operator - (const MatConstIterator_<_Tp>& a, ptrdiff_t ofs) +{ MatConstIterator t = (const MatConstIterator&)a - ofs; return (MatConstIterator_<_Tp>&)t; } + +inline uchar* MatConstIterator::operator [](ptrdiff_t i) const +{ return *(*this + i); } + +template inline _Tp MatConstIterator_<_Tp>::operator 
[](ptrdiff_t i) const +{ return *(_Tp*)MatConstIterator::operator [](i); } + +template static inline MatIterator_<_Tp> +operator + (const MatIterator_<_Tp>& a, ptrdiff_t ofs) +{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatIterator_<_Tp>&)t; } + +template static inline MatIterator_<_Tp> +operator + (ptrdiff_t ofs, const MatIterator_<_Tp>& a) +{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatIterator_<_Tp>&)t; } + +template static inline MatIterator_<_Tp> +operator - (const MatIterator_<_Tp>& a, ptrdiff_t ofs) +{ MatConstIterator t = (const MatConstIterator&)a - ofs; return (MatIterator_<_Tp>&)t; } + +template inline _Tp& MatIterator_<_Tp>::operator [](ptrdiff_t i) const +{ return *(*this + i); } + +template inline MatConstIterator_<_Tp> Mat_<_Tp>::begin() const +{ return Mat::begin<_Tp>(); } + +template inline MatConstIterator_<_Tp> Mat_<_Tp>::end() const +{ return Mat::end<_Tp>(); } + +template inline MatIterator_<_Tp> Mat_<_Tp>::begin() +{ return Mat::begin<_Tp>(); } + +template inline MatIterator_<_Tp> Mat_<_Tp>::end() +{ return Mat::end<_Tp>(); } + +template inline MatCommaInitializer_<_Tp>::MatCommaInitializer_(Mat_<_Tp>* _m) : it(_m) {} + +template template inline MatCommaInitializer_<_Tp>& +MatCommaInitializer_<_Tp>::operator , (T2 v) +{ + CV_DbgAssert( this->it < ((const Mat_<_Tp>*)this->it.m)->end() ); + *this->it = _Tp(v); ++this->it; + return *this; +} + +template inline Mat_<_Tp> MatCommaInitializer_<_Tp>::operator *() const +{ + CV_DbgAssert( this->it == ((const Mat_<_Tp>*)this->it.m)->end() ); + return Mat_<_Tp>(*this->it.m); +} + +template inline MatCommaInitializer_<_Tp>::operator Mat_<_Tp>() const +{ + CV_DbgAssert( this->it == ((const Mat_<_Tp>*)this->it.m)->end() ); + return Mat_<_Tp>(*this->it.m); +} + +template static inline MatCommaInitializer_<_Tp> +operator << (const Mat_<_Tp>& m, T2 val) +{ + MatCommaInitializer_<_Tp> commaInitializer((Mat_<_Tp>*)&m); + return (commaInitializer, val); +} + 
+//////////////////////////////// SparseMat //////////////////////////////// + +inline SparseMat::SparseMat() +: flags(MAGIC_VAL), hdr(0) +{ +} + +inline SparseMat::SparseMat(int _dims, const int* _sizes, int _type) +: flags(MAGIC_VAL), hdr(0) +{ + create(_dims, _sizes, _type); +} + +inline SparseMat::SparseMat(const SparseMat& m) +: flags(m.flags), hdr(m.hdr) +{ + addref(); +} + +inline SparseMat::~SparseMat() +{ + release(); +} + +inline SparseMat& SparseMat::operator = (const SparseMat& m) +{ + if( this != &m ) + { + if( m.hdr ) + CV_XADD(&m.hdr->refcount, 1); + release(); + flags = m.flags; + hdr = m.hdr; + } + return *this; +} + +inline SparseMat& SparseMat::operator = (const Mat& m) +{ return (*this = SparseMat(m)); } + +inline SparseMat SparseMat::clone() const +{ + SparseMat temp; + this->copyTo(temp); + return temp; +} + + +inline void SparseMat::assignTo( SparseMat& m, int type ) const +{ + if( type < 0 ) + m = *this; + else + convertTo(m, type); +} + +inline void SparseMat::addref() +{ if( hdr ) CV_XADD(&hdr->refcount, 1); } + +inline void SparseMat::release() +{ + if( hdr && CV_XADD(&hdr->refcount, -1) == 1 ) + delete hdr; + hdr = 0; +} + +inline size_t SparseMat::elemSize() const +{ return CV_ELEM_SIZE(flags); } + +inline size_t SparseMat::elemSize1() const +{ return CV_ELEM_SIZE1(flags); } + +inline int SparseMat::type() const +{ return CV_MAT_TYPE(flags); } + +inline int SparseMat::depth() const +{ return CV_MAT_DEPTH(flags); } + +inline int SparseMat::channels() const +{ return CV_MAT_CN(flags); } + +inline const int* SparseMat::size() const +{ + return hdr ? hdr->size : 0; +} + +inline int SparseMat::size(int i) const +{ + if( hdr ) + { + CV_DbgAssert((unsigned)i < (unsigned)hdr->dims); + return hdr->size[i]; + } + return 0; +} + +inline int SparseMat::dims() const +{ + return hdr ? hdr->dims : 0; +} + +inline size_t SparseMat::nzcount() const +{ + return hdr ? 
hdr->nodeCount : 0; +} + +inline size_t SparseMat::hash(int i0) const +{ + return (size_t)i0; +} + +inline size_t SparseMat::hash(int i0, int i1) const +{ + return (size_t)(unsigned)i0*HASH_SCALE + (unsigned)i1; +} + +inline size_t SparseMat::hash(int i0, int i1, int i2) const +{ + return ((size_t)(unsigned)i0*HASH_SCALE + (unsigned)i1)*HASH_SCALE + (unsigned)i2; +} + +inline size_t SparseMat::hash(const int* idx) const +{ + size_t h = (unsigned)idx[0]; + if( !hdr ) + return 0; + int i, d = hdr->dims; + for( i = 1; i < d; i++ ) + h = h*HASH_SCALE + (unsigned)idx[i]; + return h; +} + +template inline _Tp& SparseMat::ref(int i0, size_t* hashval) +{ return *(_Tp*)((SparseMat*)this)->ptr(i0, true, hashval); } + +template inline _Tp& SparseMat::ref(int i0, int i1, size_t* hashval) +{ return *(_Tp*)((SparseMat*)this)->ptr(i0, i1, true, hashval); } + +template inline _Tp& SparseMat::ref(int i0, int i1, int i2, size_t* hashval) +{ return *(_Tp*)((SparseMat*)this)->ptr(i0, i1, i2, true, hashval); } + +template inline _Tp& SparseMat::ref(const int* idx, size_t* hashval) +{ return *(_Tp*)((SparseMat*)this)->ptr(idx, true, hashval); } + +template inline _Tp SparseMat::value(int i0, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, false, hashval); + return p ? *p : _Tp(); +} + +template inline _Tp SparseMat::value(int i0, int i1, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, i1, false, hashval); + return p ? *p : _Tp(); +} + +template inline _Tp SparseMat::value(int i0, int i1, int i2, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, i1, i2, false, hashval); + return p ? *p : _Tp(); +} + +template inline _Tp SparseMat::value(const int* idx, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(idx, false, hashval); + return p ? 
*p : _Tp(); +} + +template inline const _Tp* SparseMat::find(int i0, size_t* hashval) const +{ return (const _Tp*)((SparseMat*)this)->ptr(i0, false, hashval); } + +template inline const _Tp* SparseMat::find(int i0, int i1, size_t* hashval) const +{ return (const _Tp*)((SparseMat*)this)->ptr(i0, i1, false, hashval); } + +template inline const _Tp* SparseMat::find(int i0, int i1, int i2, size_t* hashval) const +{ return (const _Tp*)((SparseMat*)this)->ptr(i0, i1, i2, false, hashval); } + +template inline const _Tp* SparseMat::find(const int* idx, size_t* hashval) const +{ return (const _Tp*)((SparseMat*)this)->ptr(idx, false, hashval); } + +template inline _Tp& SparseMat::value(Node* n) +{ return *(_Tp*)((uchar*)n + hdr->valueOffset); } + +template inline const _Tp& SparseMat::value(const Node* n) const +{ return *(const _Tp*)((const uchar*)n + hdr->valueOffset); } + +inline SparseMat::Node* SparseMat::node(size_t nidx) +{ return (Node*)&hdr->pool[nidx]; } + +inline const SparseMat::Node* SparseMat::node(size_t nidx) const +{ return (const Node*)&hdr->pool[nidx]; } + +inline SparseMatIterator SparseMat::begin() +{ return SparseMatIterator(this); } + +inline SparseMatConstIterator SparseMat::begin() const +{ return SparseMatConstIterator(this); } + +inline SparseMatIterator SparseMat::end() +{ SparseMatIterator it(this); it.seekEnd(); return it; } + +inline SparseMatConstIterator SparseMat::end() const +{ SparseMatConstIterator it(this); it.seekEnd(); return it; } + +template inline SparseMatIterator_<_Tp> SparseMat::begin() +{ return SparseMatIterator_<_Tp>(this); } + +template inline SparseMatConstIterator_<_Tp> SparseMat::begin() const +{ return SparseMatConstIterator_<_Tp>(this); } + +template inline SparseMatIterator_<_Tp> SparseMat::end() +{ SparseMatIterator_<_Tp> it(this); it.seekEnd(); return it; } + +template inline SparseMatConstIterator_<_Tp> SparseMat::end() const +{ SparseMatConstIterator_<_Tp> it(this); it.seekEnd(); return it; } + + +inline 
SparseMatConstIterator::SparseMatConstIterator() +: m(0), hashidx(0), ptr(0) +{ +} + +inline SparseMatConstIterator::SparseMatConstIterator(const SparseMatConstIterator& it) +: m(it.m), hashidx(it.hashidx), ptr(it.ptr) +{ +} + +static inline bool operator == (const SparseMatConstIterator& it1, const SparseMatConstIterator& it2) +{ return it1.m == it2.m && it1.hashidx == it2.hashidx && it1.ptr == it2.ptr; } + +static inline bool operator != (const SparseMatConstIterator& it1, const SparseMatConstIterator& it2) +{ return !(it1 == it2); } + + +inline SparseMatConstIterator& SparseMatConstIterator::operator = (const SparseMatConstIterator& it) +{ + if( this != &it ) + { + m = it.m; + hashidx = it.hashidx; + ptr = it.ptr; + } + return *this; +} + +template inline const _Tp& SparseMatConstIterator::value() const +{ return *(_Tp*)ptr; } + +inline const SparseMat::Node* SparseMatConstIterator::node() const +{ + return ptr && m && m->hdr ? + (const SparseMat::Node*)(ptr - m->hdr->valueOffset) : 0; +} + +inline SparseMatConstIterator SparseMatConstIterator::operator ++(int) +{ + SparseMatConstIterator it = *this; + ++*this; + return it; +} + + +inline void SparseMatConstIterator::seekEnd() +{ + if( m && m->hdr ) + { + hashidx = m->hdr->hashtab.size(); + ptr = 0; + } +} + +inline SparseMatIterator::SparseMatIterator() +{} + +inline SparseMatIterator::SparseMatIterator(SparseMat* _m) +: SparseMatConstIterator(_m) +{} + +inline SparseMatIterator::SparseMatIterator(const SparseMatIterator& it) +: SparseMatConstIterator(it) +{ +} + +inline SparseMatIterator& SparseMatIterator::operator = (const SparseMatIterator& it) +{ + (SparseMatConstIterator&)*this = it; + return *this; +} + +template inline _Tp& SparseMatIterator::value() const +{ return *(_Tp*)ptr; } + +inline SparseMat::Node* SparseMatIterator::node() const +{ + return (SparseMat::Node*)SparseMatConstIterator::node(); +} + +inline SparseMatIterator& SparseMatIterator::operator ++() +{ + SparseMatConstIterator::operator 
++(); + return *this; +} + +inline SparseMatIterator SparseMatIterator::operator ++(int) +{ + SparseMatIterator it = *this; + ++*this; + return it; +} + + +template inline SparseMat_<_Tp>::SparseMat_() +{ flags = MAGIC_VAL | DataType<_Tp>::type; } + +template inline SparseMat_<_Tp>::SparseMat_(int _dims, const int* _sizes) +: SparseMat(_dims, _sizes, DataType<_Tp>::type) +{} + +template inline SparseMat_<_Tp>::SparseMat_(const SparseMat& m) +{ + if( m.type() == DataType<_Tp>::type ) + *this = (const SparseMat_<_Tp>&)m; + else + m.convertTo(this, DataType<_Tp>::type); +} + +template inline SparseMat_<_Tp>::SparseMat_(const SparseMat_<_Tp>& m) +{ + this->flags = m.flags; + this->hdr = m.hdr; + if( this->hdr ) + CV_XADD(&this->hdr->refcount, 1); +} + +template inline SparseMat_<_Tp>::SparseMat_(const Mat& m) +{ + SparseMat sm(m); + *this = sm; +} + +template inline SparseMat_<_Tp>::SparseMat_(const CvSparseMat* m) +{ + SparseMat sm(m); + *this = sm; +} + +template inline SparseMat_<_Tp>& +SparseMat_<_Tp>::operator = (const SparseMat_<_Tp>& m) +{ + if( this != &m ) + { + if( m.hdr ) CV_XADD(&m.hdr->refcount, 1); + release(); + flags = m.flags; + hdr = m.hdr; + } + return *this; +} + +template inline SparseMat_<_Tp>& +SparseMat_<_Tp>::operator = (const SparseMat& m) +{ + if( m.type() == DataType<_Tp>::type ) + return (*this = (const SparseMat_<_Tp>&)m); + m.convertTo(*this, DataType<_Tp>::type); + return *this; +} + +template inline SparseMat_<_Tp>& +SparseMat_<_Tp>::operator = (const Mat& m) +{ return (*this = SparseMat(m)); } + +template inline SparseMat_<_Tp> +SparseMat_<_Tp>::clone() const +{ + SparseMat_<_Tp> m; + this->copyTo(m); + return m; +} + +template inline void +SparseMat_<_Tp>::create(int _dims, const int* _sizes) +{ + SparseMat::create(_dims, _sizes, DataType<_Tp>::type); +} + +template inline +SparseMat_<_Tp>::operator CvSparseMat*() const +{ + return SparseMat::operator CvSparseMat*(); +} + +template inline int SparseMat_<_Tp>::type() const +{ return 
DataType<_Tp>::type; } + +template inline int SparseMat_<_Tp>::depth() const +{ return DataType<_Tp>::depth; } + +template inline int SparseMat_<_Tp>::channels() const +{ return DataType<_Tp>::channels; } + +template inline _Tp& +SparseMat_<_Tp>::ref(int i0, size_t* hashval) +{ return SparseMat::ref<_Tp>(i0, hashval); } + +template inline _Tp +SparseMat_<_Tp>::operator()(int i0, size_t* hashval) const +{ return SparseMat::value<_Tp>(i0, hashval); } + +template inline _Tp& +SparseMat_<_Tp>::ref(int i0, int i1, size_t* hashval) +{ return SparseMat::ref<_Tp>(i0, i1, hashval); } + +template inline _Tp +SparseMat_<_Tp>::operator()(int i0, int i1, size_t* hashval) const +{ return SparseMat::value<_Tp>(i0, i1, hashval); } + +template inline _Tp& +SparseMat_<_Tp>::ref(int i0, int i1, int i2, size_t* hashval) +{ return SparseMat::ref<_Tp>(i0, i1, i2, hashval); } + +template inline _Tp +SparseMat_<_Tp>::operator()(int i0, int i1, int i2, size_t* hashval) const +{ return SparseMat::value<_Tp>(i0, i1, i2, hashval); } + +template inline _Tp& +SparseMat_<_Tp>::ref(const int* idx, size_t* hashval) +{ return SparseMat::ref<_Tp>(idx, hashval); } + +template inline _Tp +SparseMat_<_Tp>::operator()(const int* idx, size_t* hashval) const +{ return SparseMat::value<_Tp>(idx, hashval); } + +template inline SparseMatIterator_<_Tp> SparseMat_<_Tp>::begin() +{ return SparseMatIterator_<_Tp>(this); } + +template inline SparseMatConstIterator_<_Tp> SparseMat_<_Tp>::begin() const +{ return SparseMatConstIterator_<_Tp>(this); } + +template inline SparseMatIterator_<_Tp> SparseMat_<_Tp>::end() +{ SparseMatIterator_<_Tp> it(this); it.seekEnd(); return it; } + +template inline SparseMatConstIterator_<_Tp> SparseMat_<_Tp>::end() const +{ SparseMatConstIterator_<_Tp> it(this); it.seekEnd(); return it; } + +template inline +SparseMatConstIterator_<_Tp>::SparseMatConstIterator_() +{} + +template inline +SparseMatConstIterator_<_Tp>::SparseMatConstIterator_(const SparseMat_<_Tp>* _m) +: 
SparseMatConstIterator(_m) +{} + +template inline +SparseMatConstIterator_<_Tp>::SparseMatConstIterator_(const SparseMatConstIterator_<_Tp>& it) +: SparseMatConstIterator(it) +{} + +template inline SparseMatConstIterator_<_Tp>& +SparseMatConstIterator_<_Tp>::operator = (const SparseMatConstIterator_<_Tp>& it) +{ return ((SparseMatConstIterator&)*this = it); } + +template inline const _Tp& +SparseMatConstIterator_<_Tp>::operator *() const +{ return *(const _Tp*)this->ptr; } + +template inline SparseMatConstIterator_<_Tp>& +SparseMatConstIterator_<_Tp>::operator ++() +{ + SparseMatConstIterator::operator ++(); + return *this; +} + +template inline SparseMatConstIterator_<_Tp> +SparseMatConstIterator_<_Tp>::operator ++(int) +{ + SparseMatConstIterator it = *this; + SparseMatConstIterator::operator ++(); + return it; +} + +template inline +SparseMatIterator_<_Tp>::SparseMatIterator_() +{} + +template inline +SparseMatIterator_<_Tp>::SparseMatIterator_(SparseMat_<_Tp>* _m) +: SparseMatConstIterator_<_Tp>(_m) +{} + +template inline +SparseMatIterator_<_Tp>::SparseMatIterator_(const SparseMatIterator_<_Tp>& it) +: SparseMatConstIterator_<_Tp>(it) +{} + +template inline SparseMatIterator_<_Tp>& +SparseMatIterator_<_Tp>::operator = (const SparseMatIterator_<_Tp>& it) +{ return ((SparseMatIterator&)*this = it); } + +template inline _Tp& +SparseMatIterator_<_Tp>::operator *() const +{ return *(_Tp*)this->ptr; } + +template inline SparseMatIterator_<_Tp>& +SparseMatIterator_<_Tp>::operator ++() +{ + SparseMatConstIterator::operator ++(); + return *this; +} + +template inline SparseMatIterator_<_Tp> +SparseMatIterator_<_Tp>::operator ++(int) +{ + SparseMatIterator it = *this; + SparseMatConstIterator::operator ++(); + return it; +} + +} + +#endif +#endif diff --git a/opencv/core/opencv2/core/operations.hpp b/opencv/core/opencv2/core/operations.hpp new file mode 100644 index 0000000..310c85f --- /dev/null +++ b/opencv/core/opencv2/core/operations.hpp @@ -0,0 +1,3627 @@ 
+/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CORE_OPERATIONS_HPP__ +#define __OPENCV_CORE_OPERATIONS_HPP__ + +#ifndef SKIP_INCLUDES + #include + #include +#endif // SKIP_INCLUDES + +#ifdef __cplusplus + +/////// exchange-add operation for atomic operations on reference counters /////// +#ifdef __GNUC__Lior + + #if __GNUC__*10 + __GNUC_MINOR__ >= 42 + + #if !defined WIN32 && (defined __i486__ || defined __i586__ || \ + defined __i686__ || defined __MMX__ || defined __SSE__ || defined __ppc__) + #define CV_XADD __sync_fetch_and_add + #else + #include + #define CV_XADD __gnu_cxx::__exchange_and_add + #endif + + #else + #include + #if __GNUC__*10 + __GNUC_MINOR__ >= 34 + #define CV_XADD __gnu_cxx::__exchange_and_add + #else + #define CV_XADD __exchange_and_add + #endif + #endif + +#elif defined WIN32 || defined _WIN32 + #include + #define CV_XADD(addr,delta) _InterlockedExchangeAdd((long volatile*)(addr), (delta)) +#else + + template static inline _Tp CV_XADD(_Tp* addr, _Tp delta) + { int tmp = *addr; *addr += delta; return tmp; } + +#endif + +#include + +namespace cv +{ + +using std::cos; +using std::sin; +using std::max; +using std::min; +using std::exp; +using std::log; +using std::pow; +using std::sqrt; + + +/////////////// saturate_cast (used in image & signal processing) /////////////////// + +template static inline _Tp saturate_cast(uchar v) { return _Tp(v); } +template static inline _Tp saturate_cast(schar v) { return _Tp(v); } +template 
static inline _Tp saturate_cast(ushort v) { return _Tp(v); } +template static inline _Tp saturate_cast(short v) { return _Tp(v); } +template static inline _Tp saturate_cast(unsigned v) { return _Tp(v); } +template static inline _Tp saturate_cast(int v) { return _Tp(v); } +template static inline _Tp saturate_cast(float v) { return _Tp(v); } +template static inline _Tp saturate_cast(double v) { return _Tp(v); } + +template<> inline uchar saturate_cast(schar v) +{ return (uchar)std::max((int)v, 0); } +template<> inline uchar saturate_cast(ushort v) +{ return (uchar)std::min((unsigned)v, (unsigned)UCHAR_MAX); } +template<> inline uchar saturate_cast(int v) +{ return (uchar)((unsigned)v <= UCHAR_MAX ? v : v > 0 ? UCHAR_MAX : 0); } +template<> inline uchar saturate_cast(short v) +{ return saturate_cast((int)v); } +template<> inline uchar saturate_cast(unsigned v) +{ return (uchar)std::min(v, (unsigned)UCHAR_MAX); } +template<> inline uchar saturate_cast(float v) +{ int iv = cvRound(v); return saturate_cast(iv); } +template<> inline uchar saturate_cast(double v) +{ int iv = cvRound(v); return saturate_cast(iv); } + +template<> inline schar saturate_cast(uchar v) +{ return (schar)std::min((int)v, SCHAR_MAX); } +template<> inline schar saturate_cast(ushort v) +{ return (schar)std::min((unsigned)v, (unsigned)SCHAR_MAX); } +template<> inline schar saturate_cast(int v) +{ + return (schar)((unsigned)(v-SCHAR_MIN) <= (unsigned)UCHAR_MAX ? + v : v > 0 ? 
SCHAR_MAX : SCHAR_MIN); +} +template<> inline schar saturate_cast(short v) +{ return saturate_cast((int)v); } +template<> inline schar saturate_cast(unsigned v) +{ return (schar)std::min(v, (unsigned)SCHAR_MAX); } + +template<> inline schar saturate_cast(float v) +{ int iv = cvRound(v); return saturate_cast(iv); } +template<> inline schar saturate_cast(double v) +{ int iv = cvRound(v); return saturate_cast(iv); } + +template<> inline ushort saturate_cast(schar v) +{ return (ushort)std::max((int)v, 0); } +template<> inline ushort saturate_cast(short v) +{ return (ushort)std::max((int)v, 0); } +template<> inline ushort saturate_cast(int v) +{ return (ushort)((unsigned)v <= (unsigned)USHRT_MAX ? v : v > 0 ? USHRT_MAX : 0); } +template<> inline ushort saturate_cast(unsigned v) +{ return (ushort)std::min(v, (unsigned)USHRT_MAX); } +template<> inline ushort saturate_cast(float v) +{ int iv = cvRound(v); return saturate_cast(iv); } +template<> inline ushort saturate_cast(double v) +{ int iv = cvRound(v); return saturate_cast(iv); } + +template<> inline short saturate_cast(ushort v) +{ return (short)std::min((int)v, SHRT_MAX); } +template<> inline short saturate_cast(int v) +{ + return (short)((unsigned)(v - SHRT_MIN) <= (unsigned)USHRT_MAX ? + v : v > 0 ? SHRT_MAX : SHRT_MIN); +} +template<> inline short saturate_cast(unsigned v) +{ return (short)std::min(v, (unsigned)SHRT_MAX); } +//XXXXXXXXXXXLior +//template<> inline short saturate_cast(long int v) +//{ return (short)std::min(v, (long int)SHRT_MAX); } +//XXXXXXXXXXXX +template<> inline short saturate_cast(float v) +{ int iv = cvRound(v); return saturate_cast(iv); } +template<> inline short saturate_cast(double v) +{ int iv = cvRound(v); return saturate_cast(iv); } + +template<> inline int saturate_cast(float v) { return cvRound(v); } +template<> inline int saturate_cast(double v) { return cvRound(v); } + +// we intentionally do not clip negative numbers, to make -1 become 0xffffffff etc. 
+template<> inline unsigned saturate_cast(float v){ return cvRound(v); } +template<> inline unsigned saturate_cast(double v) { return cvRound(v); } + + +//////////////////////////////// Matx ///////////////////////////////// + + +template inline Matx<_Tp, m, n>::Matx() +{ + for(int i = 0; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0) +{ + val[0] = v0; + for(int i = 1; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1) +{ + assert(channels >= 2); + val[0] = v0; val[1] = v1; + for(int i = 2; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2) +{ + assert(channels >= 3); + val[0] = v0; val[1] = v1; val[2] = v2; + for(int i = 3; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3) +{ + assert(channels >= 4); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + for(int i = 4; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4) +{ + assert(channels >= 5); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; val[4] = v4; + for(int i = 5; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5) +{ + assert(channels >= 6); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; + for(int i = 6; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6) +{ + assert(channels >= 7); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; + for(int i = 7; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7) +{ + assert(channels >= 8); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] 
= v5; val[6] = v6; val[7] = v7; + for(int i = 8; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8) +{ + assert(channels >= 9); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; + for(int i = 9; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9) +{ + assert(channels >= 10); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; val[9] = v9; + for(int i = 10; i < channels; i++) val[i] = _Tp(0); +} + + +template +inline Matx<_Tp,m,n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11) +{ + assert(channels == 12); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; val[9] = v9; val[10] = v10; val[11] = v11; +} + +template +inline Matx<_Tp,m,n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11, + _Tp v12, _Tp v13, _Tp v14, _Tp v15) +{ + assert(channels == 16); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; val[9] = v9; val[10] = v10; val[11] = v11; + val[12] = v12; val[13] = v13; val[14] = v14; val[15] = v15; +} + +template inline Matx<_Tp, m, n>::Matx(const _Tp* values) +{ + for( int i = 0; i < channels; i++ ) val[i] = values[i]; +} + +template inline Matx<_Tp, m, n> Matx<_Tp, m, n>::all(_Tp alpha) +{ + Matx<_Tp, m, n> M; + for( int i = 0; i < m*n; i++ ) M.val[i] = alpha; + return M; +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::zeros() +{ + return all(0); +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::ones() +{ + return all(1); +} + +template inline 
+Matx<_Tp,m,n> Matx<_Tp,m,n>::eye() +{ + Matx<_Tp,m,n> M; + for(int i = 0; i < MIN(m,n); i++) + M(i,i) = 1; + return M; +} + +template inline _Tp Matx<_Tp, m, n>::dot(const Matx<_Tp, m, n>& M) const +{ + _Tp s = 0; + for( int i = 0; i < m*n; i++ ) s += val[i]*M.val[i]; + return s; +} + + +template inline double Matx<_Tp, m, n>::ddot(const Matx<_Tp, m, n>& M) const +{ + double s = 0; + for( int i = 0; i < m*n; i++ ) s += (double)val[i]*M.val[i]; + return s; +} + + + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::diag(const Matx<_Tp,MIN(m,n),1>& d) +{ + Matx<_Tp,m,n> M; + for(int i = 0; i < MIN(m,n); i++) + M(i,i) = d(i, 0); + return M; +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::randu(_Tp a, _Tp b) +{ + Matx<_Tp,m,n> M; + Mat matM(M, false); + cv::randu(matM, Scalar(a), Scalar(b)); + return M; +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::randn(_Tp a, _Tp b) +{ + Matx<_Tp,m,n> M; + Mat matM(M, false); + cv::randn(matM, Scalar(a), Scalar(b)); + return M; +} + +template template +inline Matx<_Tp, m, n>::operator Matx() const +{ + Matx M; + for( int i = 0; i < m*n; i++ ) M.val[i] = saturate_cast(val[i]); + return M; +} + + +template template inline +Matx<_Tp, m1, n1> Matx<_Tp, m, n>::reshape() const +{ + CV_DbgAssert(m1*n1 == m*n); + return (const Matx<_Tp, m1, n1>&)*this; +} + + +template +template inline +Matx<_Tp, m1, n1> Matx<_Tp, m, n>::get_minor(int i, int j) const +{ + CV_DbgAssert(0 <= i && i+m1 <= m && 0 <= j && j+n1 <= n); + Matx<_Tp, m1, n1> s; + for( int di = 0; di < m1; di++ ) + for( int dj = 0; dj < n1; dj++ ) + s(di, dj) = (*this)(i+di, j+dj); + return s; +} + + +template inline +Matx<_Tp, 1, n> Matx<_Tp, m, n>::row(int i) const +{ + CV_DbgAssert((unsigned)i < (unsigned)m); + return Matx<_Tp, 1, n>(&val[i*n]); +} + + +template inline +Matx<_Tp, m, 1> Matx<_Tp, m, n>::col(int j) const +{ + CV_DbgAssert((unsigned)j < (unsigned)n); + Matx<_Tp, m, 1> v; + for( int i = 0; i < m; i++ ) + v[i] = val[i*n + j]; + return v; +} + + +template inline 
+Matx<_Tp, MIN(m,n), 1> Matx<_Tp, m, n>::diag() const +{ + diag_type d; + for( int i = 0; i < MIN(m, n); i++ ) + d.val[i] = val[i*n + i]; + return d; +} + + +template inline +const _Tp& Matx<_Tp, m, n>::operator ()(int i, int j) const +{ + CV_DbgAssert( (unsigned)i < (unsigned)m && (unsigned)j < (unsigned)n ); + return this->val[i*n + j]; +} + + +template inline +_Tp& Matx<_Tp, m, n>::operator ()(int i, int j) +{ + CV_DbgAssert( (unsigned)i < (unsigned)m && (unsigned)j < (unsigned)n ); + return val[i*n + j]; +} + + +template inline +const _Tp& Matx<_Tp, m, n>::operator ()(int i) const +{ + CV_DbgAssert( (m == 1 || n == 1) && (unsigned)i < (unsigned)(m+n-1) ); + return val[i]; +} + + +template inline +_Tp& Matx<_Tp, m, n>::operator ()(int i) +{ + CV_DbgAssert( (m == 1 || n == 1) && (unsigned)i < (unsigned)(m+n-1) ); + return val[i]; +} + + +template static inline +Matx<_Tp1, m, n>& operator += (Matx<_Tp1, m, n>& a, const Matx<_Tp2, m, n>& b) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] + b.val[i]); + return a; +} + + +template static inline +Matx<_Tp1, m, n>& operator -= (Matx<_Tp1, m, n>& a, const Matx<_Tp2, m, n>& b) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] - b.val[i]); + return a; +} + + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_AddOp) +{ + for( int i = 0; i < m*n; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] + b.val[i]); +} + + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_SubOp) +{ + for( int i = 0; i < m*n; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] - b.val[i]); +} + + +template template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, _T2 alpha, Matx_ScaleOp) +{ + for( int i = 0; i < m*n; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] * alpha); +} + + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_MulOp) +{ + for( int i = 0; i < m*n; i++ 
) + val[i] = saturate_cast<_Tp>(a.val[i] * b.val[i]); +} + + +template template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b, Matx_MatMulOp) +{ + for( int i = 0; i < m; i++ ) + for( int j = 0; j < n; j++ ) + { + _Tp s = 0; + for( int k = 0; k < l; k++ ) + s += a(i, k) * b(k, j); + val[i*n + j] = s; + } +} + + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, n, m>& a, Matx_TOp) +{ + for( int i = 0; i < m; i++ ) + for( int j = 0; j < n; j++ ) + val[i*n + j] = a(j, i); +} + + +template static inline +Matx<_Tp, m, n> operator + (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + return Matx<_Tp, m, n>(a, b, Matx_AddOp()); +} + + +template static inline +Matx<_Tp, m, n> operator - (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + return Matx<_Tp, m, n>(a, b, Matx_SubOp()); +} + + +template static inline +Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, int alpha) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha); + return a; +} + +template static inline +Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, float alpha) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha); + return a; +} + +template static inline +Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, double alpha) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha); + return a; +} + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, int alpha) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, float alpha) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, double alpha) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (int alpha, const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, alpha, 
Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (float alpha, const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (double alpha, const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator - (const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, -1, Matx_ScaleOp()); +} + + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b) +{ + return Matx<_Tp, m, n>(a, b, Matx_MatMulOp()); +} + + +template static inline +Point_<_Tp> operator * (const Matx<_Tp, 2, 2>& a, const Point_<_Tp>& b) +{ + Matx<_Tp, 2, 1> tmp = a*Vec<_Tp,2>(b.x, b.y); + return Point_<_Tp>(tmp.val[0], tmp.val[1]); +} + + +template static inline +Point3_<_Tp> operator * (const Matx<_Tp, 3, 3>& a, const Point3_<_Tp>& b) +{ + Matx<_Tp, 3, 1> tmp = a*Vec<_Tp,3>(b.x, b.y, b.z); + return Point3_<_Tp>(tmp.val[0], tmp.val[1], tmp.val[2]); +} + + +template static inline +Point3_<_Tp> operator * (const Matx<_Tp, 3, 3>& a, const Point_<_Tp>& b) +{ + Matx<_Tp, 3, 1> tmp = a*Vec<_Tp,3>(b.x, b.y, 1); + return Point3_<_Tp>(tmp.val[0], tmp.val[1], tmp.val[2]); +} + + +template static inline +Matx<_Tp, 4, 1> operator * (const Matx<_Tp, 4, 4>& a, const Point3_<_Tp>& b) +{ + return a*Matx<_Tp, 4, 1>(b.x, b.y, b.z, 1); +} + + +template static inline +Scalar operator * (const Matx<_Tp, 4, 4>& a, const Scalar& b) +{ + return Scalar(a*Matx<_Tp, 4, 1>(b[0],b[1],b[2],b[3])); +} + + +template inline +Matx<_Tp, m, n> Matx<_Tp, m, n>::mul(const Matx<_Tp, m, n>& a) const +{ + return Matx<_Tp, m, n>(*this, a, Matx_MulOp()); +} + + +CV_EXPORTS int LU(float* A, size_t astep, int m, float* b, size_t bstep, int n); +CV_EXPORTS int LU(double* A, size_t astep, int m, double* b, size_t bstep, int n); +CV_EXPORTS bool Cholesky(float* A, size_t astep, int m, float* b, size_t bstep, int n); +CV_EXPORTS bool 
Cholesky(double* A, size_t astep, int m, double* b, size_t bstep, int n); + + +template struct CV_EXPORTS Matx_DetOp +{ + double operator ()(const Matx<_Tp, m, m>& a) const + { + Matx<_Tp, m, m> temp = a; + double p = LU(temp.val, m, m, 0, 0, 0); + if( p == 0 ) + return p; + for( int i = 0; i < m; i++ ) + p *= temp(i, i); + return p; + } +}; + + +template struct CV_EXPORTS Matx_DetOp<_Tp, 1> +{ + double operator ()(const Matx<_Tp, 1, 1>& a) const + { + return a(0,0); + } +}; + + +template struct CV_EXPORTS Matx_DetOp<_Tp, 2> +{ + double operator ()(const Matx<_Tp, 2, 2>& a) const + { + return a(0,0)*a(1,1) - a(0,1)*a(1,0); + } +}; + + +template struct CV_EXPORTS Matx_DetOp<_Tp, 3> +{ + double operator ()(const Matx<_Tp, 3, 3>& a) const + { + return a(0,0)*(a(1,1)*a(2,2) - a(2,1)*a(1,2)) - + a(0,1)*(a(1,0)*a(2,2) - a(2,0)*a(1,2)) + + a(0,2)*(a(1,0)*a(2,1) - a(2,0)*a(1,1)); + } +}; + +template static inline +double determinant(const Matx<_Tp, m, m>& a) +{ + return Matx_DetOp<_Tp, m>()(a); +} + + +template static inline +double trace(const Matx<_Tp, m, n>& a) +{ + _Tp s = 0; + for( int i = 0; i < std::min(m, n); i++ ) + s += a(i,i); + return s; +} + + +template inline +Matx<_Tp, n, m> Matx<_Tp, m, n>::t() const +{ + return Matx<_Tp, n, m>(*this, Matx_TOp()); +} + + +template struct CV_EXPORTS Matx_FastInvOp +{ + bool operator()(const Matx<_Tp, m, m>& a, Matx<_Tp, m, m>& b, int method) const + { + Matx<_Tp, m, m> temp = a; + + // assume that b is all 0's on input => make it a unity matrix + for( int i = 0; i < m; i++ ) + b(i, i) = (_Tp)1; + + if( method == DECOMP_CHOLESKY ) + return Cholesky(temp.val, m*sizeof(_Tp), m, b.val, m*sizeof(_Tp), m); + + return LU(temp.val, m*sizeof(_Tp), m, b.val, m*sizeof(_Tp), m) != 0; + } +}; + + +template struct CV_EXPORTS Matx_FastInvOp<_Tp, 2> +{ + bool operator()(const Matx<_Tp, 2, 2>& a, Matx<_Tp, 2, 2>& b, int) const + { + _Tp d = determinant(a); + if( d == 0 ) + return false; + d = 1/d; + b(1,1) = a(0,0)*d; + b(0,0) = a(1,1)*d; + 
b(0,1) = -a(0,1)*d; + b(1,0) = -a(1,0)*d; + return true; + } +}; + + +template struct CV_EXPORTS Matx_FastInvOp<_Tp, 3> +{ + bool operator()(const Matx<_Tp, 3, 3>& a, Matx<_Tp, 3, 3>& b, int) const + { + _Tp d = determinant(a); + if( d == 0 ) + return false; + d = 1/d; + b(0,0) = (a(1,1) * a(2,2) - a(1,2) * a(2,1)) * d; + b(0,1) = (a(0,2) * a(2,1) - a(0,1) * a(2,2)) * d; + b(0,2) = (a(0,1) * a(1,2) - a(0,2) * a(1,1)) * d; + + b(1,0) = (a(1,2) * a(2,0) - a(1,0) * a(2,2)) * d; + b(1,1) = (a(0,0) * a(2,2) - a(0,2) * a(2,0)) * d; + b(1,2) = (a(0,2) * a(1,0) - a(0,0) * a(1,2)) * d; + + b(2,0) = (a(1,0) * a(2,1) - a(1,1) * a(2,0)) * d; + b(2,1) = (a(0,1) * a(2,0) - a(0,0) * a(2,1)) * d; + b(2,2) = (a(0,0) * a(1,1) - a(0,1) * a(1,0)) * d; + return true; + } +}; + + +template inline +Matx<_Tp, n, m> Matx<_Tp, m, n>::inv(int method) const +{ + Matx<_Tp, n, m> b; + bool ok; + if( method == DECOMP_LU || method == DECOMP_CHOLESKY ) + ok = Matx_FastInvOp<_Tp, m>()(*this, b, method); + else + { + Mat A(*this, false), B(b, false); + ok = invert(A, B, method); + } + return ok ? 
b : Matx<_Tp, n, m>::zeros(); +} + + +template struct CV_EXPORTS Matx_FastSolveOp +{ + bool operator()(const Matx<_Tp, m, m>& a, const Matx<_Tp, m, n>& b, + Matx<_Tp, m, n>& x, int method) const + { + Matx<_Tp, m, m> temp = a; + x = b; + if( method == DECOMP_CHOLESKY ) + return Cholesky(temp.val, m*sizeof(_Tp), m, x.val, n*sizeof(_Tp), n); + + return LU(temp.val, m*sizeof(_Tp), m, x.val, n*sizeof(_Tp), n) != 0; + } +}; + + +template struct CV_EXPORTS Matx_FastSolveOp<_Tp, 2, 1> +{ + bool operator()(const Matx<_Tp, 2, 2>& a, const Matx<_Tp, 2, 1>& b, + Matx<_Tp, 2, 1>& x, int method) const + { + _Tp d = determinant(a); + if( d == 0 ) + return false; + d = 1/d; + x(0) = (b(0)*a(1,1) - b(1)*a(0,1))*d; + x(1) = (b(1)*a(0,0) - b(0)*a(1,0))*d; + return true; + } +}; + + +template struct CV_EXPORTS Matx_FastSolveOp<_Tp, 3, 1> +{ + bool operator()(const Matx<_Tp, 3, 3>& a, const Matx<_Tp, 3, 1>& b, + Matx<_Tp, 3, 1>& x, int method) const + { + _Tp d = determinant(a); + if( d == 0 ) + return false; + d = 1/d; + x(0) = d*(b(0)*(a(1,1)*a(2,2) - a(1,2)*a(2,1)) - + a(0,1)*(b(1)*a(2,2) - a(1,2)*b(2)) + + a(0,2)*(b(1)*a(2,1) - a(1,1)*b(2))); + + x(1) = d*(a(0,0)*(b(1)*a(2,2) - a(1,2)*b(2)) - + b(0)*(a(1,0)*a(2,2) - a(1,2)*a(2,0)) + + a(0,2)*(a(1,0)*b(2) - b(1)*a(2,0))); + + x(2) = d*(a(0,0)*(a(1,1)*b(2) - b(1)*a(2,1)) - + a(0,1)*(a(1,0)*b(2) - b(1)*a(2,0)) + + b(0)*(a(1,0)*a(2,1) - a(1,1)*a(2,0))); + return true; + } +}; + + +template template inline +Matx<_Tp, n, l> Matx<_Tp, m, n>::solve(const Matx<_Tp, m, l>& rhs, int method) const +{ + Matx<_Tp, n, l> x; + bool ok; + if( method == DECOMP_LU || method == DECOMP_CHOLESKY ) + ok = Matx_FastSolveOp<_Tp, m, l>()(*this, rhs, x, method); + else + { + Mat A(*this, false), B(rhs, false), X(x, false); + ok = cv::solve(A, B, X, method); + } + + return ok ? 
x : Matx<_Tp, n, l>::zeros(); +} + + +template static inline +double norm(const Matx<_Tp, m, n>& M) +{ + double s = 0; + for( int i = 0; i < m*n; i++ ) + s += (double)M.val[i]*M.val[i]; + return std::sqrt(s); +} + + +template static inline +double norm(const Matx<_Tp, m, n>& M, int normType) +{ + if( normType == NORM_INF ) + { + _Tp s = 0; + for( int i = 0; i < m*n; i++ ) + s = std::max(s, std::abs(M.val[i])); + return s; + } + + if( normType == NORM_L1 ) + { + _Tp s = 0; + for( int i = 0; i < m*n; i++ ) + s += std::abs(M.val[i]); + return s; + } + + CV_DbgAssert( normType == NORM_L2 ); + return norm(M); +} + + +template static inline +bool operator == (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + for( int i = 0; i < m*n; i++ ) + if( a.val[i] != b.val[i] ) return false; + return true; +} + +template static inline +bool operator != (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + return !(a == b); +} + + +template static inline +MatxCommaInitializer<_Tp, m, n> operator << (const Matx<_Tp, m, n>& mtx, _T2 val) +{ + MatxCommaInitializer<_Tp, m, n> commaInitializer((Matx<_Tp, m, n>*)&mtx); + return (commaInitializer, val); +} + +template inline +MatxCommaInitializer<_Tp, m, n>::MatxCommaInitializer(Matx<_Tp, m, n>* _mtx) + : dst(_mtx), idx(0) +{} + +template template inline +MatxCommaInitializer<_Tp, m, n>& MatxCommaInitializer<_Tp, m, n>::operator , (_T2 value) +{ + CV_DbgAssert( idx < m*n ); + dst->val[idx++] = saturate_cast<_Tp>(value); + return *this; +} + +template inline +Matx<_Tp, m, n> MatxCommaInitializer<_Tp, m, n>::operator *() const +{ + CV_DbgAssert( idx == n*m ); + return *dst; +} + +/////////////////////////// short vector (Vec) ///////////////////////////// + +template inline Vec<_Tp, cn>::Vec() +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0) + : Matx<_Tp, cn, 1>(v0) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1) + : Matx<_Tp, cn, 1>(v0, v1) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2) + : Matx<_Tp, 
cn, 1>(v0, v1, v2) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7, v8) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9) +{} + +template inline Vec<_Tp, cn>::Vec(const _Tp* values) + : Matx<_Tp, cn, 1>(values) +{} + + +template inline Vec<_Tp, cn>::Vec(const Vec<_Tp, cn>& m) + : Matx<_Tp, cn, 1>(m.val) +{} + +template inline +Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_AddOp op) +: Matx<_Tp, cn, 1>(a, b, op) +{} + +template inline +Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_SubOp op) +: Matx<_Tp, cn, 1>(a, b, op) +{} + +template template inline +Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, _T2 alpha, Matx_ScaleOp op) +: Matx<_Tp, cn, 1>(a, alpha, op) +{} + +template inline Vec<_Tp, cn> Vec<_Tp, cn>::all(_Tp alpha) +{ + Vec v; + for( int i = 0; i < cn; i++ ) v.val[i] = alpha; + return v; +} + +template inline Vec<_Tp, cn> Vec<_Tp, cn>::mul(const Vec<_Tp, cn>& v) const +{ + Vec<_Tp, cn> w; + for( int i = 0; i < cn; i++ ) w.val[i] = saturate_cast<_Tp>(this->val[i]*v.val[i]); + return w; 
+} + +template inline Vec<_Tp, cn> Vec<_Tp, cn>::cross(const Vec<_Tp, cn>& v) const +{ + CV_Error(CV_StsError, "for arbitrary-size vector there is no cross-product defined"); + return Vec<_Tp, cn>(); +} + +template template +inline Vec<_Tp, cn>::operator Vec() const +{ + Vec v; + for( int i = 0; i < cn; i++ ) v.val[i] = saturate_cast(this->val[i]); + return v; +} + +template inline Vec<_Tp, cn>::operator CvScalar() const +{ + CvScalar s = {{0,0,0,0}}; + int i; + for( i = 0; i < std::min(cn, 4); i++ ) s.val[i] = this->val[i]; + for( ; i < 4; i++ ) s.val[i] = 0; + return s; +} + +template inline const _Tp& Vec<_Tp, cn>::operator [](int i) const +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template inline _Tp& Vec<_Tp, cn>::operator [](int i) +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template inline const _Tp& Vec<_Tp, cn>::operator ()(int i) const +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template inline _Tp& Vec<_Tp, cn>::operator ()(int i) +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template static inline Vec<_Tp1, cn>& +operator += (Vec<_Tp1, cn>& a, const Vec<_Tp2, cn>& b) +{ + for( int i = 0; i < cn; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] + b.val[i]); + return a; +} + +template static inline Vec<_Tp1, cn>& +operator -= (Vec<_Tp1, cn>& a, const Vec<_Tp2, cn>& b) +{ + for( int i = 0; i < cn; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] - b.val[i]); + return a; +} + +template static inline Vec<_Tp, cn> +operator + (const Vec<_Tp, cn>& a, const Vec<_Tp, cn>& b) +{ + return Vec<_Tp, cn>(a, b, Matx_AddOp()); +} + +template static inline Vec<_Tp, cn> +operator - (const Vec<_Tp, cn>& a, const Vec<_Tp, cn>& b) +{ + return Vec<_Tp, cn>(a, b, Matx_SubOp()); +} + +template static inline +Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, int alpha) +{ + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*alpha); + return 
a; +} + +template static inline +Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, float alpha) +{ + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*alpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, double alpha) +{ + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*alpha); + return a; +} + + +template static inline Vec<_Tp, cn> +operator * (const Vec<_Tp, cn>& a, int alpha) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator * (int alpha, const Vec<_Tp, cn>& a) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator * (const Vec<_Tp, cn>& a, float alpha) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator * (float alpha, const Vec<_Tp, cn>& a) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator * (const Vec<_Tp, cn>& a, double alpha) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator * (double alpha, const Vec<_Tp, cn>& a) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator - (const Vec<_Tp, cn>& a) +{ + Vec<_Tp,cn> t; + for( int i = 0; i < cn; i++ ) t.val[i] = saturate_cast<_Tp>(-a.val[i]); + return t; +} + +template<> inline Vec Vec::cross(const Vec& v) const +{ + return Vec(val[1]*v.val[2] - val[2]*v.val[1], + val[2]*v.val[0] - val[0]*v.val[2], + val[0]*v.val[1] - val[1]*v.val[0]); +} + +template<> inline Vec Vec::cross(const Vec& v) const +{ + return Vec(val[1]*v.val[2] - val[2]*v.val[1], + val[2]*v.val[0] - val[0]*v.val[2], + val[0]*v.val[1] - val[1]*v.val[0]); +} + +template static inline +Vec& operator += (Vec& a, const Vec& b) +{ + a[0] = saturate_cast(a[0] + b[0]); + a[1] = saturate_cast(a[1] + b[1]); + return a; +} + +template static inline +Vec& operator += (Vec& a, const Vec& b) +{ + 
a[0] = saturate_cast(a[0] + b[0]); + a[1] = saturate_cast(a[1] + b[1]); + a[2] = saturate_cast(a[2] + b[2]); + return a; +} + + +template static inline +Vec& operator += (Vec& a, const Vec& b) +{ + a[0] = saturate_cast(a[0] + b[0]); + a[1] = saturate_cast(a[1] + b[1]); + a[2] = saturate_cast(a[2] + b[2]); + a[3] = saturate_cast(a[3] + b[3]); + return a; +} + + +template static inline +VecCommaInitializer<_Tp, cn> operator << (const Vec<_Tp, cn>& vec, _T2 val) +{ + VecCommaInitializer<_Tp, cn> commaInitializer((Vec<_Tp, cn>*)&vec); + return (commaInitializer, val); +} + +template inline +VecCommaInitializer<_Tp, cn>::VecCommaInitializer(Vec<_Tp, cn>* _vec) + : MatxCommaInitializer<_Tp, cn, 1>(_vec) +{} + +template template inline +VecCommaInitializer<_Tp, cn>& VecCommaInitializer<_Tp, cn>::operator , (_T2 value) +{ + CV_DbgAssert( this->idx < cn ); + this->dst->val[this->idx++] = saturate_cast<_Tp>(value); + return *this; +} + +template inline +Vec<_Tp, cn> VecCommaInitializer<_Tp, cn>::operator *() const +{ + CV_DbgAssert( this->idx == cn ); + return *this->dst; +} + +//////////////////////////////// Complex ////////////////////////////// + +template inline Complex<_Tp>::Complex() : re(0), im(0) {} +template inline Complex<_Tp>::Complex( _Tp _re, _Tp _im ) : re(_re), im(_im) {} +template template inline Complex<_Tp>::operator Complex() const +{ return Complex(saturate_cast(re), saturate_cast(im)); } +template inline Complex<_Tp> Complex<_Tp>::conj() const +{ return Complex<_Tp>(re, -im); } + +template static inline +bool operator == (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return a.re == b.re && a.im == b.im; } + +template static inline +bool operator != (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return a.re != b.re || a.im != b.im; } + +template static inline +Complex<_Tp> operator + (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return Complex<_Tp>( a.re + b.re, a.im + b.im ); } + +template static inline +Complex<_Tp>& operator += 
(Complex<_Tp>& a, const Complex<_Tp>& b) +{ a.re += b.re; a.im += b.im; return a; } + +template static inline +Complex<_Tp> operator - (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return Complex<_Tp>( a.re - b.re, a.im - b.im ); } + +template static inline +Complex<_Tp>& operator -= (Complex<_Tp>& a, const Complex<_Tp>& b) +{ a.re -= b.re; a.im -= b.im; return a; } + +template static inline +Complex<_Tp> operator - (const Complex<_Tp>& a) +{ return Complex<_Tp>(-a.re, -a.im); } + +template static inline +Complex<_Tp> operator * (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return Complex<_Tp>( a.re*b.re - a.im*b.im, a.re*b.im + a.im*b.re ); } + +template static inline +Complex<_Tp> operator * (const Complex<_Tp>& a, _Tp b) +{ return Complex<_Tp>( a.re*b, a.im*b ); } + +template static inline +Complex<_Tp> operator * (_Tp b, const Complex<_Tp>& a) +{ return Complex<_Tp>( a.re*b, a.im*b ); } + +template static inline +Complex<_Tp> operator + (const Complex<_Tp>& a, _Tp b) +{ return Complex<_Tp>( a.re + b, a.im ); } + +template static inline +Complex<_Tp> operator - (const Complex<_Tp>& a, _Tp b) +{ return Complex<_Tp>( a.re - b, a.im ); } + +template static inline +Complex<_Tp> operator + (_Tp b, const Complex<_Tp>& a) +{ return Complex<_Tp>( a.re + b, a.im ); } + +template static inline +Complex<_Tp> operator - (_Tp b, const Complex<_Tp>& a) +{ return Complex<_Tp>( b - a.re, -a.im ); } + +template static inline +Complex<_Tp>& operator += (Complex<_Tp>& a, _Tp b) +{ a.re += b; return a; } + +template static inline +Complex<_Tp>& operator -= (Complex<_Tp>& a, _Tp b) +{ a.re -= b; return a; } + +template static inline +Complex<_Tp>& operator *= (Complex<_Tp>& a, _Tp b) +{ a.re *= b; a.im *= b; return a; } + +template static inline +double abs(const Complex<_Tp>& a) +{ return std::sqrt( (double)a.re*a.re + (double)a.im*a.im); } + +template static inline +Complex<_Tp> operator / (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ + double t = 1./((double)b.re*b.re 
+ (double)b.im*b.im); + return Complex<_Tp>( (_Tp)((a.re*b.re + a.im*b.im)*t), + (_Tp)((-a.re*b.im + a.im*b.re)*t) ); +} + +template static inline +Complex<_Tp>& operator /= (Complex<_Tp>& a, const Complex<_Tp>& b) +{ + return (a = a / b); +} + +template static inline +Complex<_Tp> operator / (const Complex<_Tp>& a, _Tp b) +{ + _Tp t = (_Tp)1/b; + return Complex<_Tp>( a.re*t, a.im*t ); +} + +template static inline +Complex<_Tp> operator / (_Tp b, const Complex<_Tp>& a) +{ + return Complex<_Tp>(b)/a; +} + +template static inline +Complex<_Tp> operator /= (const Complex<_Tp>& a, _Tp b) +{ + _Tp t = (_Tp)1/b; + a.re *= t; a.im *= t; return a; +} + +//////////////////////////////// 2D Point //////////////////////////////// + +template inline Point_<_Tp>::Point_() : x(0), y(0) {} +template inline Point_<_Tp>::Point_(_Tp _x, _Tp _y) : x(_x), y(_y) {} +template inline Point_<_Tp>::Point_(const Point_& pt) : x(pt.x), y(pt.y) {} +template inline Point_<_Tp>::Point_(const CvPoint& pt) : x((_Tp)pt.x), y((_Tp)pt.y) {} +template inline Point_<_Tp>::Point_(const CvPoint2D32f& pt) + : x(saturate_cast<_Tp>(pt.x)), y(saturate_cast<_Tp>(pt.y)) {} +template inline Point_<_Tp>::Point_(const Size_<_Tp>& sz) : x(sz.width), y(sz.height) {} +template inline Point_<_Tp>::Point_(const Vec<_Tp,2>& v) : x(v[0]), y(v[1]) {} +template inline Point_<_Tp>& Point_<_Tp>::operator = (const Point_& pt) +{ x = pt.x; y = pt.y; return *this; } + +template template inline Point_<_Tp>::operator Point_<_Tp2>() const +{ return Point_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y)); } +template inline Point_<_Tp>::operator CvPoint() const +{ return cvPoint(saturate_cast(x), saturate_cast(y)); } +template inline Point_<_Tp>::operator CvPoint2D32f() const +{ return cvPoint2D32f((float)x, (float)y); } +template inline Point_<_Tp>::operator Vec<_Tp, 2>() const +{ return Vec<_Tp, 2>(x, y); } + +template inline _Tp Point_<_Tp>::dot(const Point_& pt) const +{ return saturate_cast<_Tp>(x*pt.x + y*pt.y); } 
+template inline double Point_<_Tp>::ddot(const Point_& pt) const +{ return (double)x*pt.x + (double)y*pt.y; } + +template static inline Point_<_Tp>& +operator += (Point_<_Tp>& a, const Point_<_Tp>& b) +{ + a.x = saturate_cast<_Tp>(a.x + b.x); + a.y = saturate_cast<_Tp>(a.y + b.y); + return a; +} + +template static inline Point_<_Tp>& +operator -= (Point_<_Tp>& a, const Point_<_Tp>& b) +{ + a.x = saturate_cast<_Tp>(a.x - b.x); + a.y = saturate_cast<_Tp>(a.y - b.y); + return a; +} + +template static inline Point_<_Tp>& +operator *= (Point_<_Tp>& a, int b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + return a; +} + +template static inline Point_<_Tp>& +operator *= (Point_<_Tp>& a, float b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + return a; +} + +template static inline Point_<_Tp>& +operator *= (Point_<_Tp>& a, double b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + return a; +} + +template static inline double norm(const Point_<_Tp>& pt) +{ return std::sqrt((double)pt.x*pt.x + (double)pt.y*pt.y); } + +template static inline bool operator == (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ return a.x == b.x && a.y == b.y; } + +template static inline bool operator != (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ return a.x != b.x || a.y != b.y; } + +template static inline Point_<_Tp> operator + (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x + b.x), saturate_cast<_Tp>(a.y + b.y) ); } + +template static inline Point_<_Tp> operator - (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x - b.x), saturate_cast<_Tp>(a.y - b.y) ); } + +template static inline Point_<_Tp> operator - (const Point_<_Tp>& a) +{ return Point_<_Tp>( saturate_cast<_Tp>(-a.x), saturate_cast<_Tp>(-a.y) ); } + +template static inline Point_<_Tp> operator * (const Point_<_Tp>& a, int b) +{ return Point_<_Tp>( 
saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); } + +template static inline Point_<_Tp> operator * (int a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); } + +template static inline Point_<_Tp> operator * (const Point_<_Tp>& a, float b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); } + +template static inline Point_<_Tp> operator * (float a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); } + +template static inline Point_<_Tp> operator * (const Point_<_Tp>& a, double b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); } + +template static inline Point_<_Tp> operator * (double a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); } + +//////////////////////////////// 3D Point //////////////////////////////// + +template inline Point3_<_Tp>::Point3_() : x(0), y(0), z(0) {} +template inline Point3_<_Tp>::Point3_(_Tp _x, _Tp _y, _Tp _z) : x(_x), y(_y), z(_z) {} +template inline Point3_<_Tp>::Point3_(const Point3_& pt) : x(pt.x), y(pt.y), z(pt.z) {} +template inline Point3_<_Tp>::Point3_(const Point_<_Tp>& pt) : x(pt.x), y(pt.y), z(_Tp()) {} +template inline Point3_<_Tp>::Point3_(const CvPoint3D32f& pt) : + x(saturate_cast<_Tp>(pt.x)), y(saturate_cast<_Tp>(pt.y)), z(saturate_cast<_Tp>(pt.z)) {} +template inline Point3_<_Tp>::Point3_(const Vec<_Tp, 3>& v) : x(v[0]), y(v[1]), z(v[2]) {} + +template template inline Point3_<_Tp>::operator Point3_<_Tp2>() const +{ return Point3_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y), saturate_cast<_Tp2>(z)); } + +template inline Point3_<_Tp>::operator CvPoint3D32f() const +{ return cvPoint3D32f((float)x, (float)y, (float)z); } + +template inline Point3_<_Tp>::operator Vec<_Tp, 3>() const +{ return Vec<_Tp, 3>(x, y, z); } + +template inline Point3_<_Tp>& Point3_<_Tp>::operator = (const Point3_& pt) +{ x 
= pt.x; y = pt.y; z = pt.z; return *this; } + +template inline _Tp Point3_<_Tp>::dot(const Point3_& pt) const +{ return saturate_cast<_Tp>(x*pt.x + y*pt.y + z*pt.z); } +template inline double Point3_<_Tp>::ddot(const Point3_& pt) const +{ return (double)x*pt.x + (double)y*pt.y + (double)z*pt.z; } + +template inline Point3_<_Tp> Point3_<_Tp>::cross(const Point3_<_Tp>& pt) const +{ + return Point3_<_Tp>(y*pt.z - z*pt.y, z*pt.x - x*pt.z, x*pt.y - y*pt.x); +} + +template static inline Point3_<_Tp>& +operator += (Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ + a.x = saturate_cast<_Tp>(a.x + b.x); + a.y = saturate_cast<_Tp>(a.y + b.y); + a.z = saturate_cast<_Tp>(a.z + b.z); + return a; +} + +template static inline Point3_<_Tp>& +operator -= (Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ + a.x = saturate_cast<_Tp>(a.x - b.x); + a.y = saturate_cast<_Tp>(a.y - b.y); + a.z = saturate_cast<_Tp>(a.z - b.z); + return a; +} + +template static inline Point3_<_Tp>& +operator *= (Point3_<_Tp>& a, int b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + a.z = saturate_cast<_Tp>(a.z*b); + return a; +} + +template static inline Point3_<_Tp>& +operator *= (Point3_<_Tp>& a, float b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + a.z = saturate_cast<_Tp>(a.z*b); + return a; +} + +template static inline Point3_<_Tp>& +operator *= (Point3_<_Tp>& a, double b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + a.z = saturate_cast<_Tp>(a.z*b); + return a; +} + +template static inline double norm(const Point3_<_Tp>& pt) +{ return std::sqrt((double)pt.x*pt.x + (double)pt.y*pt.y + (double)pt.z*pt.z); } + +template static inline bool operator == (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ return a.x == b.x && a.y == b.y && a.z == b.z; } + +template static inline bool operator != (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ return a.x != b.x || a.y != b.y || a.z != b.z; } + +template static inline Point3_<_Tp> 
operator + (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x + b.x), + saturate_cast<_Tp>(a.y + b.y), + saturate_cast<_Tp>(a.z + b.z)); } + +template static inline Point3_<_Tp> operator - (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x - b.x), + saturate_cast<_Tp>(a.y - b.y), + saturate_cast<_Tp>(a.z - b.z)); } + +template static inline Point3_<_Tp> operator - (const Point3_<_Tp>& a) +{ return Point3_<_Tp>( saturate_cast<_Tp>(-a.x), + saturate_cast<_Tp>(-a.y), + saturate_cast<_Tp>(-a.z) ); } + +template static inline Point3_<_Tp> operator * (const Point3_<_Tp>& a, int b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x*b), + saturate_cast<_Tp>(a.y*b), + saturate_cast<_Tp>(a.z*b) ); } + +template static inline Point3_<_Tp> operator * (int a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(b.x*a), + saturate_cast<_Tp>(b.y*a), + saturate_cast<_Tp>(b.z*a) ); } + +template static inline Point3_<_Tp> operator * (const Point3_<_Tp>& a, float b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x*b), + saturate_cast<_Tp>(a.y*b), + saturate_cast<_Tp>(a.z*b) ); } + +template static inline Point3_<_Tp> operator * (float a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(b.x*a), + saturate_cast<_Tp>(b.y*a), + saturate_cast<_Tp>(b.z*a) ); } + +template static inline Point3_<_Tp> operator * (const Point3_<_Tp>& a, double b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x*b), + saturate_cast<_Tp>(a.y*b), + saturate_cast<_Tp>(a.z*b) ); } + +template static inline Point3_<_Tp> operator * (double a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(b.x*a), + saturate_cast<_Tp>(b.y*a), + saturate_cast<_Tp>(b.z*a) ); } + +//////////////////////////////// Size //////////////////////////////// + +template inline Size_<_Tp>::Size_() + : width(0), height(0) {} +template inline Size_<_Tp>::Size_(_Tp _width, _Tp _height) + : width(_width), height(_height) {} 
+template inline Size_<_Tp>::Size_(const Size_& sz) + : width(sz.width), height(sz.height) {} +template inline Size_<_Tp>::Size_(const CvSize& sz) + : width(saturate_cast<_Tp>(sz.width)), height(saturate_cast<_Tp>(sz.height)) {} +template inline Size_<_Tp>::Size_(const CvSize2D32f& sz) + : width(saturate_cast<_Tp>(sz.width)), height(saturate_cast<_Tp>(sz.height)) {} +template inline Size_<_Tp>::Size_(const Point_<_Tp>& pt) : width(pt.x), height(pt.y) {} + +template template inline Size_<_Tp>::operator Size_<_Tp2>() const +{ return Size_<_Tp2>(saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height)); } +template inline Size_<_Tp>::operator CvSize() const +{ return cvSize(saturate_cast(width), saturate_cast(height)); } +template inline Size_<_Tp>::operator CvSize2D32f() const +{ return cvSize2D32f((float)width, (float)height); } + +template inline Size_<_Tp>& Size_<_Tp>::operator = (const Size_<_Tp>& sz) +{ width = sz.width; height = sz.height; return *this; } +template static inline Size_<_Tp> operator * (const Size_<_Tp>& a, _Tp b) +{ return Size_<_Tp>(a.width * b, a.height * b); } +template static inline Size_<_Tp> operator + (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ return Size_<_Tp>(a.width + b.width, a.height + b.height); } +template static inline Size_<_Tp> operator - (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ return Size_<_Tp>(a.width - b.width, a.height - b.height); } +template inline _Tp Size_<_Tp>::area() const { return width*height; } + +template static inline Size_<_Tp>& operator += (Size_<_Tp>& a, const Size_<_Tp>& b) +{ a.width += b.width; a.height += b.height; return a; } +template static inline Size_<_Tp>& operator -= (Size_<_Tp>& a, const Size_<_Tp>& b) +{ a.width -= b.width; a.height -= b.height; return a; } + +template static inline bool operator == (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ return a.width == b.width && a.height == b.height; } +template static inline bool operator != (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ return 
a.width != b.width || a.height != b.height; } + +//////////////////////////////// Rect //////////////////////////////// + + +template inline Rect_<_Tp>::Rect_() : x(0), y(0), width(0), height(0) {} +template inline Rect_<_Tp>::Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height) : x(_x), y(_y), width(_width), height(_height) {} +template inline Rect_<_Tp>::Rect_(const Rect_<_Tp>& r) : x(r.x), y(r.y), width(r.width), height(r.height) {} +template inline Rect_<_Tp>::Rect_(const CvRect& r) : x((_Tp)r.x), y((_Tp)r.y), width((_Tp)r.width), height((_Tp)r.height) {} +template inline Rect_<_Tp>::Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz) : + x(org.x), y(org.y), width(sz.width), height(sz.height) {} +template inline Rect_<_Tp>::Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2) +{ + x = std::min(pt1.x, pt2.x); y = std::min(pt1.y, pt2.y); + width = std::max(pt1.x, pt2.x) - x; height = std::max(pt1.y, pt2.y) - y; +} +template inline Rect_<_Tp>& Rect_<_Tp>::operator = ( const Rect_<_Tp>& r ) +{ x = r.x; y = r.y; width = r.width; height = r.height; return *this; } + +template inline Point_<_Tp> Rect_<_Tp>::tl() const { return Point_<_Tp>(x,y); } +template inline Point_<_Tp> Rect_<_Tp>::br() const { return Point_<_Tp>(x+width, y+height); } + +template static inline Rect_<_Tp>& operator += ( Rect_<_Tp>& a, const Point_<_Tp>& b ) +{ a.x += b.x; a.y += b.y; return a; } +template static inline Rect_<_Tp>& operator -= ( Rect_<_Tp>& a, const Point_<_Tp>& b ) +{ a.x -= b.x; a.y -= b.y; return a; } + +template static inline Rect_<_Tp>& operator += ( Rect_<_Tp>& a, const Size_<_Tp>& b ) +{ a.width += b.width; a.height += b.height; return a; } + +template static inline Rect_<_Tp>& operator -= ( Rect_<_Tp>& a, const Size_<_Tp>& b ) +{ a.width -= b.width; a.height -= b.height; return a; } + +template static inline Rect_<_Tp>& operator &= ( Rect_<_Tp>& a, const Rect_<_Tp>& b ) +{ + _Tp x1 = std::max(a.x, b.x), y1 = std::max(a.y, b.y); + a.width = std::min(a.x + a.width, b.x + b.width) 
- x1; + a.height = std::min(a.y + a.height, b.y + b.height) - y1; + a.x = x1; a.y = y1; + if( a.width <= 0 || a.height <= 0 ) + a = Rect(); + return a; +} + +template static inline Rect_<_Tp>& operator |= ( Rect_<_Tp>& a, const Rect_<_Tp>& b ) +{ + _Tp x1 = std::min(a.x, b.x), y1 = std::min(a.y, b.y); + a.width = std::max(a.x + a.width, b.x + b.width) - x1; + a.height = std::max(a.y + a.height, b.y + b.height) - y1; + a.x = x1; a.y = y1; + return a; +} + +template inline Size_<_Tp> Rect_<_Tp>::size() const { return Size_<_Tp>(width, height); } +template inline _Tp Rect_<_Tp>::area() const { return width*height; } + +template template inline Rect_<_Tp>::operator Rect_<_Tp2>() const +{ return Rect_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y), + saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height)); } +template inline Rect_<_Tp>::operator CvRect() const +{ return cvRect(saturate_cast(x), saturate_cast(y), + saturate_cast(width), saturate_cast(height)); } + +template inline bool Rect_<_Tp>::contains(const Point_<_Tp>& pt) const +{ return x <= pt.x && pt.x < x + width && y <= pt.y && pt.y < y + height; } + +template static inline bool operator == (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + return a.x == b.x && a.y == b.y && a.width == b.width && a.height == b.height; +} + +template static inline bool operator != (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + return a.x != b.x || a.y != b.y || a.width != b.width || a.height != b.height; +} + +template static inline Rect_<_Tp> operator + (const Rect_<_Tp>& a, const Point_<_Tp>& b) +{ + return Rect_<_Tp>( a.x + b.x, a.y + b.y, a.width, a.height ); +} + +template static inline Rect_<_Tp> operator - (const Rect_<_Tp>& a, const Point_<_Tp>& b) +{ + return Rect_<_Tp>( a.x - b.x, a.y - b.y, a.width, a.height ); +} + +template static inline Rect_<_Tp> operator + (const Rect_<_Tp>& a, const Size_<_Tp>& b) +{ + return Rect_<_Tp>( a.x, a.y, a.width + b.width, a.height + b.height ); +} + +template static inline 
Rect_<_Tp> operator & (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + Rect_<_Tp> c = a; + return c &= b; +} + +template static inline Rect_<_Tp> operator | (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + Rect_<_Tp> c = a; + return c |= b; +} + +template inline bool Point_<_Tp>::inside( const Rect_<_Tp>& r ) const +{ + return r.contains(*this); +} + +inline RotatedRect::RotatedRect() { angle = 0; } +inline RotatedRect::RotatedRect(const Point2f& _center, const Size2f& _size, float _angle) + : center(_center), size(_size), angle(_angle) {} +inline RotatedRect::RotatedRect(const CvBox2D& box) + : center(box.center), size(box.size), angle(box.angle) {} +inline RotatedRect::operator CvBox2D() const +{ + CvBox2D box; box.center = center; box.size = size; box.angle = angle; + return box; +} + +//////////////////////////////// Scalar_ /////////////////////////////// + +template inline Scalar_<_Tp>::Scalar_() +{ this->val[0] = this->val[1] = this->val[2] = this->val[3] = 0; } + +template inline Scalar_<_Tp>::Scalar_(_Tp v0, _Tp v1, _Tp v2, _Tp v3) +{ this->val[0] = v0; this->val[1] = v1; this->val[2] = v2; this->val[3] = v3; } + +template inline Scalar_<_Tp>::Scalar_(const CvScalar& s) +{ + this->val[0] = saturate_cast<_Tp>(s.val[0]); + this->val[1] = saturate_cast<_Tp>(s.val[1]); + this->val[2] = saturate_cast<_Tp>(s.val[2]); + this->val[3] = saturate_cast<_Tp>(s.val[3]); +} + +template inline Scalar_<_Tp>::Scalar_(_Tp v0) +{ this->val[0] = v0; this->val[1] = this->val[2] = this->val[3] = 0; } + +template inline Scalar_<_Tp> Scalar_<_Tp>::all(_Tp v0) +{ return Scalar_<_Tp>(v0, v0, v0, v0); } +template inline Scalar_<_Tp>::operator CvScalar() const +{ return cvScalar(this->val[0], this->val[1], this->val[2], this->val[3]); } + +template template inline Scalar_<_Tp>::operator Scalar_() const +{ + return Scalar_(saturate_cast(this->val[0]), + saturate_cast(this->val[1]), + saturate_cast(this->val[2]), + saturate_cast(this->val[3])); +} + +template static inline Scalar_<_Tp>& 
operator += (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a.val[0] = saturate_cast<_Tp>(a.val[0] + b.val[0]); + a.val[1] = saturate_cast<_Tp>(a.val[1] + b.val[1]); + a.val[2] = saturate_cast<_Tp>(a.val[2] + b.val[2]); + a.val[3] = saturate_cast<_Tp>(a.val[3] + b.val[3]); + return a; +} + +template static inline Scalar_<_Tp>& operator -= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a.val[0] = saturate_cast<_Tp>(a.val[0] - b.val[0]); + a.val[1] = saturate_cast<_Tp>(a.val[1] - b.val[1]); + a.val[2] = saturate_cast<_Tp>(a.val[2] - b.val[2]); + a.val[3] = saturate_cast<_Tp>(a.val[3] - b.val[3]); + return a; +} + +template static inline Scalar_<_Tp>& operator *= ( Scalar_<_Tp>& a, _Tp v ) +{ + a.val[0] = saturate_cast<_Tp>(a.val[0] * v); + a.val[1] = saturate_cast<_Tp>(a.val[1] * v); + a.val[2] = saturate_cast<_Tp>(a.val[2] * v); + a.val[3] = saturate_cast<_Tp>(a.val[3] * v); + return a; +} + +template inline Scalar_<_Tp> Scalar_<_Tp>::mul(const Scalar_<_Tp>& t, double scale ) const +{ + return Scalar_<_Tp>( saturate_cast<_Tp>(this->val[0]*t.val[0]*scale), + saturate_cast<_Tp>(this->val[1]*t.val[1]*scale), + saturate_cast<_Tp>(this->val[2]*t.val[2]*scale), + saturate_cast<_Tp>(this->val[3]*t.val[3]*scale)); +} + +template static inline bool operator == ( const Scalar_<_Tp>& a, const Scalar_<_Tp>& b ) +{ + return a.val[0] == b.val[0] && a.val[1] == b.val[1] && + a.val[2] == b.val[2] && a.val[3] == b.val[3]; +} + +template static inline bool operator != ( const Scalar_<_Tp>& a, const Scalar_<_Tp>& b ) +{ + return a.val[0] != b.val[0] || a.val[1] != b.val[1] || + a.val[2] != b.val[2] || a.val[3] != b.val[3]; +} + +template static inline Scalar_<_Tp> operator + (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] + b.val[0]), + saturate_cast<_Tp>(a.val[1] + b.val[1]), + saturate_cast<_Tp>(a.val[2] + b.val[2]), + saturate_cast<_Tp>(a.val[3] + b.val[3])); +} + +template static inline Scalar_<_Tp> operator - (const Scalar_<_Tp>& 
a, const Scalar_<_Tp>& b) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] - b.val[0]), + saturate_cast<_Tp>(a.val[1] - b.val[1]), + saturate_cast<_Tp>(a.val[2] - b.val[2]), + saturate_cast<_Tp>(a.val[3] - b.val[3])); +} + +template static inline Scalar_<_Tp> operator * (const Scalar_<_Tp>& a, _Tp alpha) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] * alpha), + saturate_cast<_Tp>(a.val[1] * alpha), + saturate_cast<_Tp>(a.val[2] * alpha), + saturate_cast<_Tp>(a.val[3] * alpha)); +} + +template static inline Scalar_<_Tp> operator * (_Tp alpha, const Scalar_<_Tp>& a) +{ + return a*alpha; +} + +template static inline Scalar_<_Tp> operator - (const Scalar_<_Tp>& a) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(-a.val[0]), saturate_cast<_Tp>(-a.val[1]), + saturate_cast<_Tp>(-a.val[2]), saturate_cast<_Tp>(-a.val[3])); +} + + +template static inline Scalar_<_Tp> +operator * (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a[0]*b[0] - a[1]*b[1] - a[2]*b[2] - a[3]*b[3]), + saturate_cast<_Tp>(a[0]*b[1] + a[1]*b[0] + a[2]*b[3] - a[3]*b[2]), + saturate_cast<_Tp>(a[0]*b[2] - a[1]*b[3] + a[2]*b[0] - a[3]*b[1]), + saturate_cast<_Tp>(a[0]*b[3] + a[1]*b[2] - a[2]*b[1] - a[3]*b[0])); +} + +template static inline Scalar_<_Tp>& +operator *= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a = a*b; + return a; +} + +template inline Scalar_<_Tp> Scalar_<_Tp>::conj() const +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(this->val[0]), + saturate_cast<_Tp>(-this->val[1]), + saturate_cast<_Tp>(-this->val[2]), + saturate_cast<_Tp>(-this->val[3])); +} + +template inline bool Scalar_<_Tp>::isReal() const +{ + return this->val[1] == 0 && this->val[2] == 0 && this->val[3] == 0; +} + +template static inline +Scalar_<_Tp> operator / (const Scalar_<_Tp>& a, _Tp alpha) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] / alpha), + saturate_cast<_Tp>(a.val[1] / alpha), + saturate_cast<_Tp>(a.val[2] / alpha), + saturate_cast<_Tp>(a.val[3] / alpha)); +} + 
+template static inline +Scalar_ operator / (const Scalar_& a, float alpha) +{ + float s = 1/alpha; + return Scalar_(a.val[0]*s, a.val[1]*s, a.val[2]*s, a.val[3]*s); +} + +template static inline +Scalar_ operator / (const Scalar_& a, double alpha) +{ + double s = 1/alpha; + return Scalar_(a.val[0]*s, a.val[1]*s, a.val[2]*s, a.val[3]*s); +} + +template static inline +Scalar_<_Tp>& operator /= (Scalar_<_Tp>& a, _Tp alpha) +{ + a = a/alpha; + return a; +} + +template static inline +Scalar_<_Tp> operator / (_Tp a, const Scalar_<_Tp>& b) +{ + _Tp s = a/(b[0]*b[0] + b[1]*b[1] + b[2]*b[2] + b[3]*b[3]); + return b.conj()*s; +} + +template static inline +Scalar_<_Tp> operator / (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return a*((_Tp)1/b); +} + +template static inline +Scalar_<_Tp>& operator /= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a = a/b; + return a; +} + +//////////////////////////////// Range ///////////////////////////////// + +inline Range::Range() : start(0), end(0) {} +inline Range::Range(int _start, int _end) : start(_start), end(_end) {} +inline Range::Range(const CvSlice& slice) : start(slice.start_index), end(slice.end_index) +{ + if( start == 0 && end == CV_WHOLE_SEQ_END_INDEX ) + *this = Range::all(); +} + +inline int Range::size() const { return end - start; } +inline bool Range::empty() const { return start == end; } +inline Range Range::all() { return Range(INT_MIN, INT_MAX); } + +static inline bool operator == (const Range& r1, const Range& r2) +{ return r1.start == r2.start && r1.end == r2.end; } + +static inline bool operator != (const Range& r1, const Range& r2) +{ return !(r1 == r2); } + +static inline bool operator !(const Range& r) +{ return r.start == r.end; } + +static inline Range operator & (const Range& r1, const Range& r2) +{ + Range r(std::max(r1.start, r2.start), std::min(r1.end, r2.end)); + r.end = std::max(r.end, r.start); + return r; +} + +static inline Range& operator &= (Range& r1, const Range& r2) +{ + r1 = r1 & r2; + 
return r1; +} + +static inline Range operator + (const Range& r1, int delta) +{ + return Range(r1.start + delta, r1.end + delta); +} + +static inline Range operator + (int delta, const Range& r1) +{ + return Range(r1.start + delta, r1.end + delta); +} + +static inline Range operator - (const Range& r1, int delta) +{ + return r1 + (-delta); +} + +inline Range::operator CvSlice() const +{ return *this != Range::all() ? cvSlice(start, end) : CV_WHOLE_SEQ; } + + + +//////////////////////////////// Vector //////////////////////////////// + +// template vector class. It is similar to STL's vector, +// with a few important differences: +// 1) it can be created on top of user-allocated data w/o copying it +// 2) vector b = a means copying the header, +// not the underlying data (use clone() to make a deep copy) +template class CV_EXPORTS Vector +{ +public: + typedef _Tp value_type; + typedef _Tp* iterator; + typedef const _Tp* const_iterator; + typedef _Tp& reference; + typedef const _Tp& const_reference; + + struct CV_EXPORTS Hdr + { + Hdr() : data(0), datastart(0), refcount(0), size(0), capacity(0) {}; + _Tp* data; + _Tp* datastart; + int* refcount; + size_t size; + size_t capacity; + }; + + Vector() {} + Vector(size_t _size) { resize(_size); } + Vector(size_t _size, const _Tp& val) + { + resize(_size); + for(size_t i = 0; i < _size; i++) + hdr.data[i] = val; + } + Vector(_Tp* _data, size_t _size, bool _copyData=false) + { set(_data, _size, _copyData); } + + template Vector(const Vec<_Tp, n>& vec) + { set((_Tp*)&vec.val[0], n, true); } + + Vector(const std::vector<_Tp>& vec, bool _copyData=false) + { set((_Tp*)&vec[0], vec.size(), _copyData); } + + Vector(const Vector& d) { *this = d; } + + Vector(const Vector& d, const Range& r_) + { + Range r = r_ == Range::all() ? 
Range(0, d.size()) : r_; + /*if( r == Range::all() ) + r = Range(0, d.size());*/ + if( r.size() > 0 && r.start >= 0 && r.end <= d.size() ) + { + if( d.hdr.refcount ) + CV_XADD(d.hdr.refcount, 1); + hdr.refcount = d.hdr.refcount; + hdr.datastart = d.hdr.datastart; + hdr.data = d.hdr.data + r.start; + hdr.capacity = hdr.size = r.size(); + } + } + + Vector<_Tp>& operator = (const Vector& d) + { + if( this != &d ) + { + if( d.hdr.refcount ) + CV_XADD(d.hdr.refcount, 1); + release(); + hdr = d.hdr; + } + return *this; + } + + ~Vector() { release(); } + + Vector<_Tp> clone() const + { return hdr.data ? Vector<_Tp>(hdr.data, hdr.size, true) : Vector<_Tp>(); } + + void copyTo(Vector<_Tp>& vec) const + { + size_t i, sz = size(); + vec.resize(sz); + const _Tp* src = hdr.data; + _Tp* dst = vec.hdr.data; + for( i = 0; i < sz; i++ ) + dst[i] = src[i]; + } + + void copyTo(std::vector<_Tp>& vec) const + { + size_t i, sz = size(); + vec.resize(sz); + const _Tp* src = hdr.data; + _Tp* dst = sz ? &vec[0] : 0; + for( i = 0; i < sz; i++ ) + dst[i] = src[i]; + } + + operator CvMat() const + { return cvMat((int)size(), 1, type(), (void*)hdr.data); } + + _Tp& operator [] (size_t i) { CV_DbgAssert( i < size() ); return hdr.data[i]; } + const _Tp& operator [] (size_t i) const { CV_DbgAssert( i < size() ); return hdr.data[i]; } + Vector operator() (const Range& r) const { return Vector(*this, r); } + _Tp& back() { CV_DbgAssert(!empty()); return hdr.data[hdr.size-1]; } + const _Tp& back() const { CV_DbgAssert(!empty()); return hdr.data[hdr.size-1]; } + _Tp& front() { CV_DbgAssert(!empty()); return hdr.data[0]; } + const _Tp& front() const { CV_DbgAssert(!empty()); return hdr.data[0]; } + + _Tp* begin() { return hdr.data; } + _Tp* end() { return hdr.data + hdr.size; } + const _Tp* begin() const { return hdr.data; } + const _Tp* end() const { return hdr.data + hdr.size; } + + void addref() { if( hdr.refcount ) CV_XADD(hdr.refcount, 1); } + void release() + { + if( hdr.refcount && 
CV_XADD(hdr.refcount, -1) == 1 ) + { + delete[] hdr.datastart; + delete hdr.refcount; + } + hdr = Hdr(); + } + + void set(_Tp* _data, size_t _size, bool _copyData=false) + { + if( !_copyData ) + { + release(); + hdr.data = hdr.datastart = _data; + hdr.size = hdr.capacity = _size; + hdr.refcount = 0; + } + else + { + reserve(_size); + for( size_t i = 0; i < _size; i++ ) + hdr.data[i] = _data[i]; + hdr.size = _size; + } + } + + void reserve(size_t newCapacity) + { + _Tp* newData; + int* newRefcount; + size_t i, oldSize = hdr.size; + if( (!hdr.refcount || *hdr.refcount == 1) && hdr.capacity >= newCapacity ) + return; + newCapacity = std::max(newCapacity, oldSize); + newData = new _Tp[newCapacity]; + newRefcount = new int(1); + for( i = 0; i < oldSize; i++ ) + newData[i] = hdr.data[i]; + release(); + hdr.data = hdr.datastart = newData; + hdr.capacity = newCapacity; + hdr.size = oldSize; + hdr.refcount = newRefcount; + } + + void resize(size_t newSize) + { + size_t i; + newSize = std::max(newSize, (size_t)0); + if( (!hdr.refcount || *hdr.refcount == 1) && hdr.size == newSize ) + return; + if( newSize > hdr.capacity ) + reserve(std::max(newSize, std::max((size_t)4, hdr.capacity*2))); + for( i = hdr.size; i < newSize; i++ ) + hdr.data[i] = _Tp(); + hdr.size = newSize; + } + + Vector<_Tp>& push_back(const _Tp& elem) + { + if( hdr.size == hdr.capacity ) + reserve( std::max((size_t)4, hdr.capacity*2) ); + hdr.data[hdr.size++] = elem; + return *this; + } + + Vector<_Tp>& pop_back() + { + if( hdr.size > 0 ) + --hdr.size; + return *this; + } + + size_t size() const { return hdr.size; } + size_t capacity() const { return hdr.capacity; } + bool empty() const { return hdr.size == 0; } + void clear() { resize(0); } + int type() const { return DataType<_Tp>::type; } + +protected: + Hdr hdr; +}; + + +template inline typename DataType<_Tp>::work_type +dot(const Vector<_Tp>& v1, const Vector<_Tp>& v2) +{ + typedef typename DataType<_Tp>::work_type _Tw; + size_t i, n = v1.size(); + 
assert(v1.size() == v2.size()); + + _Tw s = 0; + const _Tp *ptr1 = &v1[0], *ptr2 = &v2[0]; + for( i = 0; i <= n - 4; i += 4 ) + s += (_Tw)ptr1[i]*ptr2[i] + (_Tw)ptr1[i+1]*ptr2[i+1] + + (_Tw)ptr1[i+2]*ptr2[i+2] + (_Tw)ptr1[i+3]*ptr2[i+3]; + for( ; i < n; i++ ) + s += (_Tw)ptr1[i]*ptr2[i]; + return s; +} + +// Multiply-with-Carry RNG +inline RNG::RNG() { state = 0xffffffff; } +inline RNG::RNG(uint64 _state) { state = _state ? _state : 0xffffffff; } +inline unsigned RNG::next() +{ + state = (uint64)(unsigned)state*CV_RNG_COEFF + (unsigned)(state >> 32); + return (unsigned)state; +} + +inline RNG::operator uchar() { return (uchar)next(); } +inline RNG::operator schar() { return (schar)next(); } +inline RNG::operator ushort() { return (ushort)next(); } +inline RNG::operator short() { return (short)next(); } +inline RNG::operator unsigned() { return next(); } +inline unsigned RNG::operator ()(unsigned N) {return (unsigned)uniform(0,N);} +inline unsigned RNG::operator ()() {return next();} +inline RNG::operator int() { return (int)next(); } +// * (2^32-1)^-1 +inline RNG::operator float() { return next()*2.3283064365386962890625e-10f; } +inline RNG::operator double() +{ + unsigned t = next(); + return (((uint64)t << 32) | next())*5.4210108624275221700372640043497e-20; +} +inline int RNG::uniform(int a, int b) { return a == b ? 
a : next()%(b - a) + a; } +inline float RNG::uniform(float a, float b) { return ((float)*this)*(b - a) + a; } +inline double RNG::uniform(double a, double b) { return ((double)*this)*(b - a) + a; } + +inline TermCriteria::TermCriteria() : type(0), maxCount(0), epsilon(0) {} +inline TermCriteria::TermCriteria(int _type, int _maxCount, double _epsilon) + : type(_type), maxCount(_maxCount), epsilon(_epsilon) {} +inline TermCriteria::TermCriteria(const CvTermCriteria& criteria) + : type(criteria.type), maxCount(criteria.max_iter), epsilon(criteria.epsilon) {} +inline TermCriteria::operator CvTermCriteria() const +{ return cvTermCriteria(type, maxCount, epsilon); } + +inline uchar* LineIterator::operator *() { return ptr; } +inline LineIterator& LineIterator::operator ++() +{ + int mask = err < 0 ? -1 : 0; + err += minusDelta + (plusDelta & mask); + ptr += minusStep + (plusStep & mask); + return *this; +} +inline LineIterator LineIterator::operator ++(int) +{ + LineIterator it = *this; + ++(*this); + return it; +} +inline Point LineIterator::pos() const +{ + Point p; + p.y = (int)((ptr - ptr0)/step); + p.x = (int)(((ptr - ptr0) - p.y*step)/elemSize); + return p; +} + +/////////////////////////////// AutoBuffer //////////////////////////////////////// + +template inline AutoBuffer<_Tp, fixed_size>::AutoBuffer() +{ + ptr = buf; + size = fixed_size; +} + +template inline AutoBuffer<_Tp, fixed_size>::AutoBuffer(size_t _size) +{ + ptr = buf; + size = fixed_size; + allocate(_size); +} + +template inline AutoBuffer<_Tp, fixed_size>::~AutoBuffer() +{ deallocate(); } + +template inline void AutoBuffer<_Tp, fixed_size>::allocate(size_t _size) +{ + if(_size <= size) + return; + deallocate(); + if(_size > fixed_size) + { + ptr = cv::allocate<_Tp>(_size); + size = _size; + } +} + +template inline void AutoBuffer<_Tp, fixed_size>::deallocate() +{ + if( ptr != buf ) + { + cv::deallocate<_Tp>(ptr, size); + ptr = buf; + size = fixed_size; + } +} + +template inline AutoBuffer<_Tp, 
fixed_size>::operator _Tp* () +{ return ptr; } + +template inline AutoBuffer<_Tp, fixed_size>::operator const _Tp* () const +{ return ptr; } + + +/////////////////////////////////// Ptr //////////////////////////////////////// + +template inline Ptr<_Tp>::Ptr() : obj(0), refcount(0) {} +template inline Ptr<_Tp>::Ptr(_Tp* _obj) : obj(_obj) +{ + if(obj) + { + refcount = (int*)fastMalloc(sizeof(*refcount)); + *refcount = 1; + } + else + refcount = 0; +} + +template inline void Ptr<_Tp>::addref() +{ if( refcount ) CV_XADD(refcount, 1); } + +template inline void Ptr<_Tp>::release() +{ + if( refcount && CV_XADD(refcount, -1) == 1 ) + { + delete_obj(); + fastFree(refcount); + } + refcount = 0; + obj = 0; +} + +template inline void Ptr<_Tp>::delete_obj() +{ + if( obj ) delete obj; +} + +template inline Ptr<_Tp>::~Ptr() { release(); } + +template inline Ptr<_Tp>::Ptr(const Ptr<_Tp>& ptr) +{ + obj = ptr.obj; + refcount = ptr.refcount; + addref(); +} + +template inline Ptr<_Tp>& Ptr<_Tp>::operator = (const Ptr<_Tp>& ptr) +{ + int* _refcount = ptr.refcount; + if( _refcount ) + CV_XADD(_refcount, 1); + release(); + obj = ptr.obj; + refcount = _refcount; + return *this; +} + +template inline _Tp* Ptr<_Tp>::operator -> () { return obj; } +template inline const _Tp* Ptr<_Tp>::operator -> () const { return obj; } + +template inline Ptr<_Tp>::operator _Tp* () { return obj; } +template inline Ptr<_Tp>::operator const _Tp*() const { return obj; } + +template inline bool Ptr<_Tp>::empty() const { return obj == 0; } + +//// specializied implementations of Ptr::delete_obj() for classic OpenCV types + +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); + +//////////////////////////////////////// XML & YAML I/O //////////////////////////////////// + +CV_EXPORTS_W 
void write( FileStorage& fs, const string& name, int value ); +CV_EXPORTS_W void write( FileStorage& fs, const string& name, float value ); +CV_EXPORTS_W void write( FileStorage& fs, const string& name, double value ); +CV_EXPORTS_W void write( FileStorage& fs, const string& name, const string& value ); + +template inline void write(FileStorage& fs, const _Tp& value) +{ write(fs, string(), value); } + +CV_EXPORTS void writeScalar( FileStorage& fs, int value ); +CV_EXPORTS void writeScalar( FileStorage& fs, float value ); +CV_EXPORTS void writeScalar( FileStorage& fs, double value ); +CV_EXPORTS void writeScalar( FileStorage& fs, const string& value ); + +template<> inline void write( FileStorage& fs, const int& value ) +{ + writeScalar(fs, value); +} + +template<> inline void write( FileStorage& fs, const float& value ) +{ + writeScalar(fs, value); +} + +template<> inline void write( FileStorage& fs, const double& value ) +{ + writeScalar(fs, value); +} + +template<> inline void write( FileStorage& fs, const string& value ) +{ + writeScalar(fs, value); +} + +template inline void write(FileStorage& fs, const Point_<_Tp>& pt ) +{ + write(fs, pt.x); + write(fs, pt.y); +} + +template inline void write(FileStorage& fs, const Point3_<_Tp>& pt ) +{ + write(fs, pt.x); + write(fs, pt.y); + write(fs, pt.z); +} + +template inline void write(FileStorage& fs, const Size_<_Tp>& sz ) +{ + write(fs, sz.width); + write(fs, sz.height); +} + +template inline void write(FileStorage& fs, const Complex<_Tp>& c ) +{ + write(fs, c.re); + write(fs, c.im); +} + +template inline void write(FileStorage& fs, const Rect_<_Tp>& r ) +{ + write(fs, r.x); + write(fs, r.y); + write(fs, r.width); + write(fs, r.height); +} + +template inline void write(FileStorage& fs, const Vec<_Tp, cn>& v ) +{ + for(int i = 0; i < cn; i++) + write(fs, v.val[i]); +} + +template inline void write(FileStorage& fs, const Scalar_<_Tp>& s ) +{ + write(fs, s.val[0]); + write(fs, s.val[1]); + write(fs, s.val[2]); + 
write(fs, s.val[3]); +} + +inline void write(FileStorage& fs, const Range& r ) +{ + write(fs, r.start); + write(fs, r.end); +} + +class CV_EXPORTS WriteStructContext +{ +public: + WriteStructContext(FileStorage& _fs, const string& name, + int flags, const string& typeName=string()); + ~WriteStructContext(); + FileStorage* fs; +}; + +template inline void write(FileStorage& fs, const string& name, const Point_<_Tp>& pt ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, pt.x); + write(fs, pt.y); +} + +template inline void write(FileStorage& fs, const string& name, const Point3_<_Tp>& pt ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, pt.x); + write(fs, pt.y); + write(fs, pt.z); +} + +template inline void write(FileStorage& fs, const string& name, const Size_<_Tp>& sz ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, sz.width); + write(fs, sz.height); +} + +template inline void write(FileStorage& fs, const string& name, const Complex<_Tp>& c ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, c.re); + write(fs, c.im); +} + +template inline void write(FileStorage& fs, const string& name, const Rect_<_Tp>& r ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, r.x); + write(fs, r.y); + write(fs, r.width); + write(fs, r.height); +} + +template inline void write(FileStorage& fs, const string& name, const Vec<_Tp, cn>& v ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + for(int i = 0; i < cn; i++) + write(fs, v.val[i]); +} + +template inline void write(FileStorage& fs, const string& name, const Scalar_<_Tp>& s ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, s.val[0]); + write(fs, s.val[1]); + write(fs, s.val[2]); + write(fs, s.val[3]); +} + +inline void write(FileStorage& fs, const string& name, const Range& r ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, 
r.start); + write(fs, r.end); +} + +template class CV_EXPORTS VecWriterProxy +{ +public: + VecWriterProxy( FileStorage* _fs ) : fs(_fs) {} + void operator()(const vector<_Tp>& vec) const + { + size_t i, count = vec.size(); + for( i = 0; i < count; i++ ) + write( *fs, vec[i] ); + } + FileStorage* fs; +}; + +template class CV_EXPORTS VecWriterProxy<_Tp,1> +{ +public: + VecWriterProxy( FileStorage* _fs ) : fs(_fs) {} + void operator()(const vector<_Tp>& vec) const + { + int _fmt = DataType<_Tp>::fmt; + char fmt[] = { (char)((_fmt>>8)+'1'), (char)_fmt, '\0' }; + fs->writeRaw( string(fmt), (uchar*)&vec[0], vec.size()*sizeof(_Tp) ); + } + FileStorage* fs; +}; + + +template static inline void write( FileStorage& fs, const vector<_Tp>& vec ) +{ + VecWriterProxy<_Tp, DataType<_Tp>::fmt != 0> w(&fs); + w(vec); +} + +template static inline FileStorage& +operator << ( FileStorage& fs, const vector<_Tp>& vec ) +{ + VecWriterProxy<_Tp, DataType<_Tp>::generic_type == 0> w(&fs); + w(vec); + return fs; +} + +CV_EXPORTS_W void write( FileStorage& fs, const string& name, const Mat& value ); +CV_EXPORTS void write( FileStorage& fs, const string& name, const SparseMat& value ); + +template static inline FileStorage& operator << (FileStorage& fs, const _Tp& value) +{ + if( !fs.isOpened() ) + return fs; + if( fs.state == FileStorage::NAME_EXPECTED + FileStorage::INSIDE_MAP ) + CV_Error( CV_StsError, "No element name has been given" ); + write( fs, fs.elname, value ); + if( fs.state & FileStorage::INSIDE_MAP ) + fs.state = FileStorage::NAME_EXPECTED + FileStorage::INSIDE_MAP; + return fs; +} + +CV_EXPORTS FileStorage& operator << (FileStorage& fs, const string& str); + +static inline FileStorage& operator << (FileStorage& fs, const char* str) +{ return (fs << string(str)); } + +inline FileNode::FileNode() : fs(0), node(0) {} +inline FileNode::FileNode(const CvFileStorage* _fs, const CvFileNode* _node) + : fs(_fs), node(_node) {} + +inline FileNode::FileNode(const FileNode& _node) : 
fs(_node.fs), node(_node.node) {} + +inline int FileNode::type() const { return !node ? NONE : (node->tag & TYPE_MASK); } +inline bool FileNode::empty() const { return node == 0; } +inline bool FileNode::isNone() const { return type() == NONE; } +inline bool FileNode::isSeq() const { return type() == SEQ; } +inline bool FileNode::isMap() const { return type() == MAP; } +inline bool FileNode::isInt() const { return type() == INT; } +inline bool FileNode::isReal() const { return type() == REAL; } +inline bool FileNode::isString() const { return type() == STR; } +inline bool FileNode::isNamed() const { return !node ? false : (node->tag & NAMED) != 0; } +inline size_t FileNode::size() const +{ + int t = type(); + return t == MAP ? ((CvSet*)node->data.map)->active_count : + t == SEQ ? node->data.seq->total : node != 0; +} + +inline CvFileNode* FileNode::operator *() { return (CvFileNode*)node; } +inline const CvFileNode* FileNode::operator* () const { return node; } + +static inline void read(const FileNode& node, int& value, int default_value) +{ + value = !node.node ? default_value : + CV_NODE_IS_INT(node.node->tag) ? node.node->data.i : + CV_NODE_IS_REAL(node.node->tag) ? 
cvRound(node.node->data.f) : 0x7fffffff; +} + +static inline void read(const FileNode& node, bool& value, bool default_value) +{ + int temp; read(node, temp, (int)default_value); + value = temp != 0; +} + +static inline void read(const FileNode& node, uchar& value, uchar default_value) +{ + int temp; read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline void read(const FileNode& node, schar& value, schar default_value) +{ + int temp; read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline void read(const FileNode& node, ushort& value, ushort default_value) +{ + int temp; read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline void read(const FileNode& node, short& value, short default_value) +{ + int temp; read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline void read(const FileNode& node, float& value, float default_value) +{ + value = !node.node ? default_value : + CV_NODE_IS_INT(node.node->tag) ? (float)node.node->data.i : + CV_NODE_IS_REAL(node.node->tag) ? (float)node.node->data.f : 1e30f; +} + +static inline void read(const FileNode& node, double& value, double default_value) +{ + value = !node.node ? default_value : + CV_NODE_IS_INT(node.node->tag) ? (double)node.node->data.i : + CV_NODE_IS_REAL(node.node->tag) ? node.node->data.f : 1e300; +} + +static inline void read(const FileNode& node, string& value, const string& default_value) +{ + value = !node.node ? default_value : CV_NODE_IS_STRING(node.node->tag) ? 
string(node.node->data.str.ptr) : string(""); +} + +CV_EXPORTS_W void read(const FileNode& node, Mat& mat, const Mat& default_mat=Mat() ); +CV_EXPORTS void read(const FileNode& node, SparseMat& mat, const SparseMat& default_mat=SparseMat() ); + +inline FileNode::operator int() const +{ + int value; + read(*this, value, 0); + return value; +} +inline FileNode::operator float() const +{ + float value; + read(*this, value, 0.f); + return value; +} +inline FileNode::operator double() const +{ + double value; + read(*this, value, 0.); + return value; +} +inline FileNode::operator string() const +{ + string value; + read(*this, value, value); + return value; +} + +inline void FileNode::readRaw( const string& fmt, uchar* vec, size_t len ) const +{ + begin().readRaw( fmt, vec, len ); +} + +template class CV_EXPORTS VecReaderProxy +{ +public: + VecReaderProxy( FileNodeIterator* _it ) : it(_it) {} + void operator()(vector<_Tp>& vec, size_t count) const + { + count = std::min(count, it->remaining); + vec.resize(count); + for( size_t i = 0; i < count; i++, ++(*it) ) + read(**it, vec[i], _Tp()); + } + FileNodeIterator* it; +}; + +template class CV_EXPORTS VecReaderProxy<_Tp,1> +{ +public: + VecReaderProxy( FileNodeIterator* _it ) : it(_it) {} + void operator()(vector<_Tp>& vec, size_t count) const + { + size_t remaining = it->remaining, cn = DataType<_Tp>::channels; + int _fmt = DataType<_Tp>::fmt; + char fmt[] = { (char)((_fmt>>8)+'1'), (char)_fmt, '\0' }; + size_t remaining1 = remaining/cn; + count = count < remaining1 ? 
count : remaining1; + vec.resize(count); + it->readRaw( string(fmt), (uchar*)&vec[0], count*sizeof(_Tp) ); + } + FileNodeIterator* it; +}; + +template static inline void +read( FileNodeIterator& it, vector<_Tp>& vec, size_t maxCount=(size_t)INT_MAX ) +{ + VecReaderProxy<_Tp, DataType<_Tp>::fmt != 0> r(&it); + r(vec, maxCount); +} + +template static inline void +read( FileNode& node, vector<_Tp>& vec, const vector<_Tp>& default_value=vector<_Tp>() ) +{ + read( node.begin(), vec ); +} + +inline FileNodeIterator FileNode::begin() const +{ + return FileNodeIterator(fs, node); +} + +inline FileNodeIterator FileNode::end() const +{ + return FileNodeIterator(fs, node, size()); +} + +inline FileNode FileNodeIterator::operator *() const +{ return FileNode(fs, (const CvFileNode*)reader.ptr); } + +inline FileNode FileNodeIterator::operator ->() const +{ return FileNode(fs, (const CvFileNode*)reader.ptr); } + +template static inline FileNodeIterator& operator >> (FileNodeIterator& it, _Tp& value) +{ read( *it, value, _Tp()); return ++it; } + +template static inline +FileNodeIterator& operator >> (FileNodeIterator& it, vector<_Tp>& vec) +{ + VecReaderProxy<_Tp, DataType<_Tp>::fmt != 0> r(&it); + r(vec, (size_t)INT_MAX); + return it; +} + +template static inline void operator >> (const FileNode& n, _Tp& value) +{ read( n, value, _Tp()); } + +template static inline void operator >> (const FileNode& n, vector<_Tp>& vec) +{ FileNodeIterator it = n.begin(); it >> vec; } + +static inline bool operator == (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return it1.fs == it2.fs && it1.container == it2.container && + it1.reader.ptr == it2.reader.ptr && it1.remaining == it2.remaining; +} + +static inline bool operator != (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return !(it1 == it2); +} + +static inline ptrdiff_t operator - (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return it2.remaining - it1.remaining; +} + +static inline bool 
operator < (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return it1.remaining > it2.remaining; +} + +inline FileNode FileStorage::getFirstTopLevelNode() const +{ + FileNode r = root(); + FileNodeIterator it = r.begin(); + return it != r.end() ? *it : FileNode(); +} + +//////////////////////////////////////// Various algorithms //////////////////////////////////// + +template static inline _Tp gcd(_Tp a, _Tp b) +{ + if( a < b ) + std::swap(a, b); + while( b > 0 ) + { + _Tp r = a % b; + a = b; + b = r; + } + return a; +} + +/****************************************************************************************\ + + Generic implementation of QuickSort algorithm + Use it as: vector<_Tp> a; ... sort(a,); + + The current implementation was derived from *BSD system qsort(): + + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + +\****************************************************************************************/ + +template void sort( vector<_Tp>& vec, _LT LT=_LT() ) +{ + int isort_thresh = 7; + int sp = 0; + + struct + { + _Tp *lb; + _Tp *ub; + } stack[48]; + + size_t total = vec.size(); + + if( total <= 1 ) + return; + + _Tp* arr = &vec[0]; + stack[0].lb = arr; + stack[0].ub = arr + (total - 1); + + while( sp >= 0 ) + { + _Tp* left = stack[sp].lb; + _Tp* right = stack[sp--].ub; + + for(;;) + { + int i, n = (int)(right - left) + 1, m; + _Tp* ptr; + _Tp* ptr2; + + if( n <= isort_thresh ) + { + insert_sort: + for( ptr = left + 1; ptr <= right; ptr++ ) + { + for( ptr2 = ptr; ptr2 > left && LT(ptr2[0],ptr2[-1]); ptr2--) + std::swap( ptr2[0], ptr2[-1] ); + } + break; + } + else + { + _Tp* left0; + _Tp* left1; + _Tp* right0; + _Tp* right1; + _Tp* pivot; + _Tp* a; + _Tp* b; + _Tp* c; + int swap_cnt = 0; + + left0 = left; + right0 = right; + pivot = left + (n/2); + + if( n > 40 ) + { + int d = n / 8; + a = left, b = left + d, c = left + 2*d; + left = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); + + a = pivot - d, b = pivot, c = pivot + d; + pivot = LT(*a, *b) ? 
(LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); + + a = right - 2*d, b = right - d, c = right; + right = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); + } + + a = left, b = pivot, c = right; + pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); + if( pivot != left0 ) + { + std::swap( *pivot, *left0 ); + pivot = left0; + } + left = left1 = left0 + 1; + right = right1 = right0; + + for(;;) + { + while( left <= right && !LT(*pivot, *left) ) + { + if( !LT(*left, *pivot) ) + { + if( left > left1 ) + std::swap( *left1, *left ); + swap_cnt = 1; + left1++; + } + left++; + } + + while( left <= right && !LT(*right, *pivot) ) + { + if( !LT(*pivot, *right) ) + { + if( right < right1 ) + std::swap( *right1, *right ); + swap_cnt = 1; + right1--; + } + right--; + } + + if( left > right ) + break; + std::swap( *left, *right ); + swap_cnt = 1; + left++; + right--; + } + + if( swap_cnt == 0 ) + { + left = left0, right = right0; + goto insert_sort; + } + + n = std::min( (int)(left1 - left0), (int)(left - left1) ); + for( i = 0; i < n; i++ ) + std::swap( left0[i], left[i-n] ); + + n = std::min( (int)(right0 - right1), (int)(right1 - right) ); + for( i = 0; i < n; i++ ) + std::swap( left[i], right0[i-n+1] ); + n = (int)(left - left1); + m = (int)(right1 - right); + if( n > 1 ) + { + if( m > 1 ) + { + if( n > m ) + { + stack[++sp].lb = left0; + stack[sp].ub = left0 + n - 1; + left = right0 - m + 1, right = right0; + } + else + { + stack[++sp].lb = right0 - m + 1; + stack[sp].ub = right0; + left = left0, right = left0 + n - 1; + } + } + else + left = left0, right = left0 + n - 1; + } + else if( m > 1 ) + left = right0 - m + 1, right = right0; + else + break; + } + } + } +} + +template class CV_EXPORTS LessThan +{ +public: + bool operator()(const _Tp& a, const _Tp& b) const { return a < b; } +}; + +template class CV_EXPORTS GreaterEq +{ +public: + 
bool operator()(const _Tp& a, const _Tp& b) const { return a >= b; } +}; + +template class CV_EXPORTS LessThanIdx +{ +public: + LessThanIdx( const _Tp* _arr ) : arr(_arr) {} + bool operator()(int a, int b) const { return arr[a] < arr[b]; } + const _Tp* arr; +}; + +template class CV_EXPORTS GreaterEqIdx +{ +public: + GreaterEqIdx( const _Tp* _arr ) : arr(_arr) {} + bool operator()(int a, int b) const { return arr[a] >= arr[b]; } + const _Tp* arr; +}; + + +// This function splits the input sequence or set into one or more equivalence classes and +// returns the vector of labels - 0-based class indexes for each element. +// predicate(a,b) returns true if the two sequence elements certainly belong to the same class. +// +// The algorithm is described in "Introduction to Algorithms" +// by Cormen, Leiserson and Rivest, the chapter "Data structures for disjoint sets" +template int +partition( const vector<_Tp>& _vec, vector& labels, + _EqPredicate predicate=_EqPredicate()) +{ + int i, j, N = (int)_vec.size(); + const _Tp* vec = &_vec[0]; + + const int PARENT=0; + const int RANK=1; + + vector _nodes(N*2); + int (*nodes)[2] = (int(*)[2])&_nodes[0]; + + // The first O(N) pass: create N single-vertex trees + for(i = 0; i < N; i++) + { + nodes[i][PARENT]=-1; + nodes[i][RANK] = 0; + } + + // The main O(N^2) pass: merge connected components + for( i = 0; i < N; i++ ) + { + int root = i; + + // find root + while( nodes[root][PARENT] >= 0 ) + root = nodes[root][PARENT]; + + for( j = 0; j < N; j++ ) + { + if( i == j || !predicate(vec[i], vec[j])) + continue; + int root2 = j; + + while( nodes[root2][PARENT] >= 0 ) + root2 = nodes[root2][PARENT]; + + if( root2 != root ) + { + // unite both trees + int rank = nodes[root][RANK], rank2 = nodes[root2][RANK]; + if( rank > rank2 ) + nodes[root2][PARENT] = root; + else + { + nodes[root][PARENT] = root2; + nodes[root2][RANK] += rank == rank2; + root = root2; + } + assert( nodes[root][PARENT] < 0 ); + + int k = j, parent; + + // compress the 
path from node2 to root + while( (parent = nodes[k][PARENT]) >= 0 ) + { + nodes[k][PARENT] = root; + k = parent; + } + + // compress the path from node to root + k = i; + while( (parent = nodes[k][PARENT]) >= 0 ) + { + nodes[k][PARENT] = root; + k = parent; + } + } + } + } + + // Final O(N) pass: enumerate classes + labels.resize(N); + int nclasses = 0; + + for( i = 0; i < N; i++ ) + { + int root = i; + while( nodes[root][PARENT] >= 0 ) + root = nodes[root][PARENT]; + // re-use the rank as the class label + if( nodes[root][RANK] >= 0 ) + nodes[root][RANK] = ~nclasses++; + labels[i] = ~nodes[root][RANK]; + } + + return nclasses; +} + + +////////////////////////////////////////////////////////////////////////////// + +// bridge C++ => C Seq API +CV_EXPORTS schar* seqPush( CvSeq* seq, const void* element=0); +CV_EXPORTS schar* seqPushFront( CvSeq* seq, const void* element=0); +CV_EXPORTS void seqPop( CvSeq* seq, void* element=0); +CV_EXPORTS void seqPopFront( CvSeq* seq, void* element=0); +CV_EXPORTS void seqPopMulti( CvSeq* seq, void* elements, + int count, int in_front=0 ); +CV_EXPORTS void seqRemove( CvSeq* seq, int index ); +CV_EXPORTS void clearSeq( CvSeq* seq ); +CV_EXPORTS schar* getSeqElem( const CvSeq* seq, int index ); +CV_EXPORTS void seqRemoveSlice( CvSeq* seq, CvSlice slice ); +CV_EXPORTS void seqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr ); + +template inline Seq<_Tp>::Seq() : seq(0) {} +template inline Seq<_Tp>::Seq( const CvSeq* _seq ) : seq((CvSeq*)_seq) +{ + CV_Assert(!_seq || _seq->elem_size == sizeof(_Tp)); +} + +template inline Seq<_Tp>::Seq( MemStorage& storage, + int headerSize ) +{ + CV_Assert(headerSize >= (int)sizeof(CvSeq)); + seq = cvCreateSeq(DataType<_Tp>::type, headerSize, sizeof(_Tp), storage); +} + +template inline _Tp& Seq<_Tp>::operator [](int idx) +{ return *(_Tp*)getSeqElem(seq, idx); } + +template inline const _Tp& Seq<_Tp>::operator [](int idx) const +{ return *(_Tp*)getSeqElem(seq, idx); } + +template 
inline SeqIterator<_Tp> Seq<_Tp>::begin() const +{ return SeqIterator<_Tp>(*this); } + +template inline SeqIterator<_Tp> Seq<_Tp>::end() const +{ return SeqIterator<_Tp>(*this, true); } + +template inline size_t Seq<_Tp>::size() const +{ return seq ? seq->total : 0; } + +template inline int Seq<_Tp>::type() const +{ return seq ? CV_MAT_TYPE(seq->flags) : 0; } + +template inline int Seq<_Tp>::depth() const +{ return seq ? CV_MAT_DEPTH(seq->flags) : 0; } + +template inline int Seq<_Tp>::channels() const +{ return seq ? CV_MAT_CN(seq->flags) : 0; } + +template inline size_t Seq<_Tp>::elemSize() const +{ return seq ? seq->elem_size : 0; } + +template inline size_t Seq<_Tp>::index(const _Tp& elem) const +{ return cvSeqElemIdx(seq, &elem); } + +template inline void Seq<_Tp>::push_back(const _Tp& elem) +{ cvSeqPush(seq, &elem); } + +template inline void Seq<_Tp>::push_front(const _Tp& elem) +{ cvSeqPushFront(seq, &elem); } + +template inline void Seq<_Tp>::push_back(const _Tp* elem, size_t count) +{ cvSeqPushMulti(seq, elem, (int)count, 0); } + +template inline void Seq<_Tp>::push_front(const _Tp* elem, size_t count) +{ cvSeqPushMulti(seq, elem, (int)count, 1); } + +template inline _Tp& Seq<_Tp>::back() +{ return *(_Tp*)getSeqElem(seq, -1); } + +template inline const _Tp& Seq<_Tp>::back() const +{ return *(const _Tp*)getSeqElem(seq, -1); } + +template inline _Tp& Seq<_Tp>::front() +{ return *(_Tp*)getSeqElem(seq, 0); } + +template inline const _Tp& Seq<_Tp>::front() const +{ return *(const _Tp*)getSeqElem(seq, 0); } + +template inline bool Seq<_Tp>::empty() const +{ return !seq || seq->total == 0; } + +template inline void Seq<_Tp>::clear() +{ if(seq) clearSeq(seq); } + +template inline void Seq<_Tp>::pop_back() +{ seqPop(seq); } + +template inline void Seq<_Tp>::pop_front() +{ seqPopFront(seq); } + +template inline void Seq<_Tp>::pop_back(_Tp* elem, size_t count) +{ seqPopMulti(seq, elem, (int)count, 0); } + +template inline void Seq<_Tp>::pop_front(_Tp* elem, size_t 
count) +{ seqPopMulti(seq, elem, (int)count, 1); } + +template inline void Seq<_Tp>::insert(int idx, const _Tp& elem) +{ seqInsert(seq, idx, &elem); } + +template inline void Seq<_Tp>::insert(int idx, const _Tp* elems, size_t count) +{ + CvMat m = cvMat(1, count, DataType<_Tp>::type, elems); + seqInsertSlice(seq, idx, &m); +} + +template inline void Seq<_Tp>::remove(int idx) +{ seqRemove(seq, idx); } + +template inline void Seq<_Tp>::remove(const Range& r) +{ seqRemoveSlice(seq, r); } + +template inline void Seq<_Tp>::copyTo(vector<_Tp>& vec, const Range& range) const +{ + size_t len = !seq ? 0 : range == Range::all() ? seq->total : range.end - range.start; + vec.resize(len); + if( seq && len ) + cvCvtSeqToArray(seq, &vec[0], range); +} + +template inline Seq<_Tp>::operator vector<_Tp>() const +{ + vector<_Tp> vec; + copyTo(vec); + return vec; +} + +template inline SeqIterator<_Tp>::SeqIterator() +{ memset(this, 0, sizeof(*this)); } + +template inline SeqIterator<_Tp>::SeqIterator(const Seq<_Tp>& seq, bool seekEnd) +{ + cvStartReadSeq(seq.seq, this); + index = seekEnd ? 
seq.seq->total : 0; +} + +template inline void SeqIterator<_Tp>::seek(size_t pos) +{ + cvSetSeqReaderPos(this, (int)pos, false); + index = pos; +} + +template inline size_t SeqIterator<_Tp>::tell() const +{ return index; } + +template inline _Tp& SeqIterator<_Tp>::operator *() +{ return *(_Tp*)ptr; } + +template inline const _Tp& SeqIterator<_Tp>::operator *() const +{ return *(const _Tp*)ptr; } + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator ++() +{ + CV_NEXT_SEQ_ELEM(sizeof(_Tp), *this); + if( ++index >= seq->total*2 ) + index = 0; + return *this; +} + +template inline SeqIterator<_Tp> SeqIterator<_Tp>::operator ++(int) const +{ + SeqIterator<_Tp> it = *this; + ++*this; + return it; +} + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator --() +{ + CV_PREV_SEQ_ELEM(sizeof(_Tp), *this); + if( --index < 0 ) + index = seq->total*2-1; + return *this; +} + +template inline SeqIterator<_Tp> SeqIterator<_Tp>::operator --(int) const +{ + SeqIterator<_Tp> it = *this; + --*this; + return it; +} + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator +=(int delta) +{ + cvSetSeqReaderPos(this, delta, 1); + index += delta; + int n = seq->total*2; + if( index < 0 ) + index += n; + if( index >= n ) + index -= n; + return *this; +} + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator -=(int delta) +{ + return (*this += -delta); +} + +template inline ptrdiff_t operator - (const SeqIterator<_Tp>& a, + const SeqIterator<_Tp>& b) +{ + ptrdiff_t delta = a.index - b.index, n = a.seq->total; + if( std::abs(static_cast(delta)) > n ) + delta += delta < 0 ? 
n : -n; + return delta; +} + +template inline bool operator == (const SeqIterator<_Tp>& a, + const SeqIterator<_Tp>& b) +{ + return a.seq == b.seq && a.index == b.index; +} + +template inline bool operator != (const SeqIterator<_Tp>& a, + const SeqIterator<_Tp>& b) +{ + return !(a == b); +} + + +template struct CV_EXPORTS RTTIImpl +{ +public: + static int isInstance(const void* ptr) + { + static _ClsName dummy; + union + { + const void* p; + const void** pp; + } a, b; + a.p = &dummy; + b.p = ptr; + return *a.pp == *b.pp; + } + static void release(void** dbptr) + { + if(dbptr && *dbptr) + { + delete (_ClsName*)*dbptr; + *dbptr = 0; + } + } + static void* read(CvFileStorage* fs, CvFileNode* n) + { + FileNode fn(fs, n); + _ClsName* obj = new _ClsName; + if(obj->read(fn)) + return obj; + delete obj; + return 0; + } + + static void write(CvFileStorage* _fs, const char* name, const void* ptr, CvAttrList) + { + if(ptr && _fs) + { + FileStorage fs(_fs); + fs.fs.addref(); + ((const _ClsName*)ptr)->write(fs, string(name)); + } + } + + static void* clone(const void* ptr) + { + if(!ptr) + return 0; + return new _ClsName(*(const _ClsName*)ptr); + } +}; + + +class CV_EXPORTS Formatter +{ +public: + virtual ~Formatter() {} + virtual void write(std::ostream& out, const Mat& m, const int* params=0, int nparams=0) const = 0; + virtual void write(std::ostream& out, const void* data, int nelems, int type, + const int* params=0, int nparams=0) const = 0; + static const Formatter* get(const char* fmt=""); + static const Formatter* setDefault(const Formatter* fmt); +}; + + +struct CV_EXPORTS Formatted +{ + Formatted(const Mat& m, const Formatter* fmt, + const vector& params); + Formatted(const Mat& m, const Formatter* fmt, + const int* params=0); + Mat mtx; + const Formatter* fmt; + vector params; +}; + + +/** Writes a point to an output stream in Matlab notation + */ +template inline std::ostream& operator<<(std::ostream& out, const Point_<_Tp>& p) +{ + out << "[" << p.x << ", " << p.y 
<< "]"; + return out; +} + +/** Writes a point to an output stream in Matlab notation + */ +template inline std::ostream& operator<<(std::ostream& out, const Point3_<_Tp>& p) +{ + out << "[" << p.x << ", " << p.y << ", " << p.z << "]"; + return out; +} + +static inline Formatted format(const Mat& mtx, const char* fmt, + const vector& params=vector()) +{ + return Formatted(mtx, Formatter::get(fmt), params); +} + +template static inline Formatted format(const vector >& vec, + const char* fmt, const vector& params=vector()) +{ + return Formatted(Mat(vec), Formatter::get(fmt), params); +} + +template static inline Formatted format(const vector >& vec, + const char* fmt, const vector& params=vector()) +{ + return Formatted(Mat(vec), Formatter::get(fmt), params); +} + +/** \brief prints Mat to the output stream in Matlab notation + * use like + @verbatim + Mat my_mat = Mat::eye(3,3,CV_32F); + std::cout << my_mat; + @endverbatim + */ +static inline std::ostream& operator << (std::ostream& out, const Mat& mtx) +{ + Formatter::get()->write(out, mtx); + return out; +} + +/** \brief prints Mat to the output stream allows in the specified notation (see format) + * use like + @verbatim + Mat my_mat = Mat::eye(3,3,CV_32F); + std::cout << my_mat; + @endverbatim + */ +static inline std::ostream& operator << (std::ostream& out, const Formatted& fmtd) +{ + fmtd.fmt->write(out, fmtd.mtx); + return out; +} + + +template static inline std::ostream& operator << (std::ostream& out, + const vector >& vec) +{ + Formatter::get()->write(out, Mat(vec)); + return out; +} + + +template static inline std::ostream& operator << (std::ostream& out, + const vector >& vec) +{ + Formatter::get()->write(out, Mat(vec)); + return out; +} + +/*template struct AlgorithmParamType {}; +template<> struct AlgorithmParamType { enum { type = CV_PARAM_TYPE_INT }; }; +template<> struct AlgorithmParamType { enum { type = CV_PARAM_TYPE_REAL }; }; +template<> struct AlgorithmParamType { enum { type = 
CV_PARAM_TYPE_STRING }; }; +template<> struct AlgorithmParamType { enum { type = CV_PARAM_TYPE_MAT }; }; + +template _Tp Algorithm::get(int paramId) const +{ + _Tp value = _Tp(); + get_(paramId, AlgorithmParamType<_Tp>::type, &value); + return value; +} + +template bool Algorithm::set(int paramId, const _Tp& value) +{ + set_(paramId, AlgorithmParamType<_Tp>::type, &value); + return value; +} + +template _Tp Algorithm::paramDefaultValue(int paramId) const +{ + _Tp value = _Tp(); + paramDefaultValue_(paramId, AlgorithmParamType<_Tp>::type, &value); + return value; +} + +template bool Algorithm::paramRange(int paramId, _Tp& minVal, _Tp& maxVal) const +{ + return paramRange_(paramId, AlgorithmParamType<_Tp>::type, &minVal, &maxVal); +} + +template void Algorithm::addParam(int propId, _Tp& value, bool readOnly, const string& name, + const string& help, const _Tp& defaultValue, + _Tp (Algorithm::*getter)(), bool (Algorithm::*setter)(const _Tp&)) +{ + addParam_(propId, AlgorithmParamType<_Tp>::type, &value, readOnly, name, help, &defaultValue, + (void*)getter, (void*)setter); +} + +template void Algorithm::setParamRange(int propId, const _Tp& minVal, const _Tp& maxVal) +{ + setParamRange_(propId, AlgorithmParamType<_Tp>::type, &minVal, &maxVal); +}*/ + +} + +#endif // __cplusplus +#endif diff --git a/opencv/core/opencv2/core/types_c.h b/opencv/core/opencv2/core/types_c.h new file mode 100644 index 0000000..f0d4213 --- /dev/null +++ b/opencv/core/opencv2/core/types_c.h @@ -0,0 +1,1875 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_CORE_TYPES_H__ +#define __OPENCV_CORE_TYPES_H__ + +#if !defined _CRT_SECURE_NO_DEPRECATE && _MSC_VER > 1300 +#define _CRT_SECURE_NO_DEPRECATE /* to avoid multiple Visual Studio 2005 warnings */ +#endif + + +#ifndef SKIP_INCLUDES + #include + #include + #include + #include + +#if !defined _MSC_VER && !defined __BORLANDC__ + #include +#endif + + #if defined __ICL + #define CV_ICC __ICL + #elif defined __ICC + #define CV_ICC __ICC + #elif defined __ECL + #define CV_ICC __ECL + #elif defined __ECC + #define CV_ICC __ECC + #elif defined __INTEL_COMPILER + #define CV_ICC __INTEL_COMPILER + #endif + + #if (_MSC_VER >= 1400 && defined _M_X64) || (__GNUC__ >= 4 && defined __x86_64__) + #if defined WIN32 + #include + #endif + #include + #endif + + #if defined __BORLANDC__ + #include + #else + #include + #endif + + #ifdef HAVE_IPL + #ifndef __IPL_H__ + #if defined WIN32 || defined _WIN32 + #include + #else + #include + #endif + #endif + #elif defined __IPL_H__ + #define HAVE_IPL + #endif +#endif // SKIP_INCLUDES + +#if defined WIN32 || defined _WIN32 + #define CV_CDECL __cdecl + #define CV_STDCALL __stdcall +#else + #define CV_CDECL + #define CV_STDCALL +#endif + +#ifndef CV_EXTERN_C + #ifdef __cplusplus + #define CV_EXTERN_C extern "C" + #define CV_DEFAULT(val) = val + #else + #define CV_EXTERN_C + #define CV_DEFAULT(val) + #endif +#endif + +#ifndef CV_EXTERN_C_FUNCPTR + #ifdef __cplusplus + #define CV_EXTERN_C_FUNCPTR(x) extern "C" { typedef x; } + #else + #define CV_EXTERN_C_FUNCPTR(x) typedef x + #endif +#endif + +#ifndef CV_INLINE +#if defined __cplusplus + #define CV_INLINE inline +#elif (defined WIN32 || defined _WIN32 || defined WINCE) && !defined __GNUC__ + #define CV_INLINE __inline +#else + #define CV_INLINE static +#endif +#endif /* CV_INLINE */ + +#if (defined WIN32 || defined _WIN32 || defined WINCE) && defined CVAPI_EXPORTS + #define CV_EXPORTS __declspec(dllexport) +#else + #define CV_EXPORTS +#endif + +#ifndef CVAPI + #define 
CVAPI(rettype) CV_EXTERN_C CV_EXPORTS rettype CV_CDECL +#endif + +#if defined _MSC_VER || defined __BORLANDC__ +typedef __int64 int64; +typedef unsigned __int64 uint64; +#define CV_BIG_INT(n) n##I64 +#define CV_BIG_UINT(n) n##UI64 +#else +typedef int64_t int64; +typedef uint64_t uint64; +#define CV_BIG_INT(n) n##LL +#define CV_BIG_UINT(n) n##ULL +#endif + +#ifndef HAVE_IPL +typedef unsigned char uchar; +typedef unsigned short ushort; +#endif + +typedef signed char schar; + +/* special informative macros for wrapper generators */ +#define CV_CARRAY(counter) +#define CV_CUSTOM_CARRAY(args) +#define CV_EXPORTS_W CV_EXPORTS +#define CV_EXPORTS_W_SIMPLE CV_EXPORTS +#define CV_EXPORTS_AS(synonym) CV_EXPORTS +#define CV_EXPORTS_W_MAP CV_EXPORTS +#define CV_IN_OUT +#define CV_OUT +#define CV_PROP +#define CV_PROP_RW +#define CV_WRAP +#define CV_WRAP_AS(synonym) +#define CV_WRAP_DEFAULT(value) + +/* CvArr* is used to pass arbitrary + * array-like data structures + * into functions where the particular + * array type is recognized at runtime: + */ +typedef void CvArr; + +typedef union Cv32suf +{ + int i; + unsigned u; + float f; +} +Cv32suf; + +typedef union Cv64suf +{ + int64 i; + uint64 u; + double f; +} +Cv64suf; + +typedef int CVStatus; + +enum { + CV_StsOk= 0, /* everithing is ok */ + CV_StsBackTrace= -1, /* pseudo error for back trace */ + CV_StsError= -2, /* unknown /unspecified error */ + CV_StsInternal= -3, /* internal error (bad state) */ + CV_StsNoMem= -4, /* insufficient memory */ + CV_StsBadArg= -5, /* function arg/param is bad */ + CV_StsBadFunc= -6, /* unsupported function */ + CV_StsNoConv= -7, /* iter. 
didn't converge */ + CV_StsAutoTrace= -8, /* tracing */ + CV_HeaderIsNull= -9, /* image header is NULL */ + CV_BadImageSize= -10, /* image size is invalid */ + CV_BadOffset= -11, /* offset is invalid */ + CV_BadDataPtr= -12, /**/ + CV_BadStep= -13, /**/ + CV_BadModelOrChSeq= -14, /**/ + CV_BadNumChannels= -15, /**/ + CV_BadNumChannel1U= -16, /**/ + CV_BadDepth= -17, /**/ + CV_BadAlphaChannel= -18, /**/ + CV_BadOrder= -19, /**/ + CV_BadOrigin= -20, /**/ + CV_BadAlign= -21, /**/ + CV_BadCallBack= -22, /**/ + CV_BadTileSize= -23, /**/ + CV_BadCOI= -24, /**/ + CV_BadROISize= -25, /**/ + CV_MaskIsTiled= -26, /**/ + CV_StsNullPtr= -27, /* null pointer */ + CV_StsVecLengthErr= -28, /* incorrect vector length */ + CV_StsFilterStructContentErr= -29, /* incorr. filter structure content */ + CV_StsKernelStructContentErr= -30, /* incorr. transform kernel content */ + CV_StsFilterOffsetErr= -31, /* incorrect filter ofset value */ + CV_StsBadSize= -201, /* the input/output structure size is incorrect */ + CV_StsDivByZero= -202, /* division by zero */ + CV_StsInplaceNotSupported= -203, /* in-place operation is not supported */ + CV_StsObjectNotFound= -204, /* request can't be completed */ + CV_StsUnmatchedFormats= -205, /* formats of input/output arrays differ */ + CV_StsBadFlag= -206, /* flag is wrong or not supported */ + CV_StsBadPoint= -207, /* bad CvPoint */ + CV_StsBadMask= -208, /* bad format of mask (neither 8uC1 nor 8sC1)*/ + CV_StsUnmatchedSizes= -209, /* sizes of input/output structures do not match */ + CV_StsUnsupportedFormat= -210, /* the data format/type is not supported by the function*/ + CV_StsOutOfRange= -211, /* some of parameters are out of range */ + CV_StsParseError= -212, /* invalid syntax/structure of the parsed file */ + CV_StsNotImplemented= -213, /* the requested function/feature is not implemented */ + CV_StsBadMemBlock= -214, /* an allocated block has been corrupted */ + CV_StsAssert= -215, /* assertion failed */ + CV_GpuNotSupported= -216, + 
CV_GpuApiCallError= -217, + CV_GpuNppCallError= -218, + CV_GpuCufftCallError= -219 +}; + +/****************************************************************************************\ +* Common macros and inline functions * +\****************************************************************************************/ + +#define CV_PI 3.1415926535897932384626433832795 +#define CV_LOG2 0.69314718055994530941723212145818 + +#define CV_SWAP(a,b,t) ((t) = (a), (a) = (b), (b) = (t)) + +#ifndef MIN +#define MIN(a,b) ((a) > (b) ? (b) : (a)) +#endif + +#ifndef MAX +#define MAX(a,b) ((a) < (b) ? (b) : (a)) +#endif + +/* min & max without jumps */ +#define CV_IMIN(a, b) ((a) ^ (((a)^(b)) & (((a) < (b)) - 1))) + +#define CV_IMAX(a, b) ((a) ^ (((a)^(b)) & (((a) > (b)) - 1))) + +/* absolute value without jumps */ +#ifndef __cplusplus +#define CV_IABS(a) (((a) ^ ((a) < 0 ? -1 : 0)) - ((a) < 0 ? -1 : 0)) +#else +#define CV_IABS(a) abs(a) +#endif +#define CV_CMP(a,b) (((a) > (b)) - ((a) < (b))) +#define CV_SIGN(a) CV_CMP((a),0) + +CV_INLINE int cvRound( double value ) +{ +#if (defined _MSC_VER && defined _M_X64) || (defined __GNUC__ && defined __x86_64__ && !defined __APPLE__) + __m128d t = _mm_set_sd( value ); + return _mm_cvtsd_si32(t); +#elif defined _MSC_VER && defined _M_IX86 + int t; + __asm + { + fld value; + fistp t; + } + return t; +#elif defined HAVE_LRINT || defined CV_ICC || defined __XXGNUC__ + return (int)lrint(value); +#else + // while this is not IEEE754-compliant rounding, it's usually a good enough approximation + return (int)(value + (value >= 0 ? 
0.5 : -0.5)); +#endif +} + + +CV_INLINE int cvFloor( double value ) +{ +#ifdef __GNUC__ + int i = (int)value; + return i - (i > value); +#elif defined _MSC_VER && defined _M_X64 + __m128d t = _mm_set_sd( value ); + int i = _mm_cvtsd_si32(t); + return i - _mm_movemask_pd(_mm_cmplt_sd(t, _mm_cvtsi32_sd(t,i))); +#else + int i = cvRound(value); + Cv32suf diff; + diff.f = (float)(value - i); + return i - (diff.i < 0); +#endif +} + + +CV_INLINE int cvCeil( double value ) +{ +#ifdef __GNUC__ + int i = (int)value; + return i + (i < value); +#elif defined _MSC_VER && defined _M_X64 + __m128d t = _mm_set_sd( value ); + int i = _mm_cvtsd_si32(t); + return i + _mm_movemask_pd(_mm_cmplt_sd(_mm_cvtsi32_sd(t,i), t)); +#else + int i = cvRound(value); + Cv32suf diff; + diff.f = (float)(i - value); + return i + (diff.i < 0); +#endif +} + +#define cvInvSqrt(value) ((float)(1./sqrt(value))) +#define cvSqrt(value) ((float)sqrt(value)) + +CV_INLINE int cvIsNaN( double value ) +{ +#if 1/*defined _MSC_VER || defined __BORLANDC__ + return _isnan(value); +#elif defined __GNUC__ + return isnan(value); +#else*/ + Cv64suf ieee754; + ieee754.f = value; + return ((unsigned)(ieee754.u >> 32) & 0x7fffffff) + + ((unsigned)ieee754.u != 0) > 0x7ff00000; +#endif +} + + +CV_INLINE int cvIsInf( double value ) +{ +#if 1/*defined _MSC_VER || defined __BORLANDC__ + return !_finite(value); +#elif defined __GNUC__ + return isinf(value); +#else*/ + Cv64suf ieee754; + ieee754.f = value; + return ((unsigned)(ieee754.u >> 32) & 0x7fffffff) == 0x7ff00000 && + (unsigned)ieee754.u == 0; +#endif +} + + +/*************** Random number generation *******************/ + +typedef uint64 CvRNG; + +#define CV_RNG_COEFF 4164903690U + +CV_INLINE CvRNG cvRNG( int64 seed CV_DEFAULT(-1)) +{ + CvRNG rng = seed ? 
(uint64)seed : (uint64)(int64)-1; + return rng; +} + +/* Return random 32-bit unsigned integer: */ +CV_INLINE unsigned cvRandInt( CvRNG* rng ) +{ + uint64 temp = *rng; + temp = (uint64)(unsigned)temp*CV_RNG_COEFF + (temp >> 32); + *rng = temp; + return (unsigned)temp; +} + +/* Returns random floating-point number between 0 and 1: */ +CV_INLINE double cvRandReal( CvRNG* rng ) +{ + return cvRandInt(rng)*2.3283064365386962890625e-10 /* 2^-32 */; +} + +/****************************************************************************************\ +* Image type (IplImage) * +\****************************************************************************************/ + +#ifndef HAVE_IPL + +/* + * The following definitions (until #endif) + * is an extract from IPL headers. + * Copyright (c) 1995 Intel Corporation. + */ +#define IPL_DEPTH_SIGN 0x80000000 + +#define IPL_DEPTH_1U 1 +#define IPL_DEPTH_8U 8 +#define IPL_DEPTH_16U 16 +#define IPL_DEPTH_32F 32 + +#define IPL_DEPTH_8S (IPL_DEPTH_SIGN| 8) +#define IPL_DEPTH_16S (IPL_DEPTH_SIGN|16) +#define IPL_DEPTH_32S (IPL_DEPTH_SIGN|32) + +#define IPL_DATA_ORDER_PIXEL 0 +#define IPL_DATA_ORDER_PLANE 1 + +#define IPL_ORIGIN_TL 0 +#define IPL_ORIGIN_BL 1 + +#define IPL_ALIGN_4BYTES 4 +#define IPL_ALIGN_8BYTES 8 +#define IPL_ALIGN_16BYTES 16 +#define IPL_ALIGN_32BYTES 32 + +#define IPL_ALIGN_DWORD IPL_ALIGN_4BYTES +#define IPL_ALIGN_QWORD IPL_ALIGN_8BYTES + +#define IPL_BORDER_CONSTANT 0 +#define IPL_BORDER_REPLICATE 1 +#define IPL_BORDER_REFLECT 2 +#define IPL_BORDER_WRAP 3 + +typedef struct _IplImage +{ + int nSize; /* sizeof(IplImage) */ + int ID; /* version (=0)*/ + int nChannels; /* Most of OpenCV functions support 1,2,3 or 4 channels */ + int alphaChannel; /* Ignored by OpenCV */ + int depth; /* Pixel depth in bits: IPL_DEPTH_8U, IPL_DEPTH_8S, IPL_DEPTH_16S, + IPL_DEPTH_32S, IPL_DEPTH_32F and IPL_DEPTH_64F are supported. 
*/ + char colorModel[4]; /* Ignored by OpenCV */ + char channelSeq[4]; /* ditto */ + int dataOrder; /* 0 - interleaved color channels, 1 - separate color channels. + cvCreateImage can only create interleaved images */ + int origin; /* 0 - top-left origin, + 1 - bottom-left origin (Windows bitmaps style). */ + int align; /* Alignment of image rows (4 or 8). + OpenCV ignores it and uses widthStep instead. */ + int width; /* Image width in pixels. */ + int height; /* Image height in pixels. */ + struct _IplROI *roi; /* Image ROI. If NULL, the whole image is selected. */ + struct _IplImage *maskROI; /* Must be NULL. */ + void *imageId; /* " " */ + struct _IplTileInfo *tileInfo; /* " " */ + int imageSize; /* Image data size in bytes + (==image->height*image->widthStep + in case of interleaved data)*/ + char *imageData; /* Pointer to aligned image data. */ + int widthStep; /* Size of aligned image row in bytes. */ + int BorderMode[4]; /* Ignored by OpenCV. */ + int BorderConst[4]; /* Ditto. */ + char *imageDataOrigin; /* Pointer to very origin of image data + (not necessarily aligned) - + needed for correct deallocation */ +} +IplImage; + +typedef struct _IplTileInfo IplTileInfo; + +typedef struct _IplROI +{ + int coi; /* 0 - no COI (all channels are selected), 1 - 0th channel is selected ...*/ + int xOffset; + int yOffset; + int width; + int height; +} +IplROI; + +typedef struct _IplConvKernel +{ + int nCols; + int nRows; + int anchorX; + int anchorY; + int *values; + int nShiftR; +} +IplConvKernel; + +typedef struct _IplConvKernelFP +{ + int nCols; + int nRows; + int anchorX; + int anchorY; + float *values; +} +IplConvKernelFP; + +#define IPL_IMAGE_HEADER 1 +#define IPL_IMAGE_DATA 2 +#define IPL_IMAGE_ROI 4 + +#endif/*HAVE_IPL*/ + +/* extra border mode */ +#define IPL_BORDER_REFLECT_101 4 +#define IPL_BORDER_TRANSPARENT 5 + +#define IPL_IMAGE_MAGIC_VAL ((int)sizeof(IplImage)) +#define CV_TYPE_NAME_IMAGE "opencv-image" + +#define CV_IS_IMAGE_HDR(img) \ + ((img) != NULL 
&& ((const IplImage*)(img))->nSize == sizeof(IplImage)) + +#define CV_IS_IMAGE(img) \ + (CV_IS_IMAGE_HDR(img) && ((IplImage*)img)->imageData != NULL) + +/* for storing double-precision + floating point data in IplImage's */ +#define IPL_DEPTH_64F 64 + +/* get reference to pixel at (col,row), + for multi-channel images (col) should be multiplied by number of channels */ +#define CV_IMAGE_ELEM( image, elemtype, row, col ) \ + (((elemtype*)((image)->imageData + (image)->widthStep*(row)))[(col)]) + +/****************************************************************************************\ +* Matrix type (CvMat) * +\****************************************************************************************/ + +#define CV_CN_MAX 512 +#define CV_CN_SHIFT 3 +#define CV_DEPTH_MAX (1 << CV_CN_SHIFT) + +#define CV_8U 0 +#define CV_8S 1 +#define CV_16U 2 +#define CV_16S 3 +#define CV_32S 4 +#define CV_32F 5 +#define CV_64F 6 +#define CV_USRTYPE1 7 + +#define CV_MAT_DEPTH_MASK (CV_DEPTH_MAX - 1) +#define CV_MAT_DEPTH(flags) ((flags) & CV_MAT_DEPTH_MASK) + +#define CV_MAKETYPE(depth,cn) (CV_MAT_DEPTH(depth) + (((cn)-1) << CV_CN_SHIFT)) +#define CV_MAKE_TYPE CV_MAKETYPE + +#define CV_8UC1 CV_MAKETYPE(CV_8U,1) +#define CV_8UC2 CV_MAKETYPE(CV_8U,2) +#define CV_8UC3 CV_MAKETYPE(CV_8U,3) +#define CV_8UC4 CV_MAKETYPE(CV_8U,4) +#define CV_8UC(n) CV_MAKETYPE(CV_8U,(n)) + +#define CV_8SC1 CV_MAKETYPE(CV_8S,1) +#define CV_8SC2 CV_MAKETYPE(CV_8S,2) +#define CV_8SC3 CV_MAKETYPE(CV_8S,3) +#define CV_8SC4 CV_MAKETYPE(CV_8S,4) +#define CV_8SC(n) CV_MAKETYPE(CV_8S,(n)) + +#define CV_16UC1 CV_MAKETYPE(CV_16U,1) +#define CV_16UC2 CV_MAKETYPE(CV_16U,2) +#define CV_16UC3 CV_MAKETYPE(CV_16U,3) +#define CV_16UC4 CV_MAKETYPE(CV_16U,4) +#define CV_16UC(n) CV_MAKETYPE(CV_16U,(n)) + +#define CV_16SC1 CV_MAKETYPE(CV_16S,1) +#define CV_16SC2 CV_MAKETYPE(CV_16S,2) +#define CV_16SC3 CV_MAKETYPE(CV_16S,3) +#define CV_16SC4 CV_MAKETYPE(CV_16S,4) +#define CV_16SC(n) CV_MAKETYPE(CV_16S,(n)) + +#define CV_32SC1 
CV_MAKETYPE(CV_32S,1) +#define CV_32SC2 CV_MAKETYPE(CV_32S,2) +#define CV_32SC3 CV_MAKETYPE(CV_32S,3) +#define CV_32SC4 CV_MAKETYPE(CV_32S,4) +#define CV_32SC(n) CV_MAKETYPE(CV_32S,(n)) + +#define CV_32FC1 CV_MAKETYPE(CV_32F,1) +#define CV_32FC2 CV_MAKETYPE(CV_32F,2) +#define CV_32FC3 CV_MAKETYPE(CV_32F,3) +#define CV_32FC4 CV_MAKETYPE(CV_32F,4) +#define CV_32FC(n) CV_MAKETYPE(CV_32F,(n)) + +#define CV_64FC1 CV_MAKETYPE(CV_64F,1) +#define CV_64FC2 CV_MAKETYPE(CV_64F,2) +#define CV_64FC3 CV_MAKETYPE(CV_64F,3) +#define CV_64FC4 CV_MAKETYPE(CV_64F,4) +#define CV_64FC(n) CV_MAKETYPE(CV_64F,(n)) + +#define CV_AUTO_STEP 0x7fffffff +#define CV_WHOLE_ARR cvSlice( 0, 0x3fffffff ) + +#define CV_MAT_CN_MASK ((CV_CN_MAX - 1) << CV_CN_SHIFT) +#define CV_MAT_CN(flags) ((((flags) & CV_MAT_CN_MASK) >> CV_CN_SHIFT) + 1) +#define CV_MAT_TYPE_MASK (CV_DEPTH_MAX*CV_CN_MAX - 1) +#define CV_MAT_TYPE(flags) ((flags) & CV_MAT_TYPE_MASK) +#define CV_MAT_CONT_FLAG_SHIFT 14 +#define CV_MAT_CONT_FLAG (1 << CV_MAT_CONT_FLAG_SHIFT) +#define CV_IS_MAT_CONT(flags) ((flags) & CV_MAT_CONT_FLAG) +#define CV_IS_CONT_MAT CV_IS_MAT_CONT +#define CV_SUBMAT_FLAG_SHIFT 15 +#define CV_SUBMAT_FLAG (1 << CV_SUBMAT_FLAG_SHIFT) +#define CV_IS_SUBMAT(flags) ((flags) & CV_MAT_SUBMAT_FLAG) + +#define CV_MAGIC_MASK 0xFFFF0000 +#define CV_MAT_MAGIC_VAL 0x42420000 +#define CV_TYPE_NAME_MAT "opencv-matrix" + +typedef struct CvMat +{ + int type; + int step; + + /* for internal use only */ + int* refcount; + int hdr_refcount; + + union + { + uchar* ptr; + short* s; + int* i; + float* fl; + double* db; + } data; + +#ifdef __cplusplus + union + { + int rows; + int height; + }; + + union + { + int cols; + int width; + }; +#else + int rows; + int cols; +#endif + +} +CvMat; + + +#define CV_IS_MAT_HDR(mat) \ + ((mat) != NULL && \ + (((const CvMat*)(mat))->type & CV_MAGIC_MASK) == CV_MAT_MAGIC_VAL && \ + ((const CvMat*)(mat))->cols > 0 && ((const CvMat*)(mat))->rows > 0) + +#define CV_IS_MAT_HDR_Z(mat) \ + ((mat) != NULL && \ 
+ (((const CvMat*)(mat))->type & CV_MAGIC_MASK) == CV_MAT_MAGIC_VAL && \ + ((const CvMat*)(mat))->cols >= 0 && ((const CvMat*)(mat))->rows >= 0) + +#define CV_IS_MAT(mat) \ + (CV_IS_MAT_HDR(mat) && ((const CvMat*)(mat))->data.ptr != NULL) + +#define CV_IS_MASK_ARR(mat) \ + (((mat)->type & (CV_MAT_TYPE_MASK & ~CV_8SC1)) == 0) + +#define CV_ARE_TYPES_EQ(mat1, mat2) \ + ((((mat1)->type ^ (mat2)->type) & CV_MAT_TYPE_MASK) == 0) + +#define CV_ARE_CNS_EQ(mat1, mat2) \ + ((((mat1)->type ^ (mat2)->type) & CV_MAT_CN_MASK) == 0) + +#define CV_ARE_DEPTHS_EQ(mat1, mat2) \ + ((((mat1)->type ^ (mat2)->type) & CV_MAT_DEPTH_MASK) == 0) + +#define CV_ARE_SIZES_EQ(mat1, mat2) \ + ((mat1)->rows == (mat2)->rows && (mat1)->cols == (mat2)->cols) + +#define CV_IS_MAT_CONST(mat) \ + (((mat)->rows|(mat)->cols) == 1) + +/* Size of each channel item, + 0x124489 = 1000 0100 0100 0010 0010 0001 0001 ~ array of sizeof(arr_type_elem) */ +#define CV_ELEM_SIZE1(type) \ + ((((sizeof(size_t)<<28)|0x8442211) >> CV_MAT_DEPTH(type)*4) & 15) + +/* 0x3a50 = 11 10 10 01 01 00 00 ~ array of log2(sizeof(arr_type_elem)) */ +#define CV_ELEM_SIZE(type) \ + (CV_MAT_CN(type) << ((((sizeof(size_t)/4+1)*16384|0x3a50) >> CV_MAT_DEPTH(type)*2) & 3)) + +#define IPL2CV_DEPTH(depth) \ + ((((CV_8U)+(CV_16U<<4)+(CV_32F<<8)+(CV_64F<<16)+(CV_8S<<20)+ \ + (CV_16S<<24)+(CV_32S<<28)) >> ((((depth) & 0xF0) >> 2) + \ + (((depth) & IPL_DEPTH_SIGN) ? 20 : 0))) & 15) + +/* Inline constructor. No data is allocated internally!!! 
+ * (Use together with cvCreateData, or use cvCreateMat instead to + * get a matrix with allocated data): + */ +CV_INLINE CvMat cvMat( int rows, int cols, int type, void* data CV_DEFAULT(NULL)) +{ + CvMat m; + + assert( (unsigned)CV_MAT_DEPTH(type) <= CV_64F ); + type = CV_MAT_TYPE(type); + m.type = CV_MAT_MAGIC_VAL | CV_MAT_CONT_FLAG | type; + m.cols = cols; + m.rows = rows; + m.step = m.cols*CV_ELEM_SIZE(type); + m.data.ptr = (uchar*)data; + m.refcount = NULL; + m.hdr_refcount = 0; + + return m; +} + + +#define CV_MAT_ELEM_PTR_FAST( mat, row, col, pix_size ) \ + (assert( (unsigned)(row) < (unsigned)(mat).rows && \ + (unsigned)(col) < (unsigned)(mat).cols ), \ + (mat).data.ptr + (size_t)(mat).step*(row) + (pix_size)*(col)) + +#define CV_MAT_ELEM_PTR( mat, row, col ) \ + CV_MAT_ELEM_PTR_FAST( mat, row, col, CV_ELEM_SIZE((mat).type) ) + +#define CV_MAT_ELEM( mat, elemtype, row, col ) \ + (*(elemtype*)CV_MAT_ELEM_PTR_FAST( mat, row, col, sizeof(elemtype))) + + +CV_INLINE double cvmGet( const CvMat* mat, int row, int col ) +{ + int type; + + type = CV_MAT_TYPE(mat->type); + assert( (unsigned)row < (unsigned)mat->rows && + (unsigned)col < (unsigned)mat->cols ); + + if( type == CV_32FC1 ) + return ((float*)(mat->data.ptr + (size_t)mat->step*row))[col]; + else + { + assert( type == CV_64FC1 ); + return ((double*)(mat->data.ptr + (size_t)mat->step*row))[col]; + } +} + + +CV_INLINE void cvmSet( CvMat* mat, int row, int col, double value ) +{ + int type; + type = CV_MAT_TYPE(mat->type); + assert( (unsigned)row < (unsigned)mat->rows && + (unsigned)col < (unsigned)mat->cols ); + + if( type == CV_32FC1 ) + ((float*)(mat->data.ptr + (size_t)mat->step*row))[col] = (float)value; + else + { + assert( type == CV_64FC1 ); + ((double*)(mat->data.ptr + (size_t)mat->step*row))[col] = (double)value; + } +} + + +CV_INLINE int cvIplDepth( int type ) +{ + int depth = CV_MAT_DEPTH(type); + return CV_ELEM_SIZE1(depth)*8 | (depth == CV_8S || depth == CV_16S || + depth == CV_32S ? 
IPL_DEPTH_SIGN : 0); +} + + +/****************************************************************************************\ +* Multi-dimensional dense array (CvMatND) * +\****************************************************************************************/ + +#define CV_MATND_MAGIC_VAL 0x42430000 +#define CV_TYPE_NAME_MATND "opencv-nd-matrix" + +#define CV_MAX_DIM 32 +#define CV_MAX_DIM_HEAP 1024 + +typedef struct CvMatND +{ + int type; + int dims; + + int* refcount; + int hdr_refcount; + + union + { + uchar* ptr; + float* fl; + double* db; + int* i; + short* s; + } data; + + struct + { + int size; + int step; + } + dim[CV_MAX_DIM]; +} +CvMatND; + +#define CV_IS_MATND_HDR(mat) \ + ((mat) != NULL && (((const CvMatND*)(mat))->type & CV_MAGIC_MASK) == CV_MATND_MAGIC_VAL) + +#define CV_IS_MATND(mat) \ + (CV_IS_MATND_HDR(mat) && ((const CvMatND*)(mat))->data.ptr != NULL) + + +/****************************************************************************************\ +* Multi-dimensional sparse array (CvSparseMat) * +\****************************************************************************************/ + +#define CV_SPARSE_MAT_MAGIC_VAL 0x42440000 +#define CV_TYPE_NAME_SPARSE_MAT "opencv-sparse-matrix" + +struct CvSet; + +typedef struct CvSparseMat +{ + int type; + int dims; + int* refcount; + int hdr_refcount; + + struct CvSet* heap; + void** hashtable; + int hashsize; + int valoffset; + int idxoffset; + int size[CV_MAX_DIM]; +} +CvSparseMat; + +#define CV_IS_SPARSE_MAT_HDR(mat) \ + ((mat) != NULL && \ + (((const CvSparseMat*)(mat))->type & CV_MAGIC_MASK) == CV_SPARSE_MAT_MAGIC_VAL) + +#define CV_IS_SPARSE_MAT(mat) \ + CV_IS_SPARSE_MAT_HDR(mat) + +/**************** iteration through a sparse array *****************/ + +typedef struct CvSparseNode +{ + unsigned hashval; + struct CvSparseNode* next; +} +CvSparseNode; + +typedef struct CvSparseMatIterator +{ + CvSparseMat* mat; + CvSparseNode* node; + int curidx; +} +CvSparseMatIterator; + +#define CV_NODE_VAL(mat,node) 
((void*)((uchar*)(node) + (mat)->valoffset)) +#define CV_NODE_IDX(mat,node) ((int*)((uchar*)(node) + (mat)->idxoffset)) + +/****************************************************************************************\ +* Histogram * +\****************************************************************************************/ + +typedef int CvHistType; + +#define CV_HIST_MAGIC_VAL 0x42450000 +#define CV_HIST_UNIFORM_FLAG (1 << 10) + +/* indicates whether bin ranges are set already or not */ +#define CV_HIST_RANGES_FLAG (1 << 11) + +#define CV_HIST_ARRAY 0 +#define CV_HIST_SPARSE 1 +#define CV_HIST_TREE CV_HIST_SPARSE + +/* should be used as a parameter only, + it turns to CV_HIST_UNIFORM_FLAG of hist->type */ +#define CV_HIST_UNIFORM 1 + +typedef struct CvHistogram +{ + int type; + CvArr* bins; + float thresh[CV_MAX_DIM][2]; /* For uniform histograms. */ + float** thresh2; /* For non-uniform histograms. */ + CvMatND mat; /* Embedded matrix header for array histograms. */ +} +CvHistogram; + +#define CV_IS_HIST( hist ) \ + ((hist) != NULL && \ + (((CvHistogram*)(hist))->type & CV_MAGIC_MASK) == CV_HIST_MAGIC_VAL && \ + (hist)->bins != NULL) + +#define CV_IS_UNIFORM_HIST( hist ) \ + (((hist)->type & CV_HIST_UNIFORM_FLAG) != 0) + +#define CV_IS_SPARSE_HIST( hist ) \ + CV_IS_SPARSE_MAT((hist)->bins) + +#define CV_HIST_HAS_RANGES( hist ) \ + (((hist)->type & CV_HIST_RANGES_FLAG) != 0) + +/****************************************************************************************\ +* Other supplementary data type definitions * +\****************************************************************************************/ + +/*************************************** CvRect *****************************************/ + +typedef struct CvRect +{ + int x; + int y; + int width; + int height; +} +CvRect; + +CV_INLINE CvRect cvRect( int x, int y, int width, int height ) +{ + CvRect r; + + r.x = x; + r.y = y; + r.width = width; + r.height = height; + + return r; +} + + +CV_INLINE IplROI 
cvRectToROI( CvRect rect, int coi ) +{ + IplROI roi; + roi.xOffset = rect.x; + roi.yOffset = rect.y; + roi.width = rect.width; + roi.height = rect.height; + roi.coi = coi; + + return roi; +} + + +CV_INLINE CvRect cvROIToRect( IplROI roi ) +{ + return cvRect( roi.xOffset, roi.yOffset, roi.width, roi.height ); +} + +/*********************************** CvTermCriteria *************************************/ + +#define CV_TERMCRIT_ITER 1 +#define CV_TERMCRIT_NUMBER CV_TERMCRIT_ITER +#define CV_TERMCRIT_EPS 2 + +typedef struct CvTermCriteria +{ + int type; /* may be combination of + CV_TERMCRIT_ITER + CV_TERMCRIT_EPS */ + int max_iter; + double epsilon; +} +CvTermCriteria; + +CV_INLINE CvTermCriteria cvTermCriteria( int type, int max_iter, double epsilon ) +{ + CvTermCriteria t; + + t.type = type; + t.max_iter = max_iter; + t.epsilon = (float)epsilon; + + return t; +} + + +/******************************* CvPoint and variants ***********************************/ + +typedef struct CvPoint +{ + int x; + int y; +} +CvPoint; + + +CV_INLINE CvPoint cvPoint( int x, int y ) +{ + CvPoint p; + + p.x = x; + p.y = y; + + return p; +} + + +typedef struct CvPoint2D32f +{ + float x; + float y; +} +CvPoint2D32f; + + +CV_INLINE CvPoint2D32f cvPoint2D32f( double x, double y ) +{ + CvPoint2D32f p; + + p.x = (float)x; + p.y = (float)y; + + return p; +} + + +CV_INLINE CvPoint2D32f cvPointTo32f( CvPoint point ) +{ + return cvPoint2D32f( (float)point.x, (float)point.y ); +} + + +CV_INLINE CvPoint cvPointFrom32f( CvPoint2D32f point ) +{ + CvPoint ipt; + ipt.x = cvRound(point.x); + ipt.y = cvRound(point.y); + + return ipt; +} + + +typedef struct CvPoint3D32f +{ + float x; + float y; + float z; +} +CvPoint3D32f; + + +CV_INLINE CvPoint3D32f cvPoint3D32f( double x, double y, double z ) +{ + CvPoint3D32f p; + + p.x = (float)x; + p.y = (float)y; + p.z = (float)z; + + return p; +} + + +typedef struct CvPoint2D64f +{ + double x; + double y; +} +CvPoint2D64f; + + +CV_INLINE CvPoint2D64f cvPoint2D64f( 
double x, double y ) +{ + CvPoint2D64f p; + + p.x = x; + p.y = y; + + return p; +} + + +typedef struct CvPoint3D64f +{ + double x; + double y; + double z; +} +CvPoint3D64f; + + +CV_INLINE CvPoint3D64f cvPoint3D64f( double x, double y, double z ) +{ + CvPoint3D64f p; + + p.x = x; + p.y = y; + p.z = z; + + return p; +} + + +/******************************** CvSize's & CvBox **************************************/ + +typedef struct +{ + int width; + int height; +} +CvSize; + +CV_INLINE CvSize cvSize( int width, int height ) +{ + CvSize s; + + s.width = width; + s.height = height; + + return s; +} + +typedef struct CvSize2D32f +{ + float width; + float height; +} +CvSize2D32f; + + +CV_INLINE CvSize2D32f cvSize2D32f( double width, double height ) +{ + CvSize2D32f s; + + s.width = (float)width; + s.height = (float)height; + + return s; +} + +typedef struct CvBox2D +{ + CvPoint2D32f center; /* Center of the box. */ + CvSize2D32f size; /* Box width and length. */ + float angle; /* Angle between the horizontal axis */ + /* and the first side (i.e. 
length) in degrees */ +} +CvBox2D; + + +/* Line iterator state: */ +typedef struct CvLineIterator +{ + /* Pointer to the current point: */ + uchar* ptr; + + /* Bresenham algorithm state: */ + int err; + int plus_delta; + int minus_delta; + int plus_step; + int minus_step; +} +CvLineIterator; + + + +/************************************* CvSlice ******************************************/ + +typedef struct CvSlice +{ + int start_index, end_index; +} +CvSlice; + +CV_INLINE CvSlice cvSlice( int start, int end ) +{ + CvSlice slice; + slice.start_index = start; + slice.end_index = end; + + return slice; +} + +#define CV_WHOLE_SEQ_END_INDEX 0x3fffffff +#define CV_WHOLE_SEQ cvSlice(0, CV_WHOLE_SEQ_END_INDEX) + + +/************************************* CvScalar *****************************************/ + +typedef struct CvScalar +{ + double val[4]; +} +CvScalar; + +CV_INLINE CvScalar cvScalar( double val0, double val1 CV_DEFAULT(0), + double val2 CV_DEFAULT(0), double val3 CV_DEFAULT(0)) +{ + CvScalar scalar; + scalar.val[0] = val0; scalar.val[1] = val1; + scalar.val[2] = val2; scalar.val[3] = val3; + return scalar; +} + + +CV_INLINE CvScalar cvRealScalar( double val0 ) +{ + CvScalar scalar; + scalar.val[0] = val0; + scalar.val[1] = scalar.val[2] = scalar.val[3] = 0; + return scalar; +} + +CV_INLINE CvScalar cvScalarAll( double val0123 ) +{ + CvScalar scalar; + scalar.val[0] = val0123; + scalar.val[1] = val0123; + scalar.val[2] = val0123; + scalar.val[3] = val0123; + return scalar; +} + +/****************************************************************************************\ +* Dynamic Data structures * +\****************************************************************************************/ + +/******************************** Memory storage ****************************************/ + +typedef struct CvMemBlock +{ + struct CvMemBlock* prev; + struct CvMemBlock* next; +} +CvMemBlock; + +#define CV_STORAGE_MAGIC_VAL 0x42890000 + +typedef struct CvMemStorage +{ + int 
signature; + CvMemBlock* bottom; /* First allocated block. */ + CvMemBlock* top; /* Current memory block - top of the stack. */ + struct CvMemStorage* parent; /* We get new blocks from parent as needed. */ + int block_size; /* Block size. */ + int free_space; /* Remaining free space in current block. */ +} +CvMemStorage; + +#define CV_IS_STORAGE(storage) \ + ((storage) != NULL && \ + (((CvMemStorage*)(storage))->signature & CV_MAGIC_MASK) == CV_STORAGE_MAGIC_VAL) + + +typedef struct CvMemStoragePos +{ + CvMemBlock* top; + int free_space; +} +CvMemStoragePos; + + +/*********************************** Sequence *******************************************/ + +typedef struct CvSeqBlock +{ + struct CvSeqBlock* prev; /* Previous sequence block. */ + struct CvSeqBlock* next; /* Next sequence block. */ + int start_index; /* Index of the first element in the block + */ + /* sequence->first->start_index. */ + int count; /* Number of elements in the block. */ + schar* data; /* Pointer to the first element of the block. */ +} +CvSeqBlock; + + +#define CV_TREE_NODE_FIELDS(node_type) \ + int flags; /* Miscellaneous flags. */ \ + int header_size; /* Size of sequence header. */ \ + struct node_type* h_prev; /* Previous sequence. */ \ + struct node_type* h_next; /* Next sequence. */ \ + struct node_type* v_prev; /* 2nd previous sequence. */ \ + struct node_type* v_next /* 2nd next sequence. */ + +/* + Read/Write sequence. + Elements can be dynamically inserted to or deleted from the sequence. +*/ +#define CV_SEQUENCE_FIELDS() \ + CV_TREE_NODE_FIELDS(CvSeq); \ + int total; /* Total number of elements. */ \ + int elem_size; /* Size of sequence element in bytes. */ \ + schar* block_max; /* Maximal bound of the last block. */ \ + schar* ptr; /* Current write pointer. */ \ + int delta_elems; /* Grow seq this many at a time. */ \ + CvMemStorage* storage; /* Where the seq is stored. */ \ + CvSeqBlock* free_blocks; /* Free blocks list. 
*/ \ + CvSeqBlock* first; /* Pointer to the first sequence block. */ + +typedef struct CvSeq +{ + CV_SEQUENCE_FIELDS() +} +CvSeq; + +#define CV_TYPE_NAME_SEQ "opencv-sequence" +#define CV_TYPE_NAME_SEQ_TREE "opencv-sequence-tree" + +/*************************************** Set ********************************************/ +/* + Set. + Order is not preserved. There can be gaps between sequence elements. + After the element has been inserted it stays in the same place all the time. + The MSB(most-significant or sign bit) of the first field (flags) is 0 iff the element exists. +*/ +#define CV_SET_ELEM_FIELDS(elem_type) \ + int flags; \ + struct elem_type* next_free; + +typedef struct CvSetElem +{ + CV_SET_ELEM_FIELDS(CvSetElem) +} +CvSetElem; + +#define CV_SET_FIELDS() \ + CV_SEQUENCE_FIELDS() \ + CvSetElem* free_elems; \ + int active_count; + +typedef struct CvSet +{ + CV_SET_FIELDS() +} +CvSet; + + +#define CV_SET_ELEM_IDX_MASK ((1 << 26) - 1) +#define CV_SET_ELEM_FREE_FLAG (1 << (sizeof(int)*8-1)) + +/* Checks whether the element pointed by ptr belongs to a set or not */ +#define CV_IS_SET_ELEM( ptr ) (((CvSetElem*)(ptr))->flags >= 0) + +/************************************* Graph ********************************************/ + +/* + We represent a graph as a set of vertices. + Vertices contain their adjacency lists (more exactly, pointers to first incoming or + outcoming edge (or 0 if isolated vertex)). Edges are stored in another set. + There is a singly-linked list of incoming/outcoming edges for each vertex. + + Each edge consists of + + o Two pointers to the starting and ending vertices + (vtx[0] and vtx[1] respectively). + + A graph may be oriented or not. In the latter case, edges between + vertex i to vertex j are not distinguished during search operations. 
+ + o Two pointers to next edges for the starting and ending vertices, where + next[0] points to the next edge in the vtx[0] adjacency list and + next[1] points to the next edge in the vtx[1] adjacency list. +*/ +#define CV_GRAPH_EDGE_FIELDS() \ + int flags; \ + float weight; \ + struct CvGraphEdge* next[2]; \ + struct CvGraphVtx* vtx[2]; + + +#define CV_GRAPH_VERTEX_FIELDS() \ + int flags; \ + struct CvGraphEdge* first; + + +typedef struct CvGraphEdge +{ + CV_GRAPH_EDGE_FIELDS() +} +CvGraphEdge; + +typedef struct CvGraphVtx +{ + CV_GRAPH_VERTEX_FIELDS() +} +CvGraphVtx; + +typedef struct CvGraphVtx2D +{ + CV_GRAPH_VERTEX_FIELDS() + CvPoint2D32f* ptr; +} +CvGraphVtx2D; + +/* + Graph is "derived" from the set (this is set a of vertices) + and includes another set (edges) +*/ +#define CV_GRAPH_FIELDS() \ + CV_SET_FIELDS() \ + CvSet* edges; + +typedef struct CvGraph +{ + CV_GRAPH_FIELDS() +} +CvGraph; + +#define CV_TYPE_NAME_GRAPH "opencv-graph" + +/*********************************** Chain/Countour *************************************/ + +typedef struct CvChain +{ + CV_SEQUENCE_FIELDS() + CvPoint origin; +} +CvChain; + +#define CV_CONTOUR_FIELDS() \ + CV_SEQUENCE_FIELDS() \ + CvRect rect; \ + int color; \ + int reserved[3]; + +typedef struct CvContour +{ + CV_CONTOUR_FIELDS() +} +CvContour; + +typedef CvContour CvPoint2DSeq; + +/****************************************************************************************\ +* Sequence types * +\****************************************************************************************/ + +#define CV_SEQ_MAGIC_VAL 0x42990000 + +#define CV_IS_SEQ(seq) \ + ((seq) != NULL && (((CvSeq*)(seq))->flags & CV_MAGIC_MASK) == CV_SEQ_MAGIC_VAL) + +#define CV_SET_MAGIC_VAL 0x42980000 +#define CV_IS_SET(set) \ + ((set) != NULL && (((CvSeq*)(set))->flags & CV_MAGIC_MASK) == CV_SET_MAGIC_VAL) + +#define CV_SEQ_ELTYPE_BITS 12 +#define CV_SEQ_ELTYPE_MASK ((1 << CV_SEQ_ELTYPE_BITS) - 1) + +#define CV_SEQ_ELTYPE_POINT CV_32SC2 /* (x,y) */ 
+#define CV_SEQ_ELTYPE_CODE CV_8UC1 /* freeman code: 0..7 */ +#define CV_SEQ_ELTYPE_GENERIC 0 +#define CV_SEQ_ELTYPE_PTR CV_USRTYPE1 +#define CV_SEQ_ELTYPE_PPOINT CV_SEQ_ELTYPE_PTR /* &(x,y) */ +#define CV_SEQ_ELTYPE_INDEX CV_32SC1 /* #(x,y) */ +#define CV_SEQ_ELTYPE_GRAPH_EDGE 0 /* &next_o, &next_d, &vtx_o, &vtx_d */ +#define CV_SEQ_ELTYPE_GRAPH_VERTEX 0 /* first_edge, &(x,y) */ +#define CV_SEQ_ELTYPE_TRIAN_ATR 0 /* vertex of the binary tree */ +#define CV_SEQ_ELTYPE_CONNECTED_COMP 0 /* connected component */ +#define CV_SEQ_ELTYPE_POINT3D CV_32FC3 /* (x,y,z) */ + +#define CV_SEQ_KIND_BITS 2 +#define CV_SEQ_KIND_MASK (((1 << CV_SEQ_KIND_BITS) - 1)<<CV_SEQ_ELTYPE_BITS) + +/* types of sequences */ +#define CV_SEQ_KIND_GENERIC (0 << CV_SEQ_ELTYPE_BITS) +#define CV_SEQ_KIND_CURVE (1 << CV_SEQ_ELTYPE_BITS) +#define CV_SEQ_KIND_BIN_TREE (2 << CV_SEQ_ELTYPE_BITS) + +/* types of sparse sequences (sets) */ +#define CV_SEQ_KIND_GRAPH (1 << CV_SEQ_ELTYPE_BITS) +#define CV_SEQ_KIND_SUBDIV2D (2 << CV_SEQ_ELTYPE_BITS) + +#define CV_SEQ_FLAG_SHIFT (CV_SEQ_KIND_BITS + CV_SEQ_ELTYPE_BITS) + +/* flags for curves */ +#define CV_SEQ_FLAG_CLOSED (1 << CV_SEQ_FLAG_SHIFT) +#define CV_SEQ_FLAG_SIMPLE (0 << CV_SEQ_FLAG_SHIFT) +#define CV_SEQ_FLAG_CONVEX (0 << CV_SEQ_FLAG_SHIFT) +#define CV_SEQ_FLAG_HOLE (2 << CV_SEQ_FLAG_SHIFT) + +/* flags for graphs */ +#define CV_GRAPH_FLAG_ORIENTED (1 << CV_SEQ_FLAG_SHIFT) + +#define CV_GRAPH CV_SEQ_KIND_GRAPH +#define CV_ORIENTED_GRAPH (CV_SEQ_KIND_GRAPH|CV_GRAPH_FLAG_ORIENTED) + +/* point sets */ +#define CV_SEQ_POINT_SET (CV_SEQ_KIND_GENERIC| CV_SEQ_ELTYPE_POINT) +#define CV_SEQ_POINT3D_SET (CV_SEQ_KIND_GENERIC| CV_SEQ_ELTYPE_POINT3D) +#define CV_SEQ_POLYLINE (CV_SEQ_KIND_CURVE | CV_SEQ_ELTYPE_POINT) +#define CV_SEQ_POLYGON (CV_SEQ_FLAG_CLOSED | CV_SEQ_POLYLINE ) +#define CV_SEQ_CONTOUR CV_SEQ_POLYGON +#define CV_SEQ_SIMPLE_POLYGON (CV_SEQ_FLAG_SIMPLE | CV_SEQ_POLYGON ) + +/* chain-coded curves */ +#define CV_SEQ_CHAIN (CV_SEQ_KIND_CURVE | CV_SEQ_ELTYPE_CODE) +#define CV_SEQ_CHAIN_CONTOUR (CV_SEQ_FLAG_CLOSED | CV_SEQ_CHAIN) + +/* binary tree for the contour */ +#define CV_SEQ_POLYGON_TREE (CV_SEQ_KIND_BIN_TREE | CV_SEQ_ELTYPE_TRIAN_ATR) + +/* sequence of the connected components */ +#define CV_SEQ_CONNECTED_COMP (CV_SEQ_KIND_GENERIC | CV_SEQ_ELTYPE_CONNECTED_COMP) + +/* sequence of the integer numbers */ +#define CV_SEQ_INDEX (CV_SEQ_KIND_GENERIC | CV_SEQ_ELTYPE_INDEX) + +#define CV_SEQ_ELTYPE( seq ) ((seq)->flags & CV_SEQ_ELTYPE_MASK) +#define CV_SEQ_KIND( seq ) ((seq)->flags & CV_SEQ_KIND_MASK ) + +/* flag checking */ +#define CV_IS_SEQ_INDEX( seq ) ((CV_SEQ_ELTYPE(seq) == CV_SEQ_ELTYPE_INDEX) && \ + (CV_SEQ_KIND(seq) == CV_SEQ_KIND_GENERIC)) + +#define CV_IS_SEQ_CURVE( seq ) (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE) +#define CV_IS_SEQ_CLOSED( seq ) (((seq)->flags & CV_SEQ_FLAG_CLOSED) != 0) +#define CV_IS_SEQ_CONVEX( seq ) 0 +#define CV_IS_SEQ_HOLE( seq ) (((seq)->flags & CV_SEQ_FLAG_HOLE) != 0) +#define CV_IS_SEQ_SIMPLE( seq ) 1 + +/* type checking macros */ +#define CV_IS_SEQ_POINT_SET( seq ) \ + ((CV_SEQ_ELTYPE(seq) == CV_32SC2 || CV_SEQ_ELTYPE(seq) == CV_32FC2)) + +#define CV_IS_SEQ_POINT_SUBSET( seq ) \ + (CV_IS_SEQ_INDEX( seq ) || CV_SEQ_ELTYPE(seq) == CV_SEQ_ELTYPE_PPOINT) + +#define CV_IS_SEQ_POLYLINE( seq ) \ + (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE && CV_IS_SEQ_POINT_SET(seq)) + +#define CV_IS_SEQ_POLYGON( seq ) \ + (CV_IS_SEQ_POLYLINE(seq) && CV_IS_SEQ_CLOSED(seq)) + +#define CV_IS_SEQ_CHAIN( seq ) \ + (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE && (seq)->elem_size == 1) + +#define CV_IS_SEQ_CONTOUR( seq ) \ + (CV_IS_SEQ_CLOSED(seq) && (CV_IS_SEQ_POLYLINE(seq) || CV_IS_SEQ_CHAIN(seq))) + +#define CV_IS_SEQ_CHAIN_CONTOUR( seq ) \ + (CV_IS_SEQ_CHAIN( seq ) && CV_IS_SEQ_CLOSED( seq )) + +#define CV_IS_SEQ_POLYGON_TREE(
seq ) \ + (CV_SEQ_ELTYPE (seq) == CV_SEQ_ELTYPE_TRIAN_ATR && \ + CV_SEQ_KIND( seq ) == CV_SEQ_KIND_BIN_TREE ) + +#define CV_IS_GRAPH( seq ) \ + (CV_IS_SET(seq) && CV_SEQ_KIND((CvSet*)(seq)) == CV_SEQ_KIND_GRAPH) + +#define CV_IS_GRAPH_ORIENTED( seq ) \ + (((seq)->flags & CV_GRAPH_FLAG_ORIENTED) != 0) + +#define CV_IS_SUBDIV2D( seq ) \ + (CV_IS_SET(seq) && CV_SEQ_KIND((CvSet*)(seq)) == CV_SEQ_KIND_SUBDIV2D) + +/****************************************************************************************/ +/* Sequence writer & reader */ +/****************************************************************************************/ + +#define CV_SEQ_WRITER_FIELDS() \ + int header_size; \ + CvSeq* seq; /* the sequence written */ \ + CvSeqBlock* block; /* current block */ \ + schar* ptr; /* pointer to free space */ \ + schar* block_min; /* pointer to the beginning of block*/\ + schar* block_max; /* pointer to the end of block */ + +typedef struct CvSeqWriter +{ + CV_SEQ_WRITER_FIELDS() +} +CvSeqWriter; + + +#define CV_SEQ_READER_FIELDS() \ + int header_size; \ + CvSeq* seq; /* sequence, beign read */ \ + CvSeqBlock* block; /* current block */ \ + schar* ptr; /* pointer to element be read next */ \ + schar* block_min; /* pointer to the beginning of block */\ + schar* block_max; /* pointer to the end of block */ \ + int delta_index;/* = seq->first->start_index */ \ + schar* prev_elem; /* pointer to previous element */ + + +typedef struct CvSeqReader +{ + CV_SEQ_READER_FIELDS() +} +CvSeqReader; + +/****************************************************************************************/ +/* Operations on sequences */ +/****************************************************************************************/ + +#define CV_SEQ_ELEM( seq, elem_type, index ) \ +/* assert gives some guarantee that parameter is valid */ \ +( assert(sizeof((seq)->first[0]) == sizeof(CvSeqBlock) && \ + (seq)->elem_size == sizeof(elem_type)), \ + (elem_type*)((seq)->first && (unsigned)index < \ + 
(unsigned)((seq)->first->count) ? \ + (seq)->first->data + (index) * sizeof(elem_type) : \ + cvGetSeqElem( (CvSeq*)(seq), (index) ))) +#define CV_GET_SEQ_ELEM( elem_type, seq, index ) CV_SEQ_ELEM( (seq), elem_type, (index) ) + +/* Add element to sequence: */ +#define CV_WRITE_SEQ_ELEM_VAR( elem_ptr, writer ) \ +{ \ + if( (writer).ptr >= (writer).block_max ) \ + { \ + cvCreateSeqBlock( &writer); \ + } \ + memcpy((writer).ptr, elem_ptr, (writer).seq->elem_size);\ + (writer).ptr += (writer).seq->elem_size; \ +} + +#define CV_WRITE_SEQ_ELEM( elem, writer ) \ +{ \ + assert( (writer).seq->elem_size == sizeof(elem)); \ + if( (writer).ptr >= (writer).block_max ) \ + { \ + cvCreateSeqBlock( &writer); \ + } \ + assert( (writer).ptr <= (writer).block_max - sizeof(elem));\ + memcpy((writer).ptr, &(elem), sizeof(elem)); \ + (writer).ptr += sizeof(elem); \ +} + + +/* Move reader position forward: */ +#define CV_NEXT_SEQ_ELEM( elem_size, reader ) \ +{ \ + if( ((reader).ptr += (elem_size)) >= (reader).block_max ) \ + { \ + cvChangeSeqBlock( &(reader), 1 ); \ + } \ +} + + +/* Move reader position backward: */ +#define CV_PREV_SEQ_ELEM( elem_size, reader ) \ +{ \ + if( ((reader).ptr -= (elem_size)) < (reader).block_min ) \ + { \ + cvChangeSeqBlock( &(reader), -1 ); \ + } \ +} + +/* Read element and move read position forward: */ +#define CV_READ_SEQ_ELEM( elem, reader ) \ +{ \ + assert( (reader).seq->elem_size == sizeof(elem)); \ + memcpy( &(elem), (reader).ptr, sizeof((elem))); \ + CV_NEXT_SEQ_ELEM( sizeof(elem), reader ) \ +} + +/* Read element and move read position backward: */ +#define CV_REV_READ_SEQ_ELEM( elem, reader ) \ +{ \ + assert( (reader).seq->elem_size == sizeof(elem)); \ + memcpy(&(elem), (reader).ptr, sizeof((elem))); \ + CV_PREV_SEQ_ELEM( sizeof(elem), reader ) \ +} + + +#define CV_READ_CHAIN_POINT( _pt, reader ) \ +{ \ + (_pt) = (reader).pt; \ + if( (reader).ptr ) \ + { \ + CV_READ_SEQ_ELEM( (reader).code, (reader)); \ + assert( ((reader).code & ~7) == 0 ); \ + 
(reader).pt.x += (reader).deltas[(int)(reader).code][0]; \ + (reader).pt.y += (reader).deltas[(int)(reader).code][1]; \ + } \ +} + +#define CV_CURRENT_POINT( reader ) (*((CvPoint*)((reader).ptr))) +#define CV_PREV_POINT( reader ) (*((CvPoint*)((reader).prev_elem))) + +#define CV_READ_EDGE( pt1, pt2, reader ) \ +{ \ + assert( sizeof(pt1) == sizeof(CvPoint) && \ + sizeof(pt2) == sizeof(CvPoint) && \ + reader.seq->elem_size == sizeof(CvPoint)); \ + (pt1) = CV_PREV_POINT( reader ); \ + (pt2) = CV_CURRENT_POINT( reader ); \ + (reader).prev_elem = (reader).ptr; \ + CV_NEXT_SEQ_ELEM( sizeof(CvPoint), (reader)); \ +} + +/************ Graph macros ************/ + +/* Return next graph edge for given vertex: */ +#define CV_NEXT_GRAPH_EDGE( edge, vertex ) \ + (assert((edge)->vtx[0] == (vertex) || (edge)->vtx[1] == (vertex)), \ + (edge)->next[(edge)->vtx[1] == (vertex)]) + + + +/****************************************************************************************\ +* Data structures for persistence (a.k.a serialization) functionality * +\****************************************************************************************/ + +/* "black box" file storage */ +typedef struct CvFileStorage CvFileStorage; + +/* Storage flags: */ +#define CV_STORAGE_READ 0 +#define CV_STORAGE_WRITE 1 +#define CV_STORAGE_WRITE_TEXT CV_STORAGE_WRITE +#define CV_STORAGE_WRITE_BINARY CV_STORAGE_WRITE +#define CV_STORAGE_APPEND 2 + +/* List of attributes: */ +typedef struct CvAttrList +{ + const char** attr; /* NULL-terminated array of (attribute_name,attribute_value) pairs. */ + struct CvAttrList* next; /* Pointer to next chunk of the attributes list. 
*/ +} +CvAttrList; + +CV_INLINE CvAttrList cvAttrList( const char** attr CV_DEFAULT(NULL), + CvAttrList* next CV_DEFAULT(NULL) ) +{ + CvAttrList l; + l.attr = attr; + l.next = next; + + return l; +} + +struct CvTypeInfo; + +#define CV_NODE_NONE 0 +#define CV_NODE_INT 1 +#define CV_NODE_INTEGER CV_NODE_INT +#define CV_NODE_REAL 2 +#define CV_NODE_FLOAT CV_NODE_REAL +#define CV_NODE_STR 3 +#define CV_NODE_STRING CV_NODE_STR +#define CV_NODE_REF 4 /* not used */ +#define CV_NODE_SEQ 5 +#define CV_NODE_MAP 6 +#define CV_NODE_TYPE_MASK 7 + +#define CV_NODE_TYPE(flags) ((flags) & CV_NODE_TYPE_MASK) + +/* file node flags */ +#define CV_NODE_FLOW 8 /* Used only for writing structures in YAML format. */ +#define CV_NODE_USER 16 +#define CV_NODE_EMPTY 32 +#define CV_NODE_NAMED 64 + +#define CV_NODE_IS_INT(flags) (CV_NODE_TYPE(flags) == CV_NODE_INT) +#define CV_NODE_IS_REAL(flags) (CV_NODE_TYPE(flags) == CV_NODE_REAL) +#define CV_NODE_IS_STRING(flags) (CV_NODE_TYPE(flags) == CV_NODE_STRING) +#define CV_NODE_IS_SEQ(flags) (CV_NODE_TYPE(flags) == CV_NODE_SEQ) +#define CV_NODE_IS_MAP(flags) (CV_NODE_TYPE(flags) == CV_NODE_MAP) +#define CV_NODE_IS_COLLECTION(flags) (CV_NODE_TYPE(flags) >= CV_NODE_SEQ) +#define CV_NODE_IS_FLOW(flags) (((flags) & CV_NODE_FLOW) != 0) +#define CV_NODE_IS_EMPTY(flags) (((flags) & CV_NODE_EMPTY) != 0) +#define CV_NODE_IS_USER(flags) (((flags) & CV_NODE_USER) != 0) +#define CV_NODE_HAS_NAME(flags) (((flags) & CV_NODE_NAMED) != 0) + +#define CV_NODE_SEQ_SIMPLE 256 +#define CV_NODE_SEQ_IS_SIMPLE(seq) (((seq)->flags & CV_NODE_SEQ_SIMPLE) != 0) + +typedef struct CvString +{ + int len; + char* ptr; +} +CvString; + +/* All the keys (names) of elements in the readed file storage + are stored in the hash to speed up the lookup operations: */ +typedef struct CvStringHashNode +{ + unsigned hashval; + CvString str; + struct CvStringHashNode* next; +} +CvStringHashNode; + +typedef struct CvGenericHash CvFileNodeHash; + +/* Basic element of the file storage - scalar 
or collection: */ +typedef struct CvFileNode +{ + int tag; + struct CvTypeInfo* info; /* type information + (only for user-defined object, for others it is 0) */ + union + { + double f; /* scalar floating-point number */ + int i; /* scalar integer number */ + CvString str; /* text string */ + CvSeq* seq; /* sequence (ordered collection of file nodes) */ + CvFileNodeHash* map; /* map (collection of named file nodes) */ + } data; +} +CvFileNode; + +#ifdef __cplusplus +extern "C" { +#endif +typedef int (CV_CDECL *CvIsInstanceFunc)( const void* struct_ptr ); +typedef void (CV_CDECL *CvReleaseFunc)( void** struct_dblptr ); +typedef void* (CV_CDECL *CvReadFunc)( CvFileStorage* storage, CvFileNode* node ); +typedef void (CV_CDECL *CvWriteFunc)( CvFileStorage* storage, const char* name, + const void* struct_ptr, CvAttrList attributes ); +typedef void* (CV_CDECL *CvCloneFunc)( const void* struct_ptr ); +#ifdef __cplusplus +} +#endif + +typedef struct CvTypeInfo +{ + int flags; + int header_size; + struct CvTypeInfo* prev; + struct CvTypeInfo* next; + const char* type_name; + CvIsInstanceFunc is_instance; + CvReleaseFunc release; + CvReadFunc read; + CvWriteFunc write; + CvCloneFunc clone; +} +CvTypeInfo; + + +/**** System data types ******/ + +typedef struct CvPluginFuncInfo +{ + void** func_addr; + void* default_func_addr; + const char* func_names; + int search_modules; + int loaded_from; +} +CvPluginFuncInfo; + +typedef struct CvModuleInfo +{ + struct CvModuleInfo* next; + const char* name; + const char* version; + CvPluginFuncInfo* func_tab; +} +CvModuleInfo; + +enum { CV_PARAM_TYPE_INT=0, CV_PARAM_TYPE_REAL=1, CV_PARAM_TYPE_STRING=2, CV_PARAM_TYPE_MAT=3 }; + +#endif /*_CXCORE_TYPES_H_*/ + +/* End of file. 
*/ diff --git a/opencv/core/opencv2/core/version.hpp b/opencv/core/opencv2/core/version.hpp new file mode 100644 index 0000000..0cd25ca --- /dev/null +++ b/opencv/core/opencv2/core/version.hpp @@ -0,0 +1,58 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright( C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +//(including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort(including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* + definition of the current version of OpenCV + Usefull to test in user programs +*/ + +#ifndef __OPENCV_VERSION_HPP__ +#define __OPENCV_VERSION_HPP__ + +#define CV_MAJOR_VERSION 2 +#define CV_MINOR_VERSION 3 +#define CV_SUBMINOR_VERSION 1 + +#define CVAUX_STR_EXP(__A) #__A +#define CVAUX_STR(__A) CVAUX_STR_EXP(__A) +#define CV_VERSION CVAUX_STR(CV_MAJOR_VERSION) "." CVAUX_STR(CV_MINOR_VERSION) "." CVAUX_STR(CV_SUBMINOR_VERSION) + +#endif diff --git a/opencv/core/opencv2/core/wimage.hpp b/opencv/core/opencv2/core/wimage.hpp new file mode 100644 index 0000000..579c009 --- /dev/null +++ b/opencv/core/opencv2/core/wimage.hpp @@ -0,0 +1,621 @@ +/////////////////////////////////////////////////////////////////////////////// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to +// this license. If you do not agree to this license, do not download, +// install, copy or use the software. +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2008, Google, all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation or contributors may not be used to endorse +// or promote products derived from this software without specific +// prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" +// and any express or implied warranties, including, but not limited to, the +// implied warranties of merchantability and fitness for a particular purpose +// are disclaimed. In no event shall the Intel Corporation or contributors be +// liable for any direct, indirect, incidental, special, exemplary, or +// consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. + + +///////////////////////////////////////////////////////////////////////////////// +// +// Image class which provides a thin layer around an IplImage. The goals +// of the class design are: +// 1. All the data has explicit ownership to avoid memory leaks +// 2. No hidden allocations or copies for performance. +// 3. Easy access to OpenCV methods (which will access IPP if available) +// 4. Can easily treat external data as an image +// 5. 
Easy to create images which are subsets of other images +// 6. Fast pixel access which can take advantage of number of channels +// if known at compile time. +// +// The WImage class is the image class which provides the data accessors. +// The 'W' comes from the fact that it is also a wrapper around the popular +// but inconvenient IplImage class. A WImage can be constructed either using a +// WImageBuffer class which allocates and frees the data, +// or using a WImageView class which constructs a subimage or a view into +// external data. The view class does no memory management. Each class +// actually has two versions, one when the number of channels is known at +// compile time and one when it isn't. Using the one with the number of +// channels specified can provide some compile time optimizations by using the +// fact that the number of channels is a constant. +// +// We use the convention (c,r) to refer to column c and row r with (0,0) being +// the upper left corner. This is similar to standard Euclidean coordinates +// with the first coordinate varying in the horizontal direction and the second +// coordinate varying in the vertical direction. +// Thus (c,r) is usually in the domain [0, width) X [0, height) +// +// Example usage: +// WImageBuffer3_b im(5,7); // Make a 5X7 3 channel image of type uchar +// WImageView3_b sub_im(im, 2,2, 3,3); // 3X3 submatrix +// vector vec(10, 3.0f); +// WImageView1_f user_im(&vec[0], 2, 5); // 2X5 image w/ supplied data +// +// im.SetZero(); // same as cvSetZero(im.Ipl()) +// *im(2, 3) = 15; // Modify the element at column 2, row 3 +// MySetRand(&sub_im); +// +// // Copy the second row into the first. This can be done with no memory +// // allocation and will use SSE if IPP is available. 
+// int w = im.Width(); +// im.View(0,0, w,1).CopyFrom(im.View(0,1, w,1)); +// +// // Doesn't care about source of data since using WImage +// void MySetRand(WImage_b* im) { // Works with any number of channels +// for (int r = 0; r < im->Height(); ++r) { +// float* row = im->Row(r); +// for (int c = 0; c < im->Width(); ++c) { +// for (int ch = 0; ch < im->Channels(); ++ch, ++row) { +// *row = uchar(rand() & 255); +// } +// } +// } +// } +// +// Functions that are not part of the basic image allocation, viewing, and +// access should come from OpenCV, except some useful functions that are not +// part of OpenCV can be found in wimage_util.h +#ifndef __OPENCV_CORE_WIMAGE_HPP__ +#define __OPENCV_CORE_WIMAGE_HPP__ + +#include "opencv2/core/core_c.h" + +#ifdef __cplusplus + +namespace cv { + +template <typename T> class WImage; +template <typename T> class WImageBuffer; +template <typename T> class WImageView; + +template <typename T, int C> class WImageC; +template <typename T, int C> class WImageBufferC; +template <typename T, int C> class WImageViewC; + +// Commonly used typedefs. +typedef WImage<uchar> WImage_b; +typedef WImageView<uchar> WImageView_b; +typedef WImageBuffer<uchar> WImageBuffer_b; + +typedef WImageC<uchar, 1> WImage1_b; +typedef WImageViewC<uchar, 1> WImageView1_b; +typedef WImageBufferC<uchar, 1> WImageBuffer1_b; + +typedef WImageC<uchar, 3> WImage3_b; +typedef WImageViewC<uchar, 3> WImageView3_b; +typedef WImageBufferC<uchar, 3> WImageBuffer3_b; + +typedef WImage<float> WImage_f; +typedef WImageView<float> WImageView_f; +typedef WImageBuffer<float> WImageBuffer_f; + +typedef WImageC<float, 1> WImage1_f; +typedef WImageViewC<float, 1> WImageView1_f; +typedef WImageBufferC<float, 1> WImageBuffer1_f; + +typedef WImageC<float, 3> WImage3_f; +typedef WImageViewC<float, 3> WImageView3_f; +typedef WImageBufferC<float, 3> WImageBuffer3_f; + +// There isn't a standard for signed and unsigned short so be more +// explicit in the typename for these cases.
+typedef WImage<short> WImage_16s; +typedef WImageView<short> WImageView_16s; +typedef WImageBuffer<short> WImageBuffer_16s; + +typedef WImageC<short, 1> WImage1_16s; +typedef WImageViewC<short, 1> WImageView1_16s; +typedef WImageBufferC<short, 1> WImageBuffer1_16s; + +typedef WImageC<short, 3> WImage3_16s; +typedef WImageViewC<short, 3> WImageView3_16s; +typedef WImageBufferC<short, 3> WImageBuffer3_16s; + +typedef WImage<ushort> WImage_16u; +typedef WImageView<ushort> WImageView_16u; +typedef WImageBuffer<ushort> WImageBuffer_16u; + +typedef WImageC<ushort, 1> WImage1_16u; +typedef WImageViewC<ushort, 1> WImageView1_16u; +typedef WImageBufferC<ushort, 1> WImageBuffer1_16u; + +typedef WImageC<ushort, 3> WImage3_16u; +typedef WImageViewC<ushort, 3> WImageView3_16u; +typedef WImageBufferC<ushort, 3> WImageBuffer3_16u; + +// +// WImage definitions +// +// This WImage class gives access to the data it refers to. It can be +// constructed either by allocating the data with a WImageBuffer class or +// using the WImageView class to refer to a subimage or outside data. +template<typename T> +class WImage +{ +public: + typedef T BaseType; + + // WImage is an abstract class with no other virtual methods so make the + // destructor virtual. + virtual ~WImage() = 0; + + // Accessors + IplImage* Ipl() {return image_; } + const IplImage* Ipl() const {return image_; } + T* ImageData() { return reinterpret_cast<T*>(image_->imageData); } + const T* ImageData() const { + return reinterpret_cast<const T*>(image_->imageData); + } + + int Width() const {return image_->width; } + int Height() const {return image_->height; } + + // WidthStep is the number of bytes to go to the pixel with the next y coord + int WidthStep() const {return image_->widthStep; } + + int Channels() const {return image_->nChannels; } + int ChannelSize() const {return sizeof(T); } // number of bytes per channel + + // Number of bytes per pixel + int PixelSize() const {return Channels() * ChannelSize(); } + + // Return depth type (e.g. IPL_DEPTH_8U, IPL_DEPTH_32F) which is the number + // of bits per channel and with the signed bit set. + // This is known at compile time using specializations.
+ int Depth() const; + + inline const T* Row(int r) const { + return reinterpret_cast<const T*>(image_->imageData + r*image_->widthStep); + } + + inline T* Row(int r) { + return reinterpret_cast<T*>(image_->imageData + r*image_->widthStep); + } + + // Pixel accessors which returns a pointer to the start of the channel + inline T* operator() (int c, int r) { + return reinterpret_cast<T*>(image_->imageData + r*image_->widthStep) + + c*Channels(); + } + + inline const T* operator() (int c, int r) const { + return reinterpret_cast<const T*>(image_->imageData + r*image_->widthStep) + + c*Channels(); + } + + // Copy the contents from another image which is just a convenience to cvCopy + void CopyFrom(const WImage<T>& src) { cvCopy(src.Ipl(), image_); } + + // Set contents to zero which is just a convenient to cvSetZero + void SetZero() { cvSetZero(image_); } + + // Construct a view into a region of this image + WImageView<T> View(int c, int r, int width, int height); + +protected: + // Disallow copy and assignment + WImage(const WImage&); + void operator=(const WImage&); + + explicit WImage(IplImage* img) : image_(img) { + assert(!img || img->depth == Depth()); + } + + void SetIpl(IplImage* image) { + assert(!image || image->depth == Depth()); + image_ = image; + } + + IplImage* image_; +}; + + + +// Image class when both the pixel type and number of channels +// are known at compile time. This wrapper will speed up some of the operations +// like accessing individual pixels using the () operator.
+template +class WImageC : public WImage +{ +public: + typedef typename WImage::BaseType BaseType; + enum { kChannels = C }; + + explicit WImageC(IplImage* img) : WImage(img) { + assert(!img || img->nChannels == Channels()); + } + + // Construct a view into a region of this image + WImageViewC View(int c, int r, int width, int height); + + // Copy the contents from another image which is just a convenience to cvCopy + void CopyFrom(const WImageC& src) { + cvCopy(src.Ipl(), WImage::image_); + } + + // WImageC is an abstract class with no other virtual methods so make the + // destructor virtual. + virtual ~WImageC() = 0; + + int Channels() const {return C; } + +protected: + // Disallow copy and assignment + WImageC(const WImageC&); + void operator=(const WImageC&); + + void SetIpl(IplImage* image) { + assert(!image || image->depth == WImage::Depth()); + WImage::SetIpl(image); + } +}; + +// +// WImageBuffer definitions +// +// Image class which owns the data, so it can be allocated and is always +// freed. It cannot be copied but can be explicity cloned. +// +template +class WImageBuffer : public WImage +{ +public: + typedef typename WImage::BaseType BaseType; + + // Default constructor which creates an object that can be + WImageBuffer() : WImage(0) {} + + WImageBuffer(int width, int height, int nchannels) : WImage(0) { + Allocate(width, height, nchannels); + } + + // Constructor which takes ownership of a given IplImage so releases + // the image on destruction. + explicit WImageBuffer(IplImage* img) : WImage(img) {} + + // Allocate an image. Does nothing if current size is the same as + // the new size. + void Allocate(int width, int height, int nchannels); + + // Set the data to point to an image, releasing the old data + void SetIpl(IplImage* img) { + ReleaseImage(); + WImage::SetIpl(img); + } + + // Clone an image which reallocates the image if of a different dimension. 
+ void CloneFrom(const WImage& src) { + Allocate(src.Width(), src.Height(), src.Channels()); + CopyFrom(src); + } + + ~WImageBuffer() { + ReleaseImage(); + } + + // Release the image if it isn't null. + void ReleaseImage() { + if (WImage::image_) { + IplImage* image = WImage::image_; + cvReleaseImage(&image); + WImage::SetIpl(0); + } + } + + bool IsNull() const {return WImage::image_ == NULL; } + +private: + // Disallow copy and assignment + WImageBuffer(const WImageBuffer&); + void operator=(const WImageBuffer&); +}; + +// Like a WImageBuffer class but when the number of channels is known +// at compile time. +template +class WImageBufferC : public WImageC +{ +public: + typedef typename WImage::BaseType BaseType; + enum { kChannels = C }; + + // Default constructor which creates an object that can be + WImageBufferC() : WImageC(0) {} + + WImageBufferC(int width, int height) : WImageC(0) { + Allocate(width, height); + } + + // Constructor which takes ownership of a given IplImage so releases + // the image on destruction. + explicit WImageBufferC(IplImage* img) : WImageC(img) {} + + // Allocate an image. Does nothing if current size is the same as + // the new size. + void Allocate(int width, int height); + + // Set the data to point to an image, releasing the old data + void SetIpl(IplImage* img) { + ReleaseImage(); + WImageC::SetIpl(img); + } + + // Clone an image which reallocates the image if of a different dimension. + void CloneFrom(const WImageC& src) { + Allocate(src.Width(), src.Height()); + CopyFrom(src); + } + + ~WImageBufferC() { + ReleaseImage(); + } + + // Release the image if it isn't null. 
+ void ReleaseImage() { + if (WImage::image_) { + IplImage* image = WImage::image_; + cvReleaseImage(&image); + WImageC::SetIpl(0); + } + } + + bool IsNull() const {return WImage::image_ == NULL; } + +private: + // Disallow copy and assignment + WImageBufferC(const WImageBufferC&); + void operator=(const WImageBufferC&); +}; + +// +// WImageView definitions +// +// View into an image class which allows treating a subimage as an image +// or treating external data as an image +// +template +class WImageView : public WImage +{ +public: + typedef typename WImage::BaseType BaseType; + + // Construct a subimage. No checks are done that the subimage lies + // completely inside the original image. + WImageView(WImage* img, int c, int r, int width, int height); + + // Refer to external data. + // If not given width_step assumed to be same as width. + WImageView(T* data, int width, int height, int channels, int width_step = -1); + + // Refer to external data. This does NOT take ownership + // of the supplied IplImage. + WImageView(IplImage* img) : WImage(img) {} + + // Copy constructor + WImageView(const WImage& img) : WImage(0) { + header_ = *(img.Ipl()); + WImage::SetIpl(&header_); + } + + WImageView& operator=(const WImage& img) { + header_ = *(img.Ipl()); + WImage::SetIpl(&header_); + return *this; + } + +protected: + IplImage header_; +}; + + +template +class WImageViewC : public WImageC +{ +public: + typedef typename WImage::BaseType BaseType; + enum { kChannels = C }; + + // Default constructor needed for vectors of views. + WImageViewC(); + + virtual ~WImageViewC() {} + + // Construct a subimage. No checks are done that the subimage lies + // completely inside the original image. + WImageViewC(WImageC* img, + int c, int r, int width, int height); + + // Refer to external data + WImageViewC(T* data, int width, int height, int width_step = -1); + + // Refer to external data. This does NOT take ownership + // of the supplied IplImage. 
+ WImageViewC(IplImage* img) : WImageC(img) {} + + // Copy constructor which does a shallow copy to allow multiple views + // of same data. gcc-4.1.1 gets confused if both versions of + // the constructor and assignment operator are not provided. + WImageViewC(const WImageC& img) : WImageC(0) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + } + WImageViewC(const WImageViewC& img) : WImageC(0) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + } + + WImageViewC& operator=(const WImageC& img) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + return *this; + } + WImageViewC& operator=(const WImageViewC& img) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + return *this; + } + +protected: + IplImage header_; +}; + + +// Specializations for depth +template<> +inline int WImage::Depth() const {return IPL_DEPTH_8U; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_8S; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_16S; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_16U; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_32S; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_32F; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_64F; } + +// +// Pure virtual destructors still need to be defined. 
+// +template inline WImage::~WImage() {} +template inline WImageC::~WImageC() {} + +// +// Allocate ImageData +// +template +inline void WImageBuffer::Allocate(int width, int height, int nchannels) +{ + if (IsNull() || WImage::Width() != width || + WImage::Height() != height || WImage::Channels() != nchannels) { + ReleaseImage(); + WImage::image_ = cvCreateImage(cvSize(width, height), + WImage::Depth(), nchannels); + } +} + +template +inline void WImageBufferC::Allocate(int width, int height) +{ + if (IsNull() || WImage::Width() != width || WImage::Height() != height) { + ReleaseImage(); + WImageC::SetIpl(cvCreateImage(cvSize(width, height),WImage::Depth(), C)); + } +} + +// +// ImageView methods +// +template +WImageView::WImageView(WImage* img, int c, int r, int width, int height) + : WImage(0) +{ + header_ = *(img->Ipl()); + header_.imageData = reinterpret_cast((*img)(c, r)); + header_.width = width; + header_.height = height; + WImage::SetIpl(&header_); +} + +template +WImageView::WImageView(T* data, int width, int height, int nchannels, int width_step) + : WImage(0) +{ + cvInitImageHeader(&header_, cvSize(width, height), WImage::Depth(), nchannels); + header_.imageData = reinterpret_cast(data); + if (width_step > 0) { + header_.widthStep = width_step; + } + WImage::SetIpl(&header_); +} + +template +WImageViewC::WImageViewC(WImageC* img, int c, int r, int width, int height) + : WImageC(0) +{ + header_ = *(img->Ipl()); + header_.imageData = reinterpret_cast((*img)(c, r)); + header_.width = width; + header_.height = height; + WImageC::SetIpl(&header_); +} + +template +WImageViewC::WImageViewC() : WImageC(0) { + cvInitImageHeader(&header_, cvSize(0, 0), WImage::Depth(), C); + header_.imageData = reinterpret_cast(0); + WImageC::SetIpl(&header_); +} + +template +WImageViewC::WImageViewC(T* data, int width, int height, int width_step) + : WImageC(0) +{ + cvInitImageHeader(&header_, cvSize(width, height), WImage::Depth(), C); + header_.imageData = 
reinterpret_cast(data); + if (width_step > 0) { + header_.widthStep = width_step; + } + WImageC::SetIpl(&header_); +} + +// Construct a view into a region of an image +template +WImageView WImage::View(int c, int r, int width, int height) { + return WImageView(this, c, r, width, height); +} + +template +WImageViewC WImageC::View(int c, int r, int width, int height) { + return WImageViewC(this, c, r, width, height); +} + +} // end of namespace + +#endif // __cplusplus + +#endif diff --git a/opencv/core/out.cpp b/opencv/core/out.cpp new file mode 100644 index 0000000..366f5cf --- /dev/null +++ b/opencv/core/out.cpp @@ -0,0 +1,307 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2010, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" +#include + +namespace cv +{ + +static inline char getCloseBrace(char c) +{ + return c == '[' ? ']' : c == '(' ? ')' : c == '{' ? '}' : '\0'; +} + + +template static void writeElems(std::ostream& out, const _Tp* data, + int nelems, int cn, char obrace, char cbrace) +{ + typedef typename DataType<_Tp>::work_type _WTp; + nelems *= cn; + for(int i = 0; i < nelems; i += cn) + { + if(cn == 1) + { + out << (_WTp)data[i] << (i+1 < nelems ? ", " : ""); + continue; + } + out << obrace; + for(int j = 0; j < cn; j++) + out << (_WTp)data[i + j] << (j+1 < cn ? ", " : ""); + out << cbrace << (i+cn < nelems ? 
", " : ""); + } +} + + +static void writeElems(std::ostream& out, const void* data, int nelems, int type, char brace) +{ + int depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type); + char cbrace = ' '; + if(!brace || isspace(brace)) + { + nelems *= cn; + cn = 1; + } + else + cbrace = getCloseBrace(brace); + if(depth == CV_8U) + writeElems(out, (const uchar*)data, nelems, cn, brace, cbrace); + else if(depth == CV_8S) + writeElems(out, (const schar*)data, nelems, cn, brace, cbrace); + else if(depth == CV_16U) + writeElems(out, (const ushort*)data, nelems, cn, brace, cbrace); + else if(depth == CV_16S) + writeElems(out, (const short*)data, nelems, cn, brace, cbrace); + else if(depth == CV_32S) + writeElems(out, (const int*)data, nelems, cn, brace, cbrace); + else if(depth == CV_32F) + { + std::streamsize pp = out.precision(); + out.precision(8); + writeElems(out, (const float*)data, nelems, cn, brace, cbrace); + out.precision(pp); + } + else if(depth == CV_64F) + { + std::streamsize pp = out.precision(); + out.precision(16); + writeElems(out, (const double*)data, nelems, cn, brace, cbrace); + out.precision(pp); + } + else + CV_Error(CV_StsUnsupportedFormat, ""); +} + + +static void writeMat(std::ostream& out, const Mat& m, char rowsep, char elembrace, bool singleLine) +{ + CV_Assert(m.dims <= 2); + int type = m.type(); + + char crowbrace = getCloseBrace(rowsep); + char orowbrace = crowbrace ? rowsep : '\0'; + + if( orowbrace || isspace(rowsep) ) + rowsep = '\0'; + + for( int i = 0; i < m.rows; i++ ) + { + if(orowbrace) + out << orowbrace; + if( m.data ) + writeElems(out, m.ptr(i), m.cols, type, elembrace); + if(orowbrace) + out << crowbrace << (i+1 < m.rows ? ", " : ""); + if(i+1 < m.rows) + { + if(rowsep) + out << rowsep << (singleLine ? 
" " : ""); + if(!singleLine) + out << "\n "; + } + } +} + +class MatlabFormatter : public Formatter +{ +public: + virtual ~MatlabFormatter() {} + void write(std::ostream& out, const Mat& m, const int*, int) const + { + out << "["; + writeMat(out, m, ';', ' ', m.cols == 1); + out << "]"; + } + + void write(std::ostream& out, const void* data, int nelems, int type, const int*, int) const + { + writeElems(out, data, nelems, type, ' '); + } +}; + +class PythonFormatter : public Formatter +{ +public: + virtual ~PythonFormatter() {} + void write(std::ostream& out, const Mat& m, const int*, int) const + { + out << "["; + writeMat(out, m, m.cols > 1 ? '[' : ' ', '[', m.cols*m.channels() == 1); + out << "]"; + } + + void write(std::ostream& out, const void* data, int nelems, int type, const int*, int) const + { + writeElems(out, data, nelems, type, '['); + } +}; + + +class NumpyFormatter : public Formatter +{ +public: + virtual ~NumpyFormatter() {} + void write(std::ostream& out, const Mat& m, const int*, int) const + { + static const char* numpyTypes[] = + { + "uint8", "int8", "uint16", "int16", "int32", "float32", "float64", "uint64" + }; + out << "array(["; + writeMat(out, m, m.cols > 1 ? 
'[' : ' ', '[', m.cols*m.channels() == 1); + out << "], type='" << numpyTypes[m.depth()] << "')"; + } + + void write(std::ostream& out, const void* data, int nelems, int type, const int*, int) const + { + writeElems(out, data, nelems, type, '['); + } +}; + + +class CSVFormatter : public Formatter +{ +public: + virtual ~CSVFormatter() {} + void write(std::ostream& out, const Mat& m, const int*, int) const + { + writeMat(out, m, ' ', ' ', m.cols*m.channels() == 1); + if(m.rows > 1) + out << "\n"; + } + + void write(std::ostream& out, const void* data, int nelems, int type, const int*, int) const + { + writeElems(out, data, nelems, type, ' '); + } +}; + + +class CFormatter : public Formatter +{ +public: + virtual ~CFormatter() {} + void write(std::ostream& out, const Mat& m, const int*, int) const + { + out << "{"; + writeMat(out, m, ',', ' ', m.cols==1); + out << "}"; + } + + void write(std::ostream& out, const void* data, int nelems, int type, const int*, int) const + { + writeElems(out, data, nelems, type, ' '); + } +}; + + +static MatlabFormatter matlabFormatter; +static PythonFormatter pythonFormatter; +static NumpyFormatter numpyFormatter; +static CSVFormatter csvFormatter; +static CFormatter cFormatter; + +static const Formatter* g_defaultFormatter0 = &matlabFormatter; +static const Formatter* g_defaultFormatter = &matlabFormatter; + +bool my_streq(const char* a, const char* b) +{ + size_t i, alen = strlen(a), blen = strlen(b); + if( alen != blen ) + return false; + for( i = 0; i < alen; i++ ) + if( a[i] != b[i] && a[i] - 32 != b[i] ) + return false; + return true; +} + +const Formatter* Formatter::get(const char* fmt) +{ + if(!fmt || my_streq(fmt, "")) + return g_defaultFormatter; + if( my_streq(fmt, "MATLAB")) + return &matlabFormatter; + if( my_streq(fmt, "CSV")) + return &csvFormatter; + if( my_streq(fmt, "PYTHON")) + return &pythonFormatter; + if( my_streq(fmt, "NUMPY")) + return &numpyFormatter; + if( my_streq(fmt, "C")) + return &cFormatter; + 
CV_Error(CV_StsBadArg, "Unknown formatter"); + return g_defaultFormatter; +} + +const Formatter* Formatter::setDefault(const Formatter* fmt) +{ + const Formatter* prevFmt = g_defaultFormatter; + if(!fmt) + fmt = g_defaultFormatter0; + g_defaultFormatter = fmt; + return prevFmt; +} + +Formatted::Formatted(const Mat& _m, const Formatter* _fmt, + const vector& _params) +{ + mtx = _m; + fmt = _fmt ? _fmt : Formatter::get(); + std::copy(_params.begin(), _params.end(), back_inserter(params)); +} + +Formatted::Formatted(const Mat& _m, const Formatter* _fmt, const int* _params) +{ + mtx = _m; + fmt = _fmt ? _fmt : Formatter::get(); + + if( _params ) + { + int i, maxParams = 100; + for(i = 0; i < maxParams && _params[i] != 0; i+=2) + ; + std::copy(_params, _params + i, back_inserter(params)); + } +} + +} + diff --git a/opencv/core/persistence.cpp b/opencv/core/persistence.cpp new file mode 100644 index 0000000..6fd84e7 --- /dev/null +++ b/opencv/core/persistence.cpp @@ -0,0 +1,5408 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#include "precomp.hpp" +#include +#include +#include "zlib.h" + +/****************************************************************************************\ +* Common macros and type definitions * +\****************************************************************************************/ + +#define cv_isprint(c) ((uchar)(c) >= (uchar)' ') +#define cv_isprint_or_tab(c) ((uchar)(c) >= (uchar)' ' || (c) == '\t') + +static inline bool cv_isalnum(char c) +{ + return ('0' <= c && c <= '9') || ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z'); +} + +static inline bool cv_isalpha(char c) +{ + return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z'); +} + +static inline bool cv_isdigit(char c) +{ + return '0' <= c && c <= '9'; +} + +static inline bool cv_isspace(char c) +{ + return (9 <= c && c <= 13) || c == ' '; +} + +static char* icv_itoa( int _val, char* buffer, int /*radix*/ ) +{ + const int radix = 10; + char* ptr=buffer + 23 /* enough even for 64-bit integers */; + unsigned val = abs(_val); + + *ptr = '\0'; + do + { + unsigned r = val / radix; + *--ptr = (char)(val - (r*radix) + '0'); + val = r; + } + while( val != 0 ); + + if( _val < 0 ) + *--ptr = '-'; + + return ptr; +} + +cv::string cv::FileStorage::getDefaultObjectName(const string& _filename) +{ + static const char* stubname = "unnamed"; + const char* filename = _filename.c_str(); + const char* ptr2 = filename + _filename.size(); + const char* ptr = ptr2 - 1; + cv::AutoBuffer name_buf(_filename.size()+1); + + while( ptr >= filename && *ptr != '\\' && *ptr != '/' && *ptr != ':' ) + { + if( *ptr == '.' 
&& (!*ptr2 || strncmp(ptr2, ".gz", 3) == 0) ) + ptr2 = ptr; + ptr--; + } + ptr++; + if( ptr == ptr2 ) + CV_Error( CV_StsBadArg, "Invalid filename" ); + + char* name = name_buf; + + // name must start with letter or '_' + if( !cv_isalpha(*ptr) && *ptr!= '_' ){ + *name++ = '_'; + } + + while( ptr < ptr2 ) + { + char c = *ptr++; + if( !cv_isalnum(c) && c != '-' && c != '_' ) + c = '_'; + *name++ = c; + } + *name = '\0'; + name = name_buf; + if( strcmp( name, "_" ) == 0 ) + strcpy( name, stubname ); + return cv::string(name); +} + +namespace cv +{ +#ifndef ANDROID //unsuported wcstombs on android +string fromUtf16(const WString& str) +{ + cv::AutoBuffer _buf(str.size()*4 + 1); + char* buf = _buf; + + size_t sz = wcstombs(buf, str.c_str(), str.size()); + if( sz == (size_t)-1 ) + return string(); + buf[sz] = '\0'; + return string(buf); +} + +WString toUtf16(const string& str) +{ + cv::AutoBuffer _buf(str.size() + 1); + wchar_t* buf = _buf; + + size_t sz = mbstowcs(buf, str.c_str(), str.size()); + if( sz == (size_t)-1 ) + return WString(); + buf[sz] = '\0'; + return WString(buf); +} +#endif +} + +typedef struct CvGenericHash +{ + CV_SET_FIELDS() + int tab_size; + void** table; +} +CvGenericHash; + +typedef CvGenericHash CvStringHash; + +typedef struct CvFileMapNode +{ + CvFileNode value; + const CvStringHashNode* key; + struct CvFileMapNode* next; +} +CvFileMapNode; + +typedef struct CvXMLStackRecord +{ + CvMemStoragePos pos; + CvString struct_tag; + int struct_indent; + int struct_flags; +} +CvXMLStackRecord; + +#define CV_XML_OPENING_TAG 1 +#define CV_XML_CLOSING_TAG 2 +#define CV_XML_EMPTY_TAG 3 +#define CV_XML_HEADER_TAG 4 +#define CV_XML_DIRECTIVE_TAG 5 + +//typedef void (*CvParse)( struct CvFileStorage* fs ); +typedef void (*CvStartWriteStruct)( struct CvFileStorage* fs, const char* key, + int struct_flags, const char* type_name ); +typedef void (*CvEndWriteStruct)( struct CvFileStorage* fs ); +typedef void (*CvWriteInt)( struct CvFileStorage* fs, const char* key, 
int value ); +typedef void (*CvWriteReal)( struct CvFileStorage* fs, const char* key, double value ); +typedef void (*CvWriteString)( struct CvFileStorage* fs, const char* key, + const char* value, int quote ); +typedef void (*CvWriteComment)( struct CvFileStorage* fs, const char* comment, int eol_comment ); +typedef void (*CvStartNextStream)( struct CvFileStorage* fs ); + +typedef struct CvFileStorage +{ + int flags; + int is_xml; + int write_mode; + int is_first; + CvMemStorage* memstorage; + CvMemStorage* dststorage; + CvMemStorage* strstorage; + CvStringHash* str_hash; + CvSeq* roots; + CvSeq* write_stack; + int struct_indent; + int struct_flags; + CvString struct_tag; + int space; + char* filename; + FILE* file; + gzFile gzfile; + char* buffer; + char* buffer_start; + char* buffer_end; + int wrap_margin; + int lineno; + int dummy_eof; + const char* errmsg; + char errmsgbuf[128]; + + CvStartWriteStruct start_write_struct; + CvEndWriteStruct end_write_struct; + CvWriteInt write_int; + CvWriteReal write_real; + CvWriteString write_string; + CvWriteComment write_comment; + CvStartNextStream start_next_stream; + //CvParse parse; +} +CvFileStorage; + +static void icvPuts( CvFileStorage* fs, const char* str ) +{ + CV_Assert( fs->file || fs->gzfile ); + if( fs->file ) + fputs( str, fs->file ); + else + gzputs( fs->gzfile, str ); +} + +static char* icvGets( CvFileStorage* fs, char* str, int maxCount ) +{ + CV_Assert( fs->file || fs->gzfile ); + if( fs->file ) + return fgets( str, maxCount, fs->file ); + return gzgets( fs->gzfile, str, maxCount ); +} + +static int icvEof( CvFileStorage* fs ) +{ + CV_Assert( fs->file || fs->gzfile ); + if( fs->file ) + return feof(fs->file); + return gzeof(fs->gzfile); +} + +static void icvClose( CvFileStorage* fs ) +{ + if( fs->file ) + fclose( fs->file ); + if( fs->gzfile ) + gzclose( fs->gzfile ); + fs->file = 0; + fs->gzfile = 0; +} + +static void icvRewind( CvFileStorage* fs ) +{ + CV_Assert( fs->file || fs->gzfile ); + if( fs->file 
) + rewind(fs->file); + else + gzrewind(fs->gzfile); +} + +#define CV_YML_INDENT 3 +#define CV_XML_INDENT 2 +#define CV_YML_INDENT_FLOW 1 +#define CV_FS_MAX_LEN 4096 + +#define CV_FILE_STORAGE ('Y' + ('A' << 8) + ('M' << 16) + ('L' << 24)) +#define CV_IS_FILE_STORAGE(fs) ((fs) != 0 && (fs)->flags == CV_FILE_STORAGE) + +#define CV_CHECK_FILE_STORAGE(fs) \ +{ \ + if( !CV_IS_FILE_STORAGE(fs) ) \ + CV_Error( (fs) ? CV_StsBadArg : CV_StsNullPtr, \ + "Invalid pointer to file storage" ); \ +} + +#define CV_CHECK_OUTPUT_FILE_STORAGE(fs) \ +{ \ + CV_CHECK_FILE_STORAGE(fs); \ + if( !fs->write_mode ) \ + CV_Error( CV_StsError, "The file storage is opened for reading" ); \ +} + +CV_IMPL const char* +cvAttrValue( const CvAttrList* attr, const char* attr_name ) +{ + while( attr && attr->attr ) + { + int i; + for( i = 0; attr->attr[i*2] != 0; i++ ) + { + if( strcmp( attr_name, attr->attr[i*2] ) == 0 ) + return attr->attr[i*2+1]; + } + attr = attr->next; + } + + return 0; +} + + +static CvGenericHash* +cvCreateMap( int flags, int header_size, int elem_size, + CvMemStorage* storage, int start_tab_size ) +{ + if( header_size < (int)sizeof(CvGenericHash) ) + CV_Error( CV_StsBadSize, "Too small map header_size" ); + + if( start_tab_size <= 0 ) + start_tab_size = 16; + + CvGenericHash* map = (CvGenericHash*)cvCreateSet( flags, header_size, elem_size, storage ); + + map->tab_size = start_tab_size; + start_tab_size *= sizeof(map->table[0]); + map->table = (void**)cvMemStorageAlloc( storage, start_tab_size ); + memset( map->table, 0, start_tab_size ); + + return map; +} + +#ifdef __GNUC__ +#define CV_PARSE_ERROR( errmsg ) \ + icvParseError( fs, __func__, (errmsg), __FILE__, __LINE__ ) +#else +#define CV_PARSE_ERROR( errmsg ) \ + icvParseError( fs, "", (errmsg), __FILE__, __LINE__ ) +#endif + +static void +icvParseError( CvFileStorage* fs, const char* func_name, + const char* err_msg, const char* source_file, int source_line ) +{ + char buf[1<<10]; + sprintf( buf, "%s(%d): %s", 
fs->filename, fs->lineno, err_msg ); + cvError( CV_StsParseError, func_name, buf, source_file, source_line ); +} + + +static void +icvFSCreateCollection( CvFileStorage* fs, int tag, CvFileNode* collection ) +{ + if( CV_NODE_IS_MAP(tag) ) + { + if( collection->tag != CV_NODE_NONE ) + { + assert( fs->is_xml != 0 ); + CV_PARSE_ERROR( "Sequence element should not have name (use <_>)" ); + } + + collection->data.map = cvCreateMap( 0, sizeof(CvFileNodeHash), + sizeof(CvFileMapNode), fs->memstorage, 16 ); + } + else + { + CvSeq* seq; + seq = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvFileNode), fs->memstorage ); + + // if contains some scalar element, add it to the newly created collection + if( CV_NODE_TYPE(collection->tag) != CV_NODE_NONE ) + cvSeqPush( seq, collection ); + + collection->data.seq = seq; + } + + collection->tag = tag; + cvSetSeqBlockSize( collection->data.seq, 8 ); +} + + +/*static void +icvFSReleaseCollection( CvSeq* seq ) +{ + if( seq ) + { + int is_map = CV_IS_SET(seq); + CvSeqReader reader; + int i, total = seq->total; + cvStartReadSeq( seq, &reader, 0 ); + + for( i = 0; i < total; i++ ) + { + CvFileNode* node = (CvFileNode*)reader.ptr; + + if( (!is_map || CV_IS_SET_ELEM( node )) && CV_NODE_IS_COLLECTION(node->tag) ) + { + if( CV_NODE_IS_USER(node->tag) && node->info && node->data.obj.decoded ) + cvRelease( (void**)&node->data.obj.decoded ); + if( !CV_NODE_SEQ_IS_SIMPLE( node->data.seq )) + icvFSReleaseCollection( node->data.seq ); + } + CV_NEXT_SEQ_ELEM( seq->elem_size, reader ); + } + } +}*/ + + +static char* +icvFSDoResize( CvFileStorage* fs, char* ptr, int len ) +{ + char* new_ptr = 0; + int written_len = (int)(ptr - fs->buffer_start); + int new_size = (int)((fs->buffer_end - fs->buffer_start)*3/2); + new_size = MAX( written_len + len, new_size ); + new_ptr = (char*)cvAlloc( new_size + 256 ); + fs->buffer = new_ptr + (fs->buffer - fs->buffer_start); + if( written_len > 0 ) + memcpy( new_ptr, fs->buffer_start, written_len ); + fs->buffer_start = 
new_ptr; + fs->buffer_end = fs->buffer_start + new_size; + new_ptr += written_len; + return new_ptr; +} + + +inline char* icvFSResizeWriteBuffer( CvFileStorage* fs, char* ptr, int len ) +{ + return ptr + len < fs->buffer_end ? ptr : icvFSDoResize( fs, ptr, len ); +} + + +static char* +icvFSFlush( CvFileStorage* fs ) +{ + char* ptr = fs->buffer; + int indent; + + if( ptr > fs->buffer_start + fs->space ) + { + ptr[0] = '\n'; + ptr[1] = '\0'; + icvPuts( fs, fs->buffer_start ); + fs->buffer = fs->buffer_start; + } + + indent = fs->struct_indent; + + if( fs->space != indent ) + { + if( fs->space < indent ) + memset( fs->buffer_start + fs->space, ' ', indent - fs->space ); + fs->space = indent; + } + + ptr = fs->buffer = fs->buffer_start + fs->space; + + return ptr; +} + + +/* closes file storage and deallocates buffers */ +CV_IMPL void +cvReleaseFileStorage( CvFileStorage** p_fs ) +{ + if( !p_fs ) + CV_Error( CV_StsNullPtr, "NULL double pointer to file storage" ); + + if( *p_fs ) + { + CvFileStorage* fs = *p_fs; + *p_fs = 0; + + if( fs->write_mode && (fs->file || fs->gzfile) ) + { + if( fs->write_stack ) + { + while( fs->write_stack->total > 0 ) + cvEndWriteStruct(fs); + } + icvFSFlush(fs); + if( fs->is_xml ) + icvPuts( fs, "\n" ); + } + + //icvFSReleaseCollection( fs->roots ); // delete all the user types recursively + + icvClose(fs); + + cvReleaseMemStorage( &fs->strstorage ); + + cvFree( &fs->buffer_start ); + cvReleaseMemStorage( &fs->memstorage ); + + memset( fs, 0, sizeof(*fs) ); + cvFree( &fs ); + } +} + + +#define CV_HASHVAL_SCALE 33 + +CV_IMPL CvStringHashNode* +cvGetHashedKey( CvFileStorage* fs, const char* str, int len, int create_missing ) +{ + CvStringHashNode* node = 0; + unsigned hashval = 0; + int i, tab_size; + CvStringHash* map = fs->str_hash; + + if( !fs ) + return 0; + + if( len < 0 ) + { + for( i = 0; str[i] != '\0'; i++ ) + hashval = hashval*CV_HASHVAL_SCALE + (unsigned char)str[i]; + len = i; + } + else for( i = 0; i < len; i++ ) + hashval = 
hashval*CV_HASHVAL_SCALE + (unsigned char)str[i]; + + hashval &= INT_MAX; + tab_size = map->tab_size; + if( (tab_size & (tab_size - 1)) == 0 ) + i = (int)(hashval & (tab_size - 1)); + else + i = (int)(hashval % tab_size); + + for( node = (CvStringHashNode*)(map->table[i]); node != 0; node = node->next ) + { + if( node->hashval == hashval && + node->str.len == len && + memcmp( node->str.ptr, str, len ) == 0 ) + break; + } + + if( !node && create_missing ) + { + node = (CvStringHashNode*)cvSetNew( (CvSet*)map ); + node->hashval = hashval; + node->str = cvMemStorageAllocString( map->storage, str, len ); + node->next = (CvStringHashNode*)(map->table[i]); + map->table[i] = node; + } + + return node; +} + + +CV_IMPL CvFileNode* +cvGetFileNode( CvFileStorage* fs, CvFileNode* _map_node, + const CvStringHashNode* key, + int create_missing ) +{ + CvFileNode* value = 0; + int k = 0, attempts = 1; + + if( !fs ) + return 0; + + CV_CHECK_FILE_STORAGE(fs); + + if( !key ) + CV_Error( CV_StsNullPtr, "Null key element" ); + + if( _map_node ) + { + if( !fs->roots ) + return 0; + attempts = fs->roots->total; + } + + for( k = 0; k < attempts; k++ ) + { + int i, tab_size; + CvFileNode* map_node = _map_node; + CvFileMapNode* another; + CvFileNodeHash* map; + + if( !map_node ) + map_node = (CvFileNode*)cvGetSeqElem( fs->roots, k ); + + if( !CV_NODE_IS_MAP(map_node->tag) ) + { + if( (!CV_NODE_IS_SEQ(map_node->tag) || map_node->data.seq->total != 0) && + CV_NODE_TYPE(map_node->tag) != CV_NODE_NONE ) + CV_Error( CV_StsError, "The node is neither a map nor an empty collection" ); + return 0; + } + + map = map_node->data.map; + tab_size = map->tab_size; + + if( (tab_size & (tab_size - 1)) == 0 ) + i = (int)(key->hashval & (tab_size - 1)); + else + i = (int)(key->hashval % tab_size); + + for( another = (CvFileMapNode*)(map->table[i]); another != 0; another = another->next ) + if( another->key == key ) + { + if( !create_missing ) + { + value = &another->value; + return value; + } + 
CV_PARSE_ERROR( "Duplicated key" ); + } + + if( k == attempts - 1 && create_missing ) + { + CvFileMapNode* node = (CvFileMapNode*)cvSetNew( (CvSet*)map ); + node->key = key; + + node->next = (CvFileMapNode*)(map->table[i]); + map->table[i] = node; + value = (CvFileNode*)node; + } + } + + return value; +} + + +CV_IMPL CvFileNode* +cvGetFileNodeByName( const CvFileStorage* fs, const CvFileNode* _map_node, const char* str ) +{ + CvFileNode* value = 0; + int i, len, tab_size; + unsigned hashval = 0; + int k = 0, attempts = 1; + + if( !fs ) + return 0; + + CV_CHECK_FILE_STORAGE(fs); + + if( !str ) + CV_Error( CV_StsNullPtr, "Null element name" ); + + for( i = 0; str[i] != '\0'; i++ ) + hashval = hashval*CV_HASHVAL_SCALE + (unsigned char)str[i]; + hashval &= INT_MAX; + len = i; + + if( !_map_node ) + { + if( !fs->roots ) + return 0; + attempts = fs->roots->total; + } + + for( k = 0; k < attempts; k++ ) + { + CvFileNodeHash* map; + const CvFileNode* map_node = _map_node; + CvFileMapNode* another; + + if( !map_node ) + map_node = (CvFileNode*)cvGetSeqElem( fs->roots, k ); + + if( !CV_NODE_IS_MAP(map_node->tag) ) + { + if( (!CV_NODE_IS_SEQ(map_node->tag) || map_node->data.seq->total != 0) && + CV_NODE_TYPE(map_node->tag) != CV_NODE_NONE ) + CV_Error( CV_StsError, "The node is neither a map nor an empty collection" ); + return 0; + } + + map = map_node->data.map; + tab_size = map->tab_size; + + if( (tab_size & (tab_size - 1)) == 0 ) + i = (int)(hashval & (tab_size - 1)); + else + i = (int)(hashval % tab_size); + + for( another = (CvFileMapNode*)(map->table[i]); another != 0; another = another->next ) + { + const CvStringHashNode* key = another->key; + + if( key->hashval == hashval && + key->str.len == len && + memcmp( key->str.ptr, str, len ) == 0 ) + { + value = &another->value; + return value; + } + } + } + + return value; +} + + +CV_IMPL CvFileNode* +cvGetRootFileNode( const CvFileStorage* fs, int stream_index ) +{ + CV_CHECK_FILE_STORAGE(fs); + + if( !fs->roots || 
(unsigned)stream_index >= (unsigned)fs->roots->total ) + return 0; + + return (CvFileNode*)cvGetSeqElem( fs->roots, stream_index ); +} + + +/* returns the sequence element by its index */ +/*CV_IMPL CvFileNode* +cvGetFileNodeFromSeq( CvFileStorage* fs, + CvFileNode* seq_node, int index ) +{ + CvFileNode* value = 0; + CvSeq* seq; + + if( !seq_node ) + seq = fs->roots; + else if( !CV_NODE_IS_SEQ(seq_node->tag) ) + { + if( CV_NODE_IS_MAP(seq_node->tag) ) + CV_Error( CV_StsError, "The node is map. Use cvGetFileNodeFromMap()." ); + if( CV_NODE_TYPE(seq_node->tag) == CV_NODE_NONE ) + CV_Error( CV_StsError, "The node is an empty object (None)." ); + if( index != 0 && index != -1 ) + CV_Error( CV_StsOutOfRange, "" ); + value = seq_node; + EXIT; + } + else + seq = seq_node->data.seq; + + if( !seq ) + CV_Error( CV_StsNullPtr, "The file storage is empty" ); + + value = (CvFileNode*)cvGetSeqElem( seq, index, 0 ); + + + + return value; +}*/ + + +static char* +icvDoubleToString( char* buf, double value ) +{ + Cv64suf val; + unsigned ieee754_hi; + + val.f = value; + ieee754_hi = (unsigned)(val.u >> 32); + + if( (ieee754_hi & 0x7ff00000) != 0x7ff00000 ) + { + int ivalue = cvRound(value); + if( ivalue == value ) + sprintf( buf, "%d.", ivalue ); + else + { + static const char* fmt = "%.16e"; + char* ptr = buf; + sprintf( buf, fmt, value ); + if( *ptr == '+' || *ptr == '-' ) + ptr++; + for( ; cv_isdigit(*ptr); ptr++ ) + ; + if( *ptr == ',' ) + *ptr = '.'; + } + } + else + { + unsigned ieee754_lo = (unsigned)val.u; + if( (ieee754_hi & 0x7fffffff) + (ieee754_lo != 0) > 0x7ff00000 ) + strcpy( buf, ".Nan" ); + else + strcpy( buf, (int)ieee754_hi < 0 ? 
"-.Inf" : ".Inf" ); + } + + return buf; +} + + +static char* +icvFloatToString( char* buf, float value ) +{ + Cv32suf val; + unsigned ieee754; + val.f = value; + ieee754 = val.u; + + if( (ieee754 & 0x7f800000) != 0x7f800000 ) + { + int ivalue = cvRound(value); + if( ivalue == value ) + sprintf( buf, "%d.", ivalue ); + else + { + static const char* fmt = "%.8e"; + char* ptr = buf; + sprintf( buf, fmt, value ); + if( *ptr == '+' || *ptr == '-' ) + ptr++; + for( ; cv_isdigit(*ptr); ptr++ ) + ; + if( *ptr == ',' ) + *ptr = '.'; + } + } + else + { + if( (ieee754 & 0x7fffffff) != 0x7f800000 ) + strcpy( buf, ".Nan" ); + else + strcpy( buf, (int)ieee754 < 0 ? "-.Inf" : ".Inf" ); + } + + return buf; +} + + +static void +icvProcessSpecialDouble( CvFileStorage* fs, char* buf, double* value, char** endptr ) +{ + char c = buf[0]; + int inf_hi = 0x7ff00000; + + if( c == '-' || c == '+' ) + { + inf_hi = c == '-' ? 0xfff00000 : 0x7ff00000; + c = *++buf; + } + + if( c != '.' ) + CV_PARSE_ERROR( "Bad format of floating-point constant" ); + + if( toupper(buf[1]) == 'I' && toupper(buf[2]) == 'N' && toupper(buf[3]) == 'F' ) + *(uint64*)value = ((uint64)inf_hi << 32); + else if( toupper(buf[1]) == 'N' && toupper(buf[2]) == 'A' && toupper(buf[3]) == 'N' ) + *(uint64*)value = (uint64)-1; + else + CV_PARSE_ERROR( "Bad format of floating-point constant" ); + + *endptr = buf + 4; +} + + +static double icv_strtod( CvFileStorage* fs, char* ptr, char** endptr ) +{ + double fval = strtod( ptr, endptr ); + if( **endptr == '.' 
) + { + char* dot_pos = *endptr; + *dot_pos = ','; + double fval2 = strtod( ptr, endptr ); + *dot_pos = '.'; + if( *endptr > dot_pos ) + fval = fval2; + else + *endptr = dot_pos; + } + + if( *endptr == ptr || cv_isalpha(**endptr) ) + icvProcessSpecialDouble( fs, ptr, &fval, endptr ); + + return fval; +} + + +/****************************************************************************************\ +* YAML Parser * +\****************************************************************************************/ + +static char* +icvYMLSkipSpaces( CvFileStorage* fs, char* ptr, int min_indent, int max_comment_indent ) +{ + for(;;) + { + while( *ptr == ' ' ) + ptr++; + if( *ptr == '#' ) + { + if( ptr - fs->buffer_start > max_comment_indent ) + return ptr; + *ptr = '\0'; + } + else if( cv_isprint(*ptr) ) + { + if( ptr - fs->buffer_start < min_indent ) + CV_PARSE_ERROR( "Incorrect indentation" ); + break; + } + else if( *ptr == '\0' || *ptr == '\n' || *ptr == '\r' ) + { + int max_size = (int)(fs->buffer_end - fs->buffer_start); + ptr = icvGets( fs, fs->buffer_start, max_size ); + if( !ptr ) + { + // emulate end of stream + ptr = fs->buffer_start; + ptr[0] = ptr[1] = ptr[2] = '.'; + ptr[3] = '\0'; + fs->dummy_eof = 1; + break; + } + else + { + int l = (int)strlen(ptr); + if( ptr[l-1] != '\n' && ptr[l-1] != '\r' && !icvEof(fs) ) + CV_PARSE_ERROR( "Too long string or a last string w/o newline" ); + } + + fs->lineno++; + } + else + CV_PARSE_ERROR( *ptr == '\t' ? "Tabs are prohibited in YAML!" 
: "Invalid character" ); + } + + return ptr; +} + + +static char* +icvYMLParseKey( CvFileStorage* fs, char* ptr, + CvFileNode* map_node, CvFileNode** value_placeholder ) +{ + char c; + char *endptr = ptr - 1, *saveptr; + CvStringHashNode* str_hash_node; + + if( *ptr == '-' ) + CV_PARSE_ERROR( "Key may not start with \'-\'" ); + + do c = *++endptr; + while( cv_isprint(c) && c != ':' ); + + if( c != ':' ) + CV_PARSE_ERROR( "Missing \':\'" ); + + saveptr = endptr + 1; + do c = *--endptr; + while( c == ' ' ); + + ++endptr; + if( endptr == ptr ) + CV_PARSE_ERROR( "An empty key" ); + + str_hash_node = cvGetHashedKey( fs, ptr, (int)(endptr - ptr), 1 ); + *value_placeholder = cvGetFileNode( fs, map_node, str_hash_node, 1 ); + ptr = saveptr; + + return ptr; +} + + +static char* +icvYMLParseValue( CvFileStorage* fs, char* ptr, CvFileNode* node, + int parent_flags, int min_indent ) +{ + char buf[CV_FS_MAX_LEN + 1024]; + char* endptr = 0; + char c = ptr[0], d = ptr[1]; + int is_parent_flow = CV_NODE_IS_FLOW(parent_flags); + int value_type = CV_NODE_NONE; + int len; + + memset( node, 0, sizeof(*node) ); + + if( c == '!' ) // handle explicit type specification + { + if( d == '!' 
|| d == '^' ) + { + ptr++; + value_type |= CV_NODE_USER; + } + + endptr = ptr++; + do d = *++endptr; + while( cv_isprint(d) && d != ' ' ); + len = (int)(endptr - ptr); + if( len == 0 ) + CV_PARSE_ERROR( "Empty type name" ); + d = *endptr; + *endptr = '\0'; + + if( len == 3 && !CV_NODE_IS_USER(value_type) ) + { + if( memcmp( ptr, "str", 3 ) == 0 ) + value_type = CV_NODE_STRING; + else if( memcmp( ptr, "int", 3 ) == 0 ) + value_type = CV_NODE_INT; + else if( memcmp( ptr, "seq", 3 ) == 0 ) + value_type = CV_NODE_SEQ; + else if( memcmp( ptr, "map", 3 ) == 0 ) + value_type = CV_NODE_MAP; + } + else if( len == 5 && !CV_NODE_IS_USER(value_type) ) + { + if( memcmp( ptr, "float", 5 ) == 0 ) + value_type = CV_NODE_REAL; + } + else if( CV_NODE_IS_USER(value_type) ) + { + node->info = cvFindType( ptr ); + if( !node->info ) + node->tag &= ~CV_NODE_USER; + } + + *endptr = d; + ptr = icvYMLSkipSpaces( fs, endptr, min_indent, INT_MAX ); + + c = *ptr; + + if( !CV_NODE_IS_USER(value_type) ) + { + if( value_type == CV_NODE_STRING && c != '\'' && c != '\"' ) + goto force_string; + if( value_type == CV_NODE_INT ) + goto force_int; + if( value_type == CV_NODE_REAL ) + goto force_real; + } + } + + if( cv_isdigit(c) || + ((c == '-' || c == '+') && (cv_isdigit(d) || d == '.')) || + (c == '.' && cv_isalnum(d))) // a number + { + double fval; + int ival; + endptr = ptr + (c == '-' || c == '+'); + while( cv_isdigit(*endptr) ) + endptr++; + if( *endptr == '.' 
|| *endptr == 'e' ) + { +force_real: + fval = icv_strtod( fs, ptr, &endptr ); + /*if( endptr == ptr || cv_isalpha(*endptr) ) + icvProcessSpecialDouble( fs, endptr, &fval, &endptr ));*/ + + node->tag = CV_NODE_REAL; + node->data.f = fval; + } + else + { +force_int: + ival = (int)strtol( ptr, &endptr, 0 ); + node->tag = CV_NODE_INT; + node->data.i = ival; + } + + if( !endptr || endptr == ptr ) + CV_PARSE_ERROR( "Invalid numeric value (inconsistent explicit type specification?)" ); + + ptr = endptr; + } + else if( c == '\'' || c == '\"' ) // an explicit string + { + node->tag = CV_NODE_STRING; + if( c == '\'' ) + for( len = 0; len < CV_FS_MAX_LEN; ) + { + c = *++ptr; + if( cv_isalnum(c) || (c != '\'' && cv_isprint(c))) + buf[len++] = c; + else if( c == '\'' ) + { + c = *++ptr; + if( c != '\'' ) + break; + buf[len++] = c; + } + else + CV_PARSE_ERROR( "Invalid character" ); + } + else + for( len = 0; len < CV_FS_MAX_LEN; ) + { + c = *++ptr; + if( cv_isalnum(c) || (c != '\\' && c != '\"' && cv_isprint(c))) + buf[len++] = c; + else if( c == '\"' ) + { + ++ptr; + break; + } + else if( c == '\\' ) + { + d = *++ptr; + if( d == '\'' ) + buf[len++] = d; + else if( d == '\"' || d == '\\' || d == '\'' ) + buf[len++] = d; + else if( d == 'n' ) + buf[len++] = '\n'; + else if( d == 'r' ) + buf[len++] = '\r'; + else if( d == 't' ) + buf[len++] = '\t'; + else if( d == 'x' || (cv_isdigit(d) && d < '8') ) + { + int val, is_hex = d == 'x'; + c = ptr[3]; + ptr[3] = '\0'; + val = strtol( ptr + is_hex, &endptr, is_hex ? 
8 : 16 ); + ptr[3] = c; + if( endptr == ptr + is_hex ) + buf[len++] = 'x'; + else + { + buf[len++] = (char)val; + ptr = endptr; + } + } + } + else + CV_PARSE_ERROR( "Invalid character" ); + } + + if( len >= CV_FS_MAX_LEN ) + CV_PARSE_ERROR( "Too long string literal" ); + + node->data.str = cvMemStorageAllocString( fs->memstorage, buf, len ); + } + else if( c == '[' || c == '{' ) // collection as a flow + { + int new_min_indent = min_indent + !is_parent_flow; + int struct_flags = CV_NODE_FLOW + (c == '{' ? CV_NODE_MAP : CV_NODE_SEQ); + int is_simple = 1; + + icvFSCreateCollection( fs, CV_NODE_TYPE(struct_flags) + + (node->info ? CV_NODE_USER : 0), node ); + + d = c == '[' ? ']' : '}'; + + for( ++ptr ;;) + { + CvFileNode* elem = 0; + + ptr = icvYMLSkipSpaces( fs, ptr, new_min_indent, INT_MAX ); + if( *ptr == '}' || *ptr == ']' ) + { + if( *ptr != d ) + CV_PARSE_ERROR( "The wrong closing bracket" ); + ptr++; + break; + } + + if( node->data.seq->total != 0 ) + { + if( *ptr != ',' ) + CV_PARSE_ERROR( "Missing , between the elements" ); + ptr = icvYMLSkipSpaces( fs, ptr + 1, new_min_indent, INT_MAX ); + } + + if( CV_NODE_IS_MAP(struct_flags) ) + { + ptr = icvYMLParseKey( fs, ptr, node, &elem ); + ptr = icvYMLSkipSpaces( fs, ptr, new_min_indent, INT_MAX ); + } + else + { + if( *ptr == ']' ) + break; + elem = (CvFileNode*)cvSeqPush( node->data.seq, 0 ); + } + ptr = icvYMLParseValue( fs, ptr, elem, struct_flags, new_min_indent ); + if( CV_NODE_IS_MAP(struct_flags) ) + elem->tag |= CV_NODE_NAMED; + is_simple &= !CV_NODE_IS_COLLECTION(elem->tag); + } + node->data.seq->flags |= is_simple ? CV_NODE_SEQ_SIMPLE : 0; + } + else + { + int indent, struct_flags, is_simple; + + if( is_parent_flow || c != '-' ) + { + // implicit (one-line) string or nested block-style collection + if( !is_parent_flow ) + { + if( c == '?' 
) + CV_PARSE_ERROR( "Complex keys are not supported" ); + if( c == '|' || c == '>' ) + CV_PARSE_ERROR( "Multi-line text literals are not supported" ); + } + +force_string: + endptr = ptr - 1; + + do c = *++endptr; + while( cv_isprint(c) && + (!is_parent_flow || (c != ',' && c != '}' && c != ']')) && + (is_parent_flow || c != ':' || value_type == CV_NODE_STRING)); + + if( endptr == ptr ) + CV_PARSE_ERROR( "Invalid character" ); + + if( is_parent_flow || c != ':' ) + { + char* str_end = endptr; + node->tag = CV_NODE_STRING; + // strip spaces in the end of string + do c = *--str_end; + while( str_end > ptr && c == ' ' ); + str_end++; + node->data.str = cvMemStorageAllocString( fs->memstorage, ptr, (int)(str_end - ptr) ); + ptr = endptr; + return ptr; + } + struct_flags = CV_NODE_MAP; + } + else + struct_flags = CV_NODE_SEQ; + + icvFSCreateCollection( fs, struct_flags + + (node->info ? CV_NODE_USER : 0), node ); + + indent = (int)(ptr - fs->buffer_start); + is_simple = 1; + + for(;;) + { + CvFileNode* elem = 0; + + if( CV_NODE_IS_MAP(struct_flags) ) + { + ptr = icvYMLParseKey( fs, ptr, node, &elem ); + } + else + { + c = *ptr++; + if( c != '-' ) + CV_PARSE_ERROR( "Block sequence elements must be preceded with \'-\'" ); + + elem = (CvFileNode*)cvSeqPush( node->data.seq, 0 ); + } + + ptr = icvYMLSkipSpaces( fs, ptr, indent + 1, INT_MAX ); + ptr = icvYMLParseValue( fs, ptr, elem, struct_flags, indent + 1 ); + if( CV_NODE_IS_MAP(struct_flags) ) + elem->tag |= CV_NODE_NAMED; + is_simple &= !CV_NODE_IS_COLLECTION(elem->tag); + + ptr = icvYMLSkipSpaces( fs, ptr, 0, INT_MAX ); + if( ptr - fs->buffer_start != indent ) + { + if( ptr - fs->buffer_start < indent ) + break; + else + CV_PARSE_ERROR( "Incorrect indentation" ); + } + if( memcmp( ptr, "...", 3 ) == 0 ) + break; + } + + node->data.seq->flags |= is_simple ? 
CV_NODE_SEQ_SIMPLE : 0; + } + + return ptr; +} + + +static void +icvYMLParse( CvFileStorage* fs ) +{ + char* ptr = fs->buffer_start; + int is_first = 1; + + for(;;) + { + // 0. skip leading comments and directives and ... + // 1. reach the first item + for(;;) + { + ptr = icvYMLSkipSpaces( fs, ptr, 0, INT_MAX ); + if( !ptr ) + return; + + if( *ptr == '%' ) + { + if( memcmp( ptr, "%YAML:", 6 ) == 0 && + memcmp( ptr, "%YAML:1.", 8 ) != 0 ) + CV_PARSE_ERROR( "Unsupported YAML version (it must be 1.x)" ); + *ptr = '\0'; + } + else if( *ptr == '-' ) + { + if( memcmp(ptr, "---", 3) == 0 ) + { + ptr += 3; + break; + } + else if( is_first ) + break; + } + else if( cv_isalnum(*ptr) || *ptr=='_') + { + if( !is_first ) + CV_PARSE_ERROR( "The YAML streams must start with '---', except the first one" ); + break; + } + else if( fs->dummy_eof ) + break; + else + CV_PARSE_ERROR( "Invalid or unsupported syntax" ); + } + + ptr = icvYMLSkipSpaces( fs, ptr, 0, INT_MAX ); + if( memcmp( ptr, "...", 3 ) != 0 ) + { + // 2. parse the collection + CvFileNode* root_node = (CvFileNode*)cvSeqPush( fs->roots, 0 ); + + ptr = icvYMLParseValue( fs, ptr, root_node, CV_NODE_NONE, 0 ); + if( !CV_NODE_IS_COLLECTION(root_node->tag) ) + CV_PARSE_ERROR( "Only collections as YAML streams are supported by this parser" ); + + // 3. 
parse until the end of file or next collection + ptr = icvYMLSkipSpaces( fs, ptr, 0, INT_MAX ); + if( !ptr ) + return; + } + + if( fs->dummy_eof ) + break; + ptr += 3; + is_first = 0; + } +} + + +/****************************************************************************************\ +* YAML Emitter * +\****************************************************************************************/ + +static void +icvYMLWrite( CvFileStorage* fs, const char* key, const char* data ) +{ + int i, keylen = 0; + int datalen = 0; + int struct_flags; + char* ptr; + + struct_flags = fs->struct_flags; + + if( key && key[0] == '\0' ) + key = 0; + + if( CV_NODE_IS_COLLECTION(struct_flags) ) + { + if( (CV_NODE_IS_MAP(struct_flags) ^ (key != 0)) ) + CV_Error( CV_StsBadArg, "An attempt to add element without a key to a map, " + "or add element with key to sequence" ); + } + else + { + fs->is_first = 0; + struct_flags = CV_NODE_EMPTY | (key ? CV_NODE_MAP : CV_NODE_SEQ); + } + + if( key ) + { + keylen = (int)strlen(key); + if( keylen == 0 ) + CV_Error( CV_StsBadArg, "The key is an empty" ); + + if( keylen > CV_FS_MAX_LEN ) + CV_Error( CV_StsBadArg, "The key is too long" ); + } + + if( data ) + datalen = (int)strlen(data); + + if( CV_NODE_IS_FLOW(struct_flags) ) + { + int new_offset; + ptr = fs->buffer; + if( !CV_NODE_IS_EMPTY(struct_flags) ) + *ptr++ = ','; + new_offset = (int)(ptr - fs->buffer_start) + keylen + datalen; + if( new_offset > fs->wrap_margin && new_offset - fs->struct_indent > 10 ) + { + fs->buffer = ptr; + ptr = icvFSFlush(fs); + } + else + *ptr++ = ' '; + } + else + { + ptr = icvFSFlush(fs); + if( !CV_NODE_IS_MAP(struct_flags) ) + { + *ptr++ = '-'; + if( data ) + *ptr++ = ' '; + } + } + + if( key ) + { + if( !cv_isalpha(key[0]) && key[0] != '_' ) + CV_Error( CV_StsBadArg, "Key must start with a letter or _" ); + + ptr = icvFSResizeWriteBuffer( fs, ptr, keylen ); + + for( i = 0; i < keylen; i++ ) + { + char c = key[i]; + + ptr[i] = c; + if( !cv_isalnum(c) && c != '-' && c 
!= '_' && c != ' ' ) + CV_Error( CV_StsBadArg, "Key names may only contain alphanumeric characters [a-zA-Z0-9], '-', '_' and ' '" ); + } + + ptr += keylen; + *ptr++ = ':'; + if( !CV_NODE_IS_FLOW(struct_flags) && data ) + *ptr++ = ' '; + } + + if( data ) + { + ptr = icvFSResizeWriteBuffer( fs, ptr, datalen ); + memcpy( ptr, data, datalen ); + ptr += datalen; + } + + fs->buffer = ptr; + fs->struct_flags = struct_flags & ~CV_NODE_EMPTY; +} + + +static void +icvYMLStartWriteStruct( CvFileStorage* fs, const char* key, int struct_flags, + const char* type_name CV_DEFAULT(0)) +{ + int parent_flags; + char buf[CV_FS_MAX_LEN + 1024]; + const char* data = 0; + + struct_flags = (struct_flags & (CV_NODE_TYPE_MASK|CV_NODE_FLOW)) | CV_NODE_EMPTY; + if( !CV_NODE_IS_COLLECTION(struct_flags)) + CV_Error( CV_StsBadArg, + "Some collection type - CV_NODE_SEQ or CV_NODE_MAP, must be specified" ); + + if( CV_NODE_IS_FLOW(struct_flags) ) + { + char c = CV_NODE_IS_MAP(struct_flags) ? '{' : '['; + struct_flags |= CV_NODE_FLOW; + + if( type_name ) + sprintf( buf, "!!%s %c", type_name, c ); + else + { + buf[0] = c; + buf[1] = '\0'; + } + data = buf; + } + else if( type_name ) + { + sprintf( buf, "!!%s", type_name ); + data = buf; + } + + icvYMLWrite( fs, key, data ); + + parent_flags = fs->struct_flags; + cvSeqPush( fs->write_stack, &parent_flags ); + fs->struct_flags = struct_flags; + + if( !CV_NODE_IS_FLOW(parent_flags) ) + fs->struct_indent += CV_YML_INDENT + CV_NODE_IS_FLOW(struct_flags); +} + + +static void +icvYMLEndWriteStruct( CvFileStorage* fs ) +{ + int parent_flags = 0, struct_flags; + char* ptr; + + struct_flags = fs->struct_flags; + if( fs->write_stack->total == 0 ) + CV_Error( CV_StsError, "EndWriteStruct w/o matching StartWriteStruct" ); + + cvSeqPop( fs->write_stack, &parent_flags ); + + if( CV_NODE_IS_FLOW(struct_flags) ) + { + ptr = fs->buffer; + if( ptr > fs->buffer_start + fs->struct_indent && !CV_NODE_IS_EMPTY(struct_flags) ) + *ptr++ = ' '; + *ptr++ = 
CV_NODE_IS_MAP(struct_flags) ? '}' : ']'; + fs->buffer = ptr; + } + else if( CV_NODE_IS_EMPTY(struct_flags) ) + { + ptr = icvFSFlush(fs); + memcpy( ptr, CV_NODE_IS_MAP(struct_flags) ? "{}" : "[]", 2 ); + fs->buffer = ptr + 2; + } + + if( !CV_NODE_IS_FLOW(parent_flags) ) + fs->struct_indent -= CV_YML_INDENT + CV_NODE_IS_FLOW(struct_flags); + assert( fs->struct_indent >= 0 ); + + fs->struct_flags = parent_flags; +} + + +static void +icvYMLStartNextStream( CvFileStorage* fs ) +{ + if( !fs->is_first ) + { + while( fs->write_stack->total > 0 ) + icvYMLEndWriteStruct(fs); + + fs->struct_indent = 0; + icvFSFlush(fs); + icvPuts( fs, "...\n" ); + icvPuts( fs, "---\n" ); + fs->buffer = fs->buffer_start; + } +} + + +static void +icvYMLWriteInt( CvFileStorage* fs, const char* key, int value ) +{ + char buf[128]; + icvYMLWrite( fs, key, icv_itoa( value, buf, 10 )); +} + + +static void +icvYMLWriteReal( CvFileStorage* fs, const char* key, double value ) +{ + char buf[128]; + icvYMLWrite( fs, key, icvDoubleToString( buf, value )); +} + + +static void +icvYMLWriteString( CvFileStorage* fs, const char* key, + const char* str, int quote CV_DEFAULT(0)) +{ + char buf[CV_FS_MAX_LEN*4+16]; + char* data = (char*)str; + int i, len; + + if( !str ) + CV_Error( CV_StsNullPtr, "Null string pointer" ); + + len = (int)strlen(str); + if( len > CV_FS_MAX_LEN ) + CV_Error( CV_StsBadArg, "The written string is too long" ); + + if( quote || len == 0 || str[0] != str[len-1] || (str[0] != '\"' && str[0] != '\'') ) + { + int need_quote = quote || len == 0; + data = buf; + *data++ = '\"'; + for( i = 0; i < len; i++ ) + { + char c = str[i]; + + if( !need_quote && !cv_isalnum(c) && c != '_' && c != ' ' && c != '-' && + c != '(' && c != ')' && c != '/' && c != '+' && c != ';' ) + need_quote = 1; + + if( !cv_isalnum(c) && (!cv_isprint(c) || c == '\\' || c == '\'' || c == '\"') ) + { + *data++ = '\\'; + if( cv_isprint(c) ) + *data++ = c; + else if( c == '\n' ) + *data++ = 'n'; + else if( c == '\r' ) + 
*data++ = 'r'; + else if( c == '\t' ) + *data++ = 't'; + else + { + sprintf( data, "x%02x", c ); + data += 3; + } + } + else + *data++ = c; + } + if( !need_quote && (cv_isdigit(str[0]) || + str[0] == '+' || str[0] == '-' || str[0] == '.' )) + need_quote = 1; + + if( need_quote ) + *data++ = '\"'; + *data++ = '\0'; + data = buf + !need_quote; + } + + icvYMLWrite( fs, key, data ); +} + + +static void +icvYMLWriteComment( CvFileStorage* fs, const char* comment, int eol_comment ) +{ + int len; //, indent; + int multiline; + const char* eol; + char* ptr; + + if( !comment ) + CV_Error( CV_StsNullPtr, "Null comment" ); + + len = (int)strlen(comment); + eol = strchr(comment, '\n'); + multiline = eol != 0; + ptr = fs->buffer; + + if( !eol_comment || multiline || + fs->buffer_end - ptr < len || ptr == fs->buffer_start ) + ptr = icvFSFlush( fs ); + else + *ptr++ = ' '; + + while( comment ) + { + *ptr++ = '#'; + *ptr++ = ' '; + if( eol ) + { + ptr = icvFSResizeWriteBuffer( fs, ptr, (int)(eol - comment) + 1 ); + memcpy( ptr, comment, eol - comment + 1 ); + fs->buffer = ptr + (eol - comment); + comment = eol + 1; + eol = strchr( comment, '\n' ); + } + else + { + len = (int)strlen(comment); + ptr = icvFSResizeWriteBuffer( fs, ptr, len ); + memcpy( ptr, comment, len ); + fs->buffer = ptr + len; + comment = 0; + } + ptr = icvFSFlush( fs ); + } +} + + +/****************************************************************************************\ +* XML Parser * +\****************************************************************************************/ + +#define CV_XML_INSIDE_COMMENT 1 +#define CV_XML_INSIDE_TAG 2 +#define CV_XML_INSIDE_DIRECTIVE 3 + +static char* +icvXMLSkipSpaces( CvFileStorage* fs, char* ptr, int mode ) +{ + int level = 0; + + for(;;) + { + char c; + ptr--; + + if( mode == CV_XML_INSIDE_COMMENT ) + { + do c = *++ptr; + while( cv_isprint_or_tab(c) && (c != '-' || ptr[1] != '-' || ptr[2] != '>') ); + + if( c == '-' ) + { + assert( ptr[1] == '-' && ptr[2] == '>' ); + 
mode = 0; + ptr += 3; + } + } + else if( mode == CV_XML_INSIDE_DIRECTIVE ) + { + // !!!NOTE!!! This is not quite correct, but should work in most cases + do + { + c = *++ptr; + level += c == '<'; + level -= c == '>'; + if( level < 0 ) + return ptr; + } while( cv_isprint_or_tab(c) ); + } + else + { + do c = *++ptr; + while( c == ' ' || c == '\t' ); + + if( c == '<' && ptr[1] == '!' && ptr[2] == '-' && ptr[3] == '-' ) + { + if( mode != 0 ) + CV_PARSE_ERROR( "Comments are not allowed here" ); + mode = CV_XML_INSIDE_COMMENT; + ptr += 4; + } + else if( cv_isprint(c) ) + break; + } + + if( !cv_isprint(*ptr) ) + { + int max_size = (int)(fs->buffer_end - fs->buffer_start); + if( *ptr != '\0' && *ptr != '\n' && *ptr != '\r' ) + CV_PARSE_ERROR( "Invalid character in the stream" ); + ptr = icvGets( fs, fs->buffer_start, max_size ); + if( !ptr ) + { + ptr = fs->buffer_start; + *ptr = '\0'; + fs->dummy_eof = 1; + break; + } + else + { + int l = (int)strlen(ptr); + if( ptr[l-1] != '\n' && ptr[l-1] != '\r' && !icvEof(fs) ) + CV_PARSE_ERROR( "Too long string or a last string w/o newline" ); + } + fs->lineno++; + } + } + return ptr; +} + + +static char* +icvXMLParseTag( CvFileStorage* fs, char* ptr, CvStringHashNode** _tag, + CvAttrList** _list, int* _tag_type ); + +static char* +icvXMLParseValue( CvFileStorage* fs, char* ptr, CvFileNode* node, + int value_type CV_DEFAULT(CV_NODE_NONE)) +{ + CvFileNode *elem = node; + int have_space = 1, is_simple = 1; + int is_user_type = CV_NODE_IS_USER(value_type); + memset( node, 0, sizeof(*node) ); + + value_type = CV_NODE_TYPE(value_type); + + for(;;) + { + char c = *ptr, d; + char* endptr; + + if( cv_isspace(c) || c == '\0' || (c == '<' && ptr[1] == '!' 
&& ptr[2] == '-') ) + { + ptr = icvXMLSkipSpaces( fs, ptr, 0 ); + have_space = 1; + c = *ptr; + } + + d = ptr[1]; + + if( c =='<' || c == '\0' ) + { + CvStringHashNode *key = 0, *key2 = 0; + CvAttrList* list = 0; + CvTypeInfo* info = 0; + int tag_type = 0; + int is_noname = 0; + const char* type_name = 0; + int elem_type = CV_NODE_NONE; + + if( d == '/' || c == '\0' ) + break; + + ptr = icvXMLParseTag( fs, ptr, &key, &list, &tag_type ); + + if( tag_type == CV_XML_DIRECTIVE_TAG ) + CV_PARSE_ERROR( "Directive tags are not allowed here" ); + if( tag_type == CV_XML_EMPTY_TAG ) + CV_PARSE_ERROR( "Empty tags are not supported" ); + + assert( tag_type == CV_XML_OPENING_TAG ); + + type_name = list ? cvAttrValue( list, "type_id" ) : 0; + if( type_name ) + { + if( strcmp( type_name, "str" ) == 0 ) + elem_type = CV_NODE_STRING; + else if( strcmp( type_name, "map" ) == 0 ) + elem_type = CV_NODE_MAP; + else if( strcmp( type_name, "seq" ) == 0 ) + elem_type = CV_NODE_SEQ; + else + { + info = cvFindType( type_name ); + if( info ) + elem_type = CV_NODE_USER; + } + } + + is_noname = key->str.len == 1 && key->str.ptr[0] == '_'; + if( !CV_NODE_IS_COLLECTION(node->tag) ) + { + icvFSCreateCollection( fs, is_noname ? CV_NODE_SEQ : CV_NODE_MAP, node ); + } + else if( is_noname ^ CV_NODE_IS_SEQ(node->tag) ) + CV_PARSE_ERROR( is_noname ? 
"Map element should have a name" : + "Sequence element should not have name (use <_>)" ); + + if( is_noname ) + elem = (CvFileNode*)cvSeqPush( node->data.seq, 0 ); + else + elem = cvGetFileNode( fs, node, key, 1 ); + + ptr = icvXMLParseValue( fs, ptr, elem, elem_type); + if( !is_noname ) + elem->tag |= CV_NODE_NAMED; + is_simple &= !CV_NODE_IS_COLLECTION(elem->tag); + elem->info = info; + ptr = icvXMLParseTag( fs, ptr, &key2, &list, &tag_type ); + if( tag_type != CV_XML_CLOSING_TAG || key2 != key ) + CV_PARSE_ERROR( "Mismatched closing tag" ); + have_space = 1; + } + else + { + if( !have_space ) + CV_PARSE_ERROR( "There should be space between literals" ); + + elem = node; + if( node->tag != CV_NODE_NONE ) + { + if( !CV_NODE_IS_COLLECTION(node->tag) ) + icvFSCreateCollection( fs, CV_NODE_SEQ, node ); + + elem = (CvFileNode*)cvSeqPush( node->data.seq, 0 ); + elem->info = 0; + } + + if( value_type != CV_NODE_STRING && + (cv_isdigit(c) || ((c == '-' || c == '+') && + (cv_isdigit(d) || d == '.')) || (c == '.' && cv_isalnum(d))) ) // a number + { + double fval; + int ival; + endptr = ptr + (c == '-' || c == '+'); + while( cv_isdigit(*endptr) ) + endptr++; + if( *endptr == '.' || *endptr == 'e' ) + { + fval = icv_strtod( fs, ptr, &endptr ); + /*if( endptr == ptr || cv_isalpha(*endptr) ) + icvProcessSpecialDouble( fs, ptr, &fval, &endptr ));*/ + elem->tag = CV_NODE_REAL; + elem->data.f = fval; + } + else + { + ival = (int)strtol( ptr, &endptr, 0 ); + elem->tag = CV_NODE_INT; + elem->data.i = ival; + } + + if( endptr == ptr ) + CV_PARSE_ERROR( "Invalid numeric value (inconsistent explicit type specification?)" ); + + ptr = endptr; + } + else + { + // string + char buf[CV_FS_MAX_LEN+16]; + int i = 0, len, is_quoted = 0; + elem->tag = CV_NODE_STRING; + if( c == '\"' ) + is_quoted = 1; + else + --ptr; + + for( ;; ) + { + c = *++ptr; + if( !cv_isalnum(c) ) + { + if( c == '\"' ) + { + if( !is_quoted ) + CV_PARSE_ERROR( "Literal \" is not allowed within a string. 
Use "" ); + ++ptr; + break; + } + else if( !cv_isprint(c) || c == '<' || (!is_quoted && cv_isspace(c))) + { + if( is_quoted ) + CV_PARSE_ERROR( "Closing \" is expected" ); + break; + } + else if( c == '\'' || c == '>' ) + { + CV_PARSE_ERROR( "Literal \' or > are not allowed. Use ' or >" ); + } + else if( c == '&' ) + { + if( *++ptr == '#' ) + { + int val, base = 10; + ptr++; + if( *ptr == 'x' ) + { + base = 16; + ptr++; + } + val = (int)strtol( ptr, &endptr, base ); + if( (unsigned)val > (unsigned)255 || + !endptr || *endptr != ';' ) + CV_PARSE_ERROR( "Invalid numeric value in the string" ); + c = (char)val; + } + else + { + endptr = ptr; + do c = *++endptr; + while( cv_isalnum(c) ); + if( c != ';' ) + CV_PARSE_ERROR( "Invalid character in the symbol entity name" ); + len = (int)(endptr - ptr); + if( len == 2 && memcmp( ptr, "lt", len ) == 0 ) + c = '<'; + else if( len == 2 && memcmp( ptr, "gt", len ) == 0 ) + c = '>'; + else if( len == 3 && memcmp( ptr, "amp", len ) == 0 ) + c = '&'; + else if( len == 4 && memcmp( ptr, "apos", len ) == 0 ) + c = '\''; + else if( len == 4 && memcmp( ptr, "quot", len ) == 0 ) + c = '\"'; + else + { + memcpy( buf + i, ptr-1, len + 2 ); + i += len + 2; + } + } + ptr = endptr; + } + } + buf[i++] = c; + if( i >= CV_FS_MAX_LEN ) + CV_PARSE_ERROR( "Too long string literal" ); + } + elem->data.str = cvMemStorageAllocString( fs->memstorage, buf, i ); + } + + if( !CV_NODE_IS_COLLECTION(value_type) && value_type != CV_NODE_NONE ) + break; + have_space = 0; + } + } + + if( (CV_NODE_TYPE(node->tag) == CV_NODE_NONE || + (CV_NODE_TYPE(node->tag) != value_type && + !CV_NODE_IS_COLLECTION(node->tag))) && + CV_NODE_IS_COLLECTION(value_type) ) + { + icvFSCreateCollection( fs, CV_NODE_IS_MAP(value_type) ? 
+ CV_NODE_MAP : CV_NODE_SEQ, node ); + } + + if( value_type != CV_NODE_NONE && + value_type != CV_NODE_TYPE(node->tag) ) + CV_PARSE_ERROR( "The actual type is different from the specified type" ); + + if( CV_NODE_IS_COLLECTION(node->tag) && is_simple ) + node->data.seq->flags |= CV_NODE_SEQ_SIMPLE; + + node->tag |= is_user_type ? CV_NODE_USER : 0; + return ptr; +} + + +static char* +icvXMLParseTag( CvFileStorage* fs, char* ptr, CvStringHashNode** _tag, + CvAttrList** _list, int* _tag_type ) +{ + int tag_type = 0; + CvStringHashNode* tagname = 0; + CvAttrList *first = 0, *last = 0; + int count = 0, max_count = 4; + int attr_buf_size = (max_count*2 + 1)*sizeof(char*) + sizeof(CvAttrList); + char* endptr; + char c; + int have_space; + + if( *ptr == '\0' ) + CV_PARSE_ERROR( "Preliminary end of the stream" ); + + if( *ptr != '<' ) + CV_PARSE_ERROR( "Tag should start with \'<\'" ); + + ptr++; + if( cv_isalnum(*ptr) || *ptr == '_' ) + tag_type = CV_XML_OPENING_TAG; + else if( *ptr == '/' ) + { + tag_type = CV_XML_CLOSING_TAG; + ptr++; + } + else if( *ptr == '?' ) + { + tag_type = CV_XML_HEADER_TAG; + ptr++; + } + else if( *ptr == '!' 
) + { + tag_type = CV_XML_DIRECTIVE_TAG; + assert( ptr[1] != '-' || ptr[2] != '-' ); + ptr++; + } + else + CV_PARSE_ERROR( "Unknown tag type" ); + + for(;;) + { + CvStringHashNode* attrname; + + if( !cv_isalpha(*ptr) && *ptr != '_' ) + CV_PARSE_ERROR( "Name should start with a letter or underscore" ); + + endptr = ptr - 1; + do c = *++endptr; + while( cv_isalnum(c) || c == '_' || c == '-' ); + + attrname = cvGetHashedKey( fs, ptr, (int)(endptr - ptr), 1 ); + ptr = endptr; + + if( !tagname ) + tagname = attrname; + else + { + if( tag_type == CV_XML_CLOSING_TAG ) + CV_PARSE_ERROR( "Closing tag should not contain any attributes" ); + + if( !last || count >= max_count ) + { + CvAttrList* chunk; + + chunk = (CvAttrList*)cvMemStorageAlloc( fs->memstorage, attr_buf_size ); + memset( chunk, 0, attr_buf_size ); + chunk->attr = (const char**)(chunk + 1); + count = 0; + if( !last ) + first = last = chunk; + else + last = last->next = chunk; + } + last->attr[count*2] = attrname->str.ptr; + } + + if( last ) + { + CvFileNode stub; + + if( *ptr != '=' ) + { + ptr = icvXMLSkipSpaces( fs, ptr, CV_XML_INSIDE_TAG ); + if( *ptr != '=' ) + CV_PARSE_ERROR( "Attribute name should be followed by \'=\'" ); + } + + c = *++ptr; + if( c != '\"' && c != '\'' ) + { + ptr = icvXMLSkipSpaces( fs, ptr, CV_XML_INSIDE_TAG ); + if( *ptr != '\"' && *ptr != '\'' ) + CV_PARSE_ERROR( "Attribute value should be put into single or double quotes" ); + } + + ptr = icvXMLParseValue( fs, ptr, &stub, CV_NODE_STRING ); + assert( stub.tag == CV_NODE_STRING ); + last->attr[count*2+1] = stub.data.str.ptr; + count++; + } + + c = *ptr; + have_space = cv_isspace(c) || c == '\0'; + + if( c != '>' ) + { + ptr = icvXMLSkipSpaces( fs, ptr, CV_XML_INSIDE_TAG ); + c = *ptr; + } + + if( c == '>' ) + { + if( tag_type == CV_XML_HEADER_TAG ) + CV_PARSE_ERROR( "Invalid closing tag for ' ) + CV_PARSE_ERROR( "Invalid closing tag for ' && tag_type == CV_XML_OPENING_TAG ) + { + tag_type = CV_XML_EMPTY_TAG; + ptr += 2; + break; + } + 
+ if( !have_space ) + CV_PARSE_ERROR( "There should be space between attributes" ); + } + + *_tag = tagname; + *_tag_type = tag_type; + *_list = first; + + return ptr; +} + + +static void +icvXMLParse( CvFileStorage* fs ) +{ + char* ptr = fs->buffer_start; + CvStringHashNode *key = 0, *key2 = 0; + CvAttrList* list = 0; + int tag_type = 0; + + // CV_XML_INSIDE_TAG is used to prohibit leading comments + ptr = icvXMLSkipSpaces( fs, ptr, CV_XML_INSIDE_TAG ); + + if( memcmp( ptr, "\'" ); + + ptr = icvXMLParseTag( fs, ptr, &key, &list, &tag_type ); + + /*{ + const char* version = cvAttrValue( list, "version" ); + if( version && strncmp( version, "1.", 2 ) != 0 ) + CV_Error( CV_StsParseError, "Unsupported version of XML" ); + }*/ + // we support any 8-bit encoding, so we do not need to check the actual encoding. + // we do not support utf-16, but in the case of utf-16 we will not get here anyway. + /*{ + const char* encoding = cvAttrValue( list, "encoding" ); + if( encoding && strcmp( encoding, "ASCII" ) != 0 && + strcmp( encoding, "UTF-8" ) != 0 && + strcmp( encoding, "utf-8" ) != 0 ) + CV_PARSE_ERROR( "Unsupported encoding" ); + }*/ + + while( *ptr != '\0' ) + { + ptr = icvXMLSkipSpaces( fs, ptr, 0 ); + + if( *ptr != '\0' ) + { + CvFileNode* root_node; + ptr = icvXMLParseTag( fs, ptr, &key, &list, &tag_type ); + if( tag_type != CV_XML_OPENING_TAG || + strcmp(key->str.ptr,"opencv_storage") != 0 ) + CV_PARSE_ERROR( " tag is missing" ); + + root_node = (CvFileNode*)cvSeqPush( fs->roots, 0 ); + ptr = icvXMLParseValue( fs, ptr, root_node, CV_NODE_NONE ); + ptr = icvXMLParseTag( fs, ptr, &key2, &list, &tag_type ); + if( tag_type != CV_XML_CLOSING_TAG || key != key2 ) + CV_PARSE_ERROR( " tag is missing" ); + ptr = icvXMLSkipSpaces( fs, ptr, 0 ); + } + } + + assert( fs->dummy_eof != 0 ); +} + + +/****************************************************************************************\ +* XML Emitter * 
+\****************************************************************************************/ + +#define icvXMLFlush icvFSFlush + +static void +icvXMLWriteTag( CvFileStorage* fs, const char* key, int tag_type, CvAttrList list ) +{ + char* ptr = fs->buffer; + int i, len = 0; + int struct_flags = fs->struct_flags; + + if( key && key[0] == '\0' ) + key = 0; + + if( tag_type == CV_XML_OPENING_TAG || tag_type == CV_XML_EMPTY_TAG ) + { + if( CV_NODE_IS_COLLECTION(struct_flags) ) + { + if( CV_NODE_IS_MAP(struct_flags) ^ (key != 0) ) + CV_Error( CV_StsBadArg, "An attempt to add element without a key to a map, " + "or add element with key to sequence" ); + } + else + { + struct_flags = CV_NODE_EMPTY + (key ? CV_NODE_MAP : CV_NODE_SEQ); + fs->is_first = 0; + } + + if( !CV_NODE_IS_EMPTY(struct_flags) ) + ptr = icvXMLFlush(fs); + } + + if( !key ) + key = "_"; + else if( key[0] == '_' && key[1] == '\0' ) + CV_Error( CV_StsBadArg, "A single _ is a reserved tag name" ); + + len = (int)strlen( key ); + *ptr++ = '<'; + if( tag_type == CV_XML_CLOSING_TAG ) + { + if( list.attr ) + CV_Error( CV_StsBadArg, "Closing tag should not include any attributes" ); + *ptr++ = '/'; + } + + if( !cv_isalpha(key[0]) && key[0] != '_' ) + CV_Error( CV_StsBadArg, "Key should start with a letter or _" ); + + ptr = icvFSResizeWriteBuffer( fs, ptr, len ); + for( i = 0; i < len; i++ ) + { + char c = key[i]; + if( !cv_isalnum(c) && c != '_' && c != '-' ) + CV_Error( CV_StsBadArg, "Key name may only contain alphanumeric characters [a-zA-Z0-9], '-' and '_'" ); + ptr[i] = c; + } + ptr += len; + + for(;;) + { + const char** attr = list.attr; + + for( ; attr && attr[0] != 0; attr += 2 ) + { + int len0 = (int)strlen(attr[0]); + int len1 = (int)strlen(attr[1]); + + ptr = icvFSResizeWriteBuffer( fs, ptr, len0 + len1 + 4 ); + *ptr++ = ' '; + memcpy( ptr, attr[0], len0 ); + ptr += len0; + *ptr++ = '='; + *ptr++ = '\"'; + memcpy( ptr, attr[1], len1 ); + ptr += len1; + *ptr++ = '\"'; + } + if( !list.next ) + break; + 
list = *list.next; + } + + if( tag_type == CV_XML_EMPTY_TAG ) + *ptr++ = '/'; + *ptr++ = '>'; + fs->buffer = ptr; + fs->struct_flags = struct_flags & ~CV_NODE_EMPTY; +} + + +static void +icvXMLStartWriteStruct( CvFileStorage* fs, const char* key, int struct_flags, + const char* type_name CV_DEFAULT(0)) +{ + CvXMLStackRecord parent; + const char* attr[10]; + int idx = 0; + + struct_flags = (struct_flags & (CV_NODE_TYPE_MASK|CV_NODE_FLOW)) | CV_NODE_EMPTY; + if( !CV_NODE_IS_COLLECTION(struct_flags)) + CV_Error( CV_StsBadArg, + "Some collection type: CV_NODE_SEQ or CV_NODE_MAP must be specified" ); + + if( type_name ) + { + attr[idx++] = "type_id"; + attr[idx++] = type_name; + } + attr[idx++] = 0; + + icvXMLWriteTag( fs, key, CV_XML_OPENING_TAG, cvAttrList(attr,0) ); + + parent.struct_flags = fs->struct_flags & ~CV_NODE_EMPTY; + parent.struct_indent = fs->struct_indent; + parent.struct_tag = fs->struct_tag; + cvSaveMemStoragePos( fs->strstorage, &parent.pos ); + cvSeqPush( fs->write_stack, &parent ); + + fs->struct_indent += CV_XML_INDENT; + if( !CV_NODE_IS_FLOW(struct_flags) ) + icvXMLFlush( fs ); + + fs->struct_flags = struct_flags; + if( key ) + { + fs->struct_tag = cvMemStorageAllocString( fs->strstorage, (char*)key, -1 ); + } + else + { + fs->struct_tag.ptr = 0; + fs->struct_tag.len = 0; + } +} + + +static void +icvXMLEndWriteStruct( CvFileStorage* fs ) +{ + CvXMLStackRecord parent; + + if( fs->write_stack->total == 0 ) + CV_Error( CV_StsError, "An extra closing tag" ); + + icvXMLWriteTag( fs, fs->struct_tag.ptr, CV_XML_CLOSING_TAG, cvAttrList(0,0) ); + cvSeqPop( fs->write_stack, &parent ); + + fs->struct_indent = parent.struct_indent; + fs->struct_flags = parent.struct_flags; + fs->struct_tag = parent.struct_tag; + cvRestoreMemStoragePos( fs->strstorage, &parent.pos ); +} + + +static void +icvXMLStartNextStream( CvFileStorage* fs ) +{ + if( !fs->is_first ) + { + while( fs->write_stack->total > 0 ) + icvXMLEndWriteStruct(fs); + + fs->struct_indent = 0; + 
icvXMLFlush(fs); + /* XML does not allow multiple top-level elements, + so we just put a comment and continue + the current (and the only) "stream" */ + icvPuts( fs, "\n\n" ); + /*fputs( "\n", fs->file ); + fputs( "\n", fs->file );*/ + fs->buffer = fs->buffer_start; + } +} + + +static void +icvXMLWriteScalar( CvFileStorage* fs, const char* key, const char* data, int len ) +{ + if( CV_NODE_IS_MAP(fs->struct_flags) || + (!CV_NODE_IS_COLLECTION(fs->struct_flags) && key) ) + { + icvXMLWriteTag( fs, key, CV_XML_OPENING_TAG, cvAttrList(0,0) ); + char* ptr = icvFSResizeWriteBuffer( fs, fs->buffer, len ); + memcpy( ptr, data, len ); + fs->buffer = ptr + len; + icvXMLWriteTag( fs, key, CV_XML_CLOSING_TAG, cvAttrList(0,0) ); + } + else + { + char* ptr = fs->buffer; + int new_offset = (int)(ptr - fs->buffer_start) + len; + + if( key ) + CV_Error( CV_StsBadArg, "elements with keys can not be written to sequence" ); + + fs->struct_flags = CV_NODE_SEQ; + + if( (new_offset > fs->wrap_margin && new_offset - fs->struct_indent > 10) || + (ptr > fs->buffer_start && ptr[-1] == '>' && !CV_NODE_IS_EMPTY(fs->struct_flags)) ) + { + ptr = icvXMLFlush(fs); + } + else if( ptr > fs->buffer_start + fs->struct_indent && ptr[-1] != '>' ) + *ptr++ = ' '; + + memcpy( ptr, data, len ); + fs->buffer = ptr + len; + } +} + + +static void +icvXMLWriteInt( CvFileStorage* fs, const char* key, int value ) +{ + char buf[128], *ptr = icv_itoa( value, buf, 10 ); + int len = (int)strlen(ptr); + icvXMLWriteScalar( fs, key, ptr, len ); +} + + +static void +icvXMLWriteReal( CvFileStorage* fs, const char* key, double value ) +{ + char buf[128]; + int len = (int)strlen( icvDoubleToString( buf, value )); + icvXMLWriteScalar( fs, key, buf, len ); +} + + +static void +icvXMLWriteString( CvFileStorage* fs, const char* key, const char* str, int quote ) +{ + char buf[CV_FS_MAX_LEN*6+16]; + char* data = (char*)str; + int i, len; + + if( !str ) + CV_Error( CV_StsNullPtr, "Null string pointer" ); + + len = 
(int)strlen(str); + if( len > CV_FS_MAX_LEN ) + CV_Error( CV_StsBadArg, "The written string is too long" ); + + if( quote || len == 0 || str[0] != '\"' || str[0] != str[len-1] ) + { + int need_quote = quote || len == 0; + data = buf; + *data++ = '\"'; + for( i = 0; i < len; i++ ) + { + char c = str[i]; + + if( (uchar)c >= 128 || c == ' ' ) + { + *data++ = c; + need_quote = 1; + } + else if( !cv_isprint(c) || c == '<' || c == '>' || c == '&' || c == '\'' || c == '\"' ) + { + *data++ = '&'; + if( c == '<' ) + { + memcpy(data, "lt", 2); + data += 2; + } + else if( c == '>' ) + { + memcpy(data, "gt", 2); + data += 2; + } + else if( c == '&' ) + { + memcpy(data, "amp", 3); + data += 3; + } + else if( c == '\'' ) + { + memcpy(data, "apos", 4); + data += 4; + } + else if( c == '\"' ) + { + memcpy( data, "quot", 4); + data += 4; + } + else + { + sprintf( data, "#x%02x", (uchar)c ); + data += 4; + } + *data++ = ';'; + need_quote = 1; + } + else + *data++ = c; + } + if( !need_quote && (cv_isdigit(str[0]) || + str[0] == '+' || str[0] == '-' || str[0] == '.' 
)) + need_quote = 1; + + if( need_quote ) + *data++ = '\"'; + len = (int)(data - buf) - !need_quote; + *data++ = '\0'; + data = buf + !need_quote; + } + + icvXMLWriteScalar( fs, key, data, len ); +} + + +static void +icvXMLWriteComment( CvFileStorage* fs, const char* comment, int eol_comment ) +{ + int len; + int multiline; + const char* eol; + char* ptr; + + if( !comment ) + CV_Error( CV_StsNullPtr, "Null comment" ); + + if( strstr(comment, "--") != 0 ) + CV_Error( CV_StsBadArg, "Double hyphen \'--\' is not allowed in the comments" ); + + len = (int)strlen(comment); + eol = strchr(comment, '\n'); + multiline = eol != 0; + ptr = fs->buffer; + + if( multiline || !eol_comment || fs->buffer_end - ptr < len + 5 ) + ptr = icvXMLFlush( fs ); + else if( ptr > fs->buffer_start + fs->struct_indent ) + *ptr++ = ' '; + + if( !multiline ) + { + ptr = icvFSResizeWriteBuffer( fs, ptr, len + 9 ); + sprintf( ptr, "", comment ); + len = (int)strlen(ptr); + } + else + { + strcpy( ptr, "" ); + fs->buffer = ptr + 3; + icvXMLFlush( fs ); + } +} + + +/****************************************************************************************\ +* Common High-Level Functions * +\****************************************************************************************/ + +CV_IMPL CvFileStorage* +cvOpenFileStorage( const char* filename, CvMemStorage* dststorage, int flags, const char* encoding ) +{ + CvFileStorage* fs = 0; + char* xml_buf = 0; + int default_block_size = 1 << 18; + bool append = (flags & 3) == CV_STORAGE_APPEND; + bool isGZ = false; + + if( !filename ) + CV_Error( CV_StsNullPtr, "NULL filename" ); + + fs = (CvFileStorage*)cvAlloc( sizeof(*fs) ); + memset( fs, 0, sizeof(*fs)); + + fs->memstorage = cvCreateMemStorage( default_block_size ); + fs->dststorage = dststorage ? 
dststorage : fs->memstorage; + + int fnamelen = (int)strlen(filename); + if( !fnamelen ) + CV_Error( CV_StsError, "Empty filename" ); + + fs->filename = (char*)cvMemStorageAlloc( fs->memstorage, fnamelen+1 ); + strcpy( fs->filename, filename ); + + char* dot_pos = strrchr(fs->filename, '.'); + char compression = '\0'; + + if( dot_pos && dot_pos[1] == 'g' && dot_pos[2] == 'z' && + (dot_pos[3] == '\0' || (cv_isdigit(dot_pos[3]) && dot_pos[4] == '\0')) ) + { + if( append ) + CV_Error(CV_StsNotImplemented, "Appending data to compressed file is not implemented" ); + isGZ = true; + compression = dot_pos[3]; + if( compression ) + dot_pos[3] = '\0', fnamelen--; + } + + fs->flags = CV_FILE_STORAGE; + fs->write_mode = (flags & 3) != 0; + + if( !isGZ ) + { + fs->file = fopen(fs->filename, !fs->write_mode ? "rt" : !append ? "wt" : "a+t" ); + if( !fs->file ) + goto _exit_; + } + else + { + char mode[] = { fs->write_mode ? 'w' : 'r', 'b', compression ? compression : '3', '\0' }; + fs->gzfile = gzopen(fs->filename, mode); + if( !fs->gzfile ) + goto _exit_; + } + + fs->roots = 0; + fs->struct_indent = 0; + fs->struct_flags = 0; + fs->wrap_margin = 71; + + if( fs->write_mode ) + { + // we use factor=6 for XML (the longest characters (' and ") are encoded with 6 bytes (' and ") + // and factor=4 for YAML ( as we use 4 bytes for non ASCII characters (e.g. \xAB)) + int buf_size = CV_FS_MAX_LEN*(fs->is_xml ? 6 : 4) + 1024; + + dot_pos = fs->filename + fnamelen - (isGZ ? 7 : 4); + fs->is_xml = dot_pos > fs->filename && (memcmp( dot_pos, ".xml", 4) == 0 || + memcmp(dot_pos, ".XML", 4) == 0 || memcmp(dot_pos, ".Xml", 4) == 0); + + if( append ) + fseek( fs->file, 0, SEEK_END ); + + fs->write_stack = cvCreateSeq( 0, sizeof(CvSeq), fs->is_xml ? 
+ sizeof(CvXMLStackRecord) : sizeof(int), fs->memstorage ); + fs->is_first = 1; + fs->struct_indent = 0; + fs->struct_flags = CV_NODE_EMPTY; + fs->buffer_start = fs->buffer = (char*)cvAlloc( buf_size + 1024 ); + fs->buffer_end = fs->buffer_start + buf_size; + if( fs->is_xml ) + { + int file_size = fs->file ? (int)ftell( fs->file ) : 0; + fs->strstorage = cvCreateChildMemStorage( fs->memstorage ); + if( !append || file_size == 0 ) + { + if( encoding ) + { + if( strcmp( encoding, "UTF-16" ) == 0 || + strcmp( encoding, "utf-16" ) == 0 || + strcmp( encoding, "Utf-16" ) == 0 ) + CV_Error( CV_StsBadArg, "UTF-16 XML encoding is not supported! Use 8-bit encoding\n"); + + CV_Assert( strlen(encoding) < 1000 ); + char buf[1100]; + sprintf(buf, "\n", encoding); + icvPuts( fs, buf ); + } + else + icvPuts( fs, "\n" ); + icvPuts( fs, "\n" ); + } + else + { + int xml_buf_size = 1 << 10; + char substr[] = ""; + int last_occurence = -1; + xml_buf_size = MIN(xml_buf_size, file_size); + fseek( fs->file, -xml_buf_size, SEEK_END ); + xml_buf = (char*)cvAlloc( xml_buf_size+2 ); + // find the last occurence of + for(;;) + { + int line_offset = ftell( fs->file ); + char* ptr0 = icvGets( fs, xml_buf, xml_buf_size ), *ptr; + if( !ptr0 ) + break; + ptr = ptr0; + for(;;) + { + ptr = strstr( ptr, substr ); + if( !ptr ) + break; + last_occurence = line_offset + (int)(ptr - ptr0); + ptr += strlen(substr); + } + } + if( last_occurence < 0 ) + CV_Error( CV_StsError, "Could not find in the end of file.\n" ); + icvClose( fs ); + fs->file = fopen( fs->filename, "r+t" ); + fseek( fs->file, last_occurence, SEEK_SET ); + // replace the last "" with " ", which has the same length + icvPuts( fs, " " ); + fseek( fs->file, 0, SEEK_END ); + icvPuts( fs, "\n" ); + } + fs->start_write_struct = icvXMLStartWriteStruct; + fs->end_write_struct = icvXMLEndWriteStruct; + fs->write_int = icvXMLWriteInt; + fs->write_real = icvXMLWriteReal; + fs->write_string = icvXMLWriteString; + fs->write_comment = 
icvXMLWriteComment; + fs->start_next_stream = icvXMLStartNextStream; + } + else + { + if( !append ) + icvPuts( fs, "%YAML:1.0\n" ); + else + icvPuts( fs, "...\n---\n" ); + fs->start_write_struct = icvYMLStartWriteStruct; + fs->end_write_struct = icvYMLEndWriteStruct; + fs->write_int = icvYMLWriteInt; + fs->write_real = icvYMLWriteReal; + fs->write_string = icvYMLWriteString; + fs->write_comment = icvYMLWriteComment; + fs->start_next_stream = icvYMLStartNextStream; + } + } + else + { + int buf_size = 1 << 20; + const char* yaml_signature = "%YAML:"; + char buf[16]; + icvGets( fs, buf, sizeof(buf)-2 ); + fs->is_xml = strncmp( buf, yaml_signature, strlen(yaml_signature) ) != 0; + + if( !isGZ ) + { + fseek( fs->file, 0, SEEK_END ); + buf_size = ftell( fs->file ); + buf_size = MIN( buf_size, (1 << 20) ); + buf_size = MAX( buf_size, CV_FS_MAX_LEN*2 + 1024 ); + } + icvRewind(fs); + + fs->str_hash = cvCreateMap( 0, sizeof(CvStringHash), + sizeof(CvStringHashNode), fs->memstorage, 256 ); + + fs->roots = cvCreateSeq( 0, sizeof(CvSeq), + sizeof(CvFileNode), fs->memstorage ); + + fs->buffer = fs->buffer_start = (char*)cvAlloc( buf_size + 256 ); + fs->buffer_end = fs->buffer_start + buf_size; + fs->buffer[0] = '\n'; + fs->buffer[1] = '\0'; + + //mode = cvGetErrMode(); + //cvSetErrMode( CV_ErrModeSilent ); + if( fs->is_xml ) + icvXMLParse( fs ); + else + icvYMLParse( fs ); + //cvSetErrMode( mode ); + + // release resources that we do not need anymore + cvFree( &fs->buffer_start ); + fs->buffer = fs->buffer_end = 0; + } +_exit_: + if( fs ) + { + if( cvGetErrStatus() < 0 || (!fs->file && !fs->gzfile) ) + { + cvReleaseFileStorage( &fs ); + } + else if( !fs->write_mode ) + { + icvClose(fs); + } + } + + cvFree( &xml_buf ); + return fs; +} + + +CV_IMPL void +cvStartWriteStruct( CvFileStorage* fs, const char* key, int struct_flags, + const char* type_name, CvAttrList /*attributes*/ ) +{ + CV_CHECK_OUTPUT_FILE_STORAGE(fs); + fs->start_write_struct( fs, key, struct_flags, type_name ); +} 
+ + +CV_IMPL void +cvEndWriteStruct( CvFileStorage* fs ) +{ + CV_CHECK_OUTPUT_FILE_STORAGE(fs); + fs->end_write_struct( fs ); +} + + +CV_IMPL void +cvWriteInt( CvFileStorage* fs, const char* key, int value ) +{ + CV_CHECK_OUTPUT_FILE_STORAGE(fs); + fs->write_int( fs, key, value ); +} + + +CV_IMPL void +cvWriteReal( CvFileStorage* fs, const char* key, double value ) +{ + CV_CHECK_OUTPUT_FILE_STORAGE(fs); + fs->write_real( fs, key, value ); +} + + +CV_IMPL void +cvWriteString( CvFileStorage* fs, const char* key, const char* value, int quote ) +{ + CV_CHECK_OUTPUT_FILE_STORAGE(fs); + fs->write_string( fs, key, value, quote ); +} + + +CV_IMPL void +cvWriteComment( CvFileStorage* fs, const char* comment, int eol_comment ) +{ + CV_CHECK_OUTPUT_FILE_STORAGE(fs); + fs->write_comment( fs, comment, eol_comment ); +} + + +CV_IMPL void +cvStartNextStream( CvFileStorage* fs ) +{ + CV_CHECK_OUTPUT_FILE_STORAGE(fs); + fs->start_next_stream( fs ); +} + + +static const char icvTypeSymbol[] = "ucwsifdr"; +#define CV_FS_MAX_FMT_PAIRS 128 + +static char* +icvEncodeFormat( int elem_type, char* dt ) +{ + sprintf( dt, "%d%c", CV_MAT_CN(elem_type), icvTypeSymbol[CV_MAT_DEPTH(elem_type)] ); + return dt + ( dt[2] == '\0' && dt[0] == '1' ); +} + +static int +icvDecodeFormat( const char* dt, int* fmt_pairs, int max_len ) +{ + int fmt_pair_count = 0; + int i = 0, k = 0, len = dt ? 
(int)strlen(dt) : 0; + + if( !dt || !len ) + return 0; + + assert( fmt_pairs != 0 && max_len > 0 ); + fmt_pairs[0] = 0; + max_len *= 2; + + for( ; k < len; k++ ) + { + char c = dt[k]; + + if( cv_isdigit(c) ) + { + int count = c - '0'; + if( cv_isdigit(dt[k+1]) ) + { + char* endptr = 0; + count = (int)strtol( dt+k, &endptr, 10 ); + k = (int)(endptr - dt) - 1; + } + + if( count <= 0 ) + CV_Error( CV_StsBadArg, "Invalid data type specification" ); + + fmt_pairs[i] = count; + } + else + { + const char* pos = strchr( icvTypeSymbol, c ); + if( !pos ) + CV_Error( CV_StsBadArg, "Invalid data type specification" ); + if( fmt_pairs[i] == 0 ) + fmt_pairs[i] = 1; + fmt_pairs[i+1] = (int)(pos - icvTypeSymbol); + if( i > 0 && fmt_pairs[i+1] == fmt_pairs[i-1] ) + fmt_pairs[i-2] += fmt_pairs[i]; + else + { + i += 2; + if( i >= max_len ) + CV_Error( CV_StsBadArg, "Too long data type specification" ); + } + fmt_pairs[i] = 0; + } + } + + fmt_pair_count = i/2; + return fmt_pair_count; +} + + +static int +icvCalcElemSize( const char* dt, int initial_size ) +{ + int size = 0; + int fmt_pairs[CV_FS_MAX_FMT_PAIRS], i, fmt_pair_count; + int comp_size; + + fmt_pair_count = icvDecodeFormat( dt, fmt_pairs, CV_FS_MAX_FMT_PAIRS ); + fmt_pair_count *= 2; + for( i = 0, size = initial_size; i < fmt_pair_count; i += 2 ) + { + comp_size = CV_ELEM_SIZE(fmt_pairs[i+1]); + size = cvAlign( size, comp_size ); + size += comp_size * fmt_pairs[i]; + } + if( initial_size == 0 ) + { + comp_size = CV_ELEM_SIZE(fmt_pairs[1]); + size = cvAlign( size, comp_size ); + } + return size; +} + + +static int +icvDecodeSimpleFormat( const char* dt ) +{ + int elem_type = -1; + int fmt_pairs[CV_FS_MAX_FMT_PAIRS], fmt_pair_count; + + fmt_pair_count = icvDecodeFormat( dt, fmt_pairs, CV_FS_MAX_FMT_PAIRS ); + if( fmt_pair_count != 1 || fmt_pairs[0] > 4 ) + CV_Error( CV_StsError, "Too complex format for the matrix" ); + + elem_type = CV_MAKETYPE( fmt_pairs[1], fmt_pairs[0] ); + + return elem_type; +} + + +CV_IMPL void 
+cvWriteRawData( CvFileStorage* fs, const void* _data, int len, const char* dt ) +{ + const char* data0 = (const char*)_data; + int offset = 0; + int fmt_pairs[CV_FS_MAX_FMT_PAIRS*2], k, fmt_pair_count; + char buf[256] = ""; + + CV_CHECK_OUTPUT_FILE_STORAGE( fs ); + + if( !data0 ) + CV_Error( CV_StsNullPtr, "Null data pointer" ); + + if( len < 0 ) + CV_Error( CV_StsOutOfRange, "Negative number of elements" ); + + fmt_pair_count = icvDecodeFormat( dt, fmt_pairs, CV_FS_MAX_FMT_PAIRS ); + + if( !len ) + return; + + if( fmt_pair_count == 1 ) + { + fmt_pairs[0] *= len; + len = 1; + } + + for(;len--;) + { + for( k = 0; k < fmt_pair_count; k++ ) + { + int i, count = fmt_pairs[k*2]; + int elem_type = fmt_pairs[k*2+1]; + int elem_size = CV_ELEM_SIZE(elem_type); + const char* data, *ptr; + + offset = cvAlign( offset, elem_size ); + data = data0 + offset; + + for( i = 0; i < count; i++ ) + { + switch( elem_type ) + { + case CV_8U: + ptr = icv_itoa( *(uchar*)data, buf, 10 ); + data++; + break; + case CV_8S: + ptr = icv_itoa( *(char*)data, buf, 10 ); + data++; + break; + case CV_16U: + ptr = icv_itoa( *(ushort*)data, buf, 10 ); + data += sizeof(ushort); + break; + case CV_16S: + ptr = icv_itoa( *(short*)data, buf, 10 ); + data += sizeof(short); + break; + case CV_32S: + ptr = icv_itoa( *(int*)data, buf, 10 ); + data += sizeof(int); + break; + case CV_32F: + ptr = icvFloatToString( buf, *(float*)data ); + data += sizeof(float); + break; + case CV_64F: + ptr = icvDoubleToString( buf, *(double*)data ); + data += sizeof(double); + break; + case CV_USRTYPE1: /* reference */ + ptr = icv_itoa( (int)*(size_t*)data, buf, 10 ); + data += sizeof(size_t); + break; + default: + assert(0); + return; + } + + if( fs->is_xml ) + { + int buf_len = (int)strlen(ptr); + icvXMLWriteScalar( fs, 0, ptr, buf_len ); + } + else + icvYMLWrite( fs, 0, ptr ); + } + + offset = (int)(data - data0); + } + } +} + + +CV_IMPL void +cvStartReadRawData( const CvFileStorage* fs, const CvFileNode* src, CvSeqReader* 
reader ) +{ + int node_type; + CV_CHECK_FILE_STORAGE( fs ); + + if( !src || !reader ) + CV_Error( CV_StsNullPtr, "Null pointer to source file node or reader" ); + + node_type = CV_NODE_TYPE(src->tag); + if( node_type == CV_NODE_INT || node_type == CV_NODE_REAL ) + { + // emulate reading from 1-element sequence + reader->ptr = (schar*)src; + reader->block_max = reader->ptr + sizeof(*src)*2; + reader->block_min = reader->ptr; + reader->seq = 0; + } + else if( node_type == CV_NODE_SEQ ) + { + cvStartReadSeq( src->data.seq, reader, 0 ); + } + else if( node_type == CV_NODE_NONE ) + { + memset( reader, 0, sizeof(*reader) ); + } + else + CV_Error( CV_StsBadArg, "The file node should be a numerical scalar or a sequence" ); +} + + +CV_IMPL void +cvReadRawDataSlice( const CvFileStorage* fs, CvSeqReader* reader, + int len, void* _data, const char* dt ) +{ + char* data0 = (char*)_data; + int fmt_pairs[CV_FS_MAX_FMT_PAIRS*2], k = 0, fmt_pair_count; + int i = 0, offset = 0, count = 0; + + CV_CHECK_FILE_STORAGE( fs ); + + if( !reader || !data0 ) + CV_Error( CV_StsNullPtr, "Null pointer to reader or destination array" ); + + if( !reader->seq && len != 1 ) + CV_Error( CV_StsBadSize, "The readed sequence is a scalar, thus len must be 1" ); + + fmt_pair_count = icvDecodeFormat( dt, fmt_pairs, CV_FS_MAX_FMT_PAIRS ); + + for(;;) + { + for( k = 0; k < fmt_pair_count; k++ ) + { + int elem_type = fmt_pairs[k*2+1]; + int elem_size = CV_ELEM_SIZE(elem_type); + char* data; + + count = fmt_pairs[k*2]; + offset = cvAlign( offset, elem_size ); + data = data0 + offset; + + for( i = 0; i < count; i++ ) + { + CvFileNode* node = (CvFileNode*)reader->ptr; + if( CV_NODE_IS_INT(node->tag) ) + { + int ival = node->data.i; + + switch( elem_type ) + { + case CV_8U: + *(uchar*)data = CV_CAST_8U(ival); + data++; + break; + case CV_8S: + *(char*)data = CV_CAST_8S(ival); + data++; + break; + case CV_16U: + *(ushort*)data = CV_CAST_16U(ival); + data += sizeof(ushort); + break; + case CV_16S: + *(short*)data = 
CV_CAST_16S(ival); + data += sizeof(short); + break; + case CV_32S: + *(int*)data = ival; + data += sizeof(int); + break; + case CV_32F: + *(float*)data = (float)ival; + data += sizeof(float); + break; + case CV_64F: + *(double*)data = (double)ival; + data += sizeof(double); + break; + case CV_USRTYPE1: /* reference */ + *(size_t*)data = ival; + data += sizeof(size_t); + break; + default: + assert(0); + return; + } + } + else if( CV_NODE_IS_REAL(node->tag) ) + { + double fval = node->data.f; + int ival; + + switch( elem_type ) + { + case CV_8U: + ival = cvRound(fval); + *(uchar*)data = CV_CAST_8U(ival); + data++; + break; + case CV_8S: + ival = cvRound(fval); + *(char*)data = CV_CAST_8S(ival); + data++; + break; + case CV_16U: + ival = cvRound(fval); + *(ushort*)data = CV_CAST_16U(ival); + data += sizeof(ushort); + break; + case CV_16S: + ival = cvRound(fval); + *(short*)data = CV_CAST_16S(ival); + data += sizeof(short); + break; + case CV_32S: + ival = cvRound(fval); + *(int*)data = ival; + data += sizeof(int); + break; + case CV_32F: + *(float*)data = (float)fval; + data += sizeof(float); + break; + case CV_64F: + *(double*)data = fval; + data += sizeof(double); + break; + case CV_USRTYPE1: /* reference */ + ival = cvRound(fval); + *(size_t*)data = ival; + data += sizeof(size_t); + break; + default: + assert(0); + return; + } + } + else + CV_Error( CV_StsError, + "The sequence element is not a numerical scalar" ); + + CV_NEXT_SEQ_ELEM( sizeof(CvFileNode), *reader ); + if( !--len ) + goto end_loop; + } + + offset = (int)(data - data0); + } + } + +end_loop: + if( i != count - 1 || k != fmt_pair_count - 1 ) + CV_Error( CV_StsBadSize, + "The sequence slice does not fit an integer number of records" ); + + if( !reader->seq ) + reader->ptr -= sizeof(CvFileNode); +} + + +CV_IMPL void +cvReadRawData( const CvFileStorage* fs, const CvFileNode* src, + void* data, const char* dt ) +{ + CvSeqReader reader; + + if( !src || !data ) + CV_Error( CV_StsNullPtr, "Null pointers to 
source file node or destination array" ); + + cvStartReadRawData( fs, src, &reader ); + cvReadRawDataSlice( fs, &reader, CV_NODE_IS_SEQ(src->tag) ? + src->data.seq->total : 1, data, dt ); +} + + +static void +icvWriteFileNode( CvFileStorage* fs, const char* name, const CvFileNode* node ); + +static void +icvWriteCollection( CvFileStorage* fs, const CvFileNode* node ) +{ + int i, total = node->data.seq->total; + int elem_size = node->data.seq->elem_size; + int is_map = CV_NODE_IS_MAP(node->tag); + CvSeqReader reader; + + cvStartReadSeq( node->data.seq, &reader, 0 ); + + for( i = 0; i < total; i++ ) + { + CvFileMapNode* elem = (CvFileMapNode*)reader.ptr; + if( !is_map || CV_IS_SET_ELEM(elem) ) + { + const char* name = is_map ? elem->key->str.ptr : 0; + icvWriteFileNode( fs, name, &elem->value ); + } + CV_NEXT_SEQ_ELEM( elem_size, reader ); + } +} + +static void +icvWriteFileNode( CvFileStorage* fs, const char* name, const CvFileNode* node ) +{ + switch( CV_NODE_TYPE(node->tag) ) + { + case CV_NODE_INT: + fs->write_int( fs, name, node->data.i ); + break; + case CV_NODE_REAL: + fs->write_real( fs, name, node->data.f ); + break; + case CV_NODE_STR: + fs->write_string( fs, name, node->data.str.ptr, 0 ); + break; + case CV_NODE_SEQ: + case CV_NODE_MAP: + fs->start_write_struct( fs, name, CV_NODE_TYPE(node->tag) + + (CV_NODE_SEQ_IS_SIMPLE(node->data.seq) ? CV_NODE_FLOW : 0), + node->info ? 
node->info->type_name : 0 ); + icvWriteCollection( fs, node ); + fs->end_write_struct( fs ); + break; + case CV_NODE_NONE: + fs->start_write_struct( fs, name, CV_NODE_SEQ, 0 ); + fs->end_write_struct( fs ); + break; + default: + CV_Error( CV_StsBadFlag, "Unknown type of file node" ); + } +} + + +CV_IMPL void +cvWriteFileNode( CvFileStorage* fs, const char* new_node_name, + const CvFileNode* node, int embed ) +{ + CvFileStorage* dst = 0; + CV_CHECK_OUTPUT_FILE_STORAGE(fs); + + if( !node ) + return; + + if( CV_NODE_IS_COLLECTION(node->tag) && embed ) + { + icvWriteCollection( fs, node ); + } + else + { + icvWriteFileNode( fs, new_node_name, node ); + } + /* + int i, stream_count; + stream_count = fs->roots->total; + for( i = 0; i < stream_count; i++ ) + { + CvFileNode* node = (CvFileNode*)cvGetSeqElem( fs->roots, i, 0 ); + icvDumpCollection( dst, node ); + if( i < stream_count - 1 ) + dst->start_next_stream( dst ); + }*/ + cvReleaseFileStorage( &dst ); +} + + +CV_IMPL const char* +cvGetFileNodeName( const CvFileNode* file_node ) +{ + return file_node && CV_NODE_HAS_NAME(file_node->tag) ? + ((CvFileMapNode*)file_node)->key->str.ptr : 0; +} + +/****************************************************************************************\ +* Reading/Writing etc. 
for standard types * +\****************************************************************************************/ + +/*#define CV_TYPE_NAME_MAT "opencv-matrix" +#define CV_TYPE_NAME_MATND "opencv-nd-matrix" +#define CV_TYPE_NAME_SPARSE_MAT "opencv-sparse-matrix" +#define CV_TYPE_NAME_IMAGE "opencv-image" +#define CV_TYPE_NAME_SEQ "opencv-sequence" +#define CV_TYPE_NAME_SEQ_TREE "opencv-sequence-tree" +#define CV_TYPE_NAME_GRAPH "opencv-graph"*/ + +/******************************* CvMat ******************************/ + +static int +icvIsMat( const void* ptr ) +{ + return CV_IS_MAT_HDR_Z(ptr); +} + +static void +icvWriteMat( CvFileStorage* fs, const char* name, + const void* struct_ptr, CvAttrList /*attr*/ ) +{ + const CvMat* mat = (const CvMat*)struct_ptr; + char dt[16]; + CvSize size; + int y; + + assert( CV_IS_MAT_HDR_Z(mat) ); + + cvStartWriteStruct( fs, name, CV_NODE_MAP, CV_TYPE_NAME_MAT ); + cvWriteInt( fs, "rows", mat->rows ); + cvWriteInt( fs, "cols", mat->cols ); + cvWriteString( fs, "dt", icvEncodeFormat( CV_MAT_TYPE(mat->type), dt ), 0 ); + cvStartWriteStruct( fs, "data", CV_NODE_SEQ + CV_NODE_FLOW ); + + size = cvGetSize(mat); + if( size.height > 0 && size.width > 0 && mat->data.ptr ) + { + if( CV_IS_MAT_CONT(mat->type) ) + { + size.width *= size.height; + size.height = 1; + } + + for( y = 0; y < size.height; y++ ) + cvWriteRawData( fs, mat->data.ptr + y*mat->step, size.width, dt ); + } + cvEndWriteStruct( fs ); + cvEndWriteStruct( fs ); +} + + +static int +icvFileNodeSeqLen( CvFileNode* node ) +{ + return CV_NODE_IS_COLLECTION(node->tag) ? 
node->data.seq->total : + CV_NODE_TYPE(node->tag) != CV_NODE_NONE; +} + + +static void* +icvReadMat( CvFileStorage* fs, CvFileNode* node ) +{ + void* ptr = 0; + CvMat* mat; + const char* dt; + CvFileNode* data; + int rows, cols, elem_type; + + rows = cvReadIntByName( fs, node, "rows", -1 ); + cols = cvReadIntByName( fs, node, "cols", -1 ); + dt = cvReadStringByName( fs, node, "dt", 0 ); + + if( rows < 0 || cols < 0 || dt < 0 ) + CV_Error( CV_StsError, "Some of essential matrix attributes are absent" ); + + elem_type = icvDecodeSimpleFormat( dt ); + + data = cvGetFileNodeByName( fs, node, "data" ); + if( !data ) + CV_Error( CV_StsError, "The matrix data is not found in file storage" ); + + int nelems = icvFileNodeSeqLen( data ); + if( nelems > 0 && nelems != rows*cols*CV_MAT_CN(elem_type) ) + CV_Error( CV_StsUnmatchedSizes, + "The matrix size does not match to the number of stored elements" ); + + if( nelems > 0 ) + { + mat = cvCreateMat( rows, cols, elem_type ); + cvReadRawData( fs, data, mat->data.ptr, dt ); + } + else + mat = cvCreateMatHeader( rows, cols, elem_type ); + + ptr = mat; + return ptr; +} + + +/******************************* CvMatND ******************************/ + +static int +icvIsMatND( const void* ptr ) +{ + return CV_IS_MATND_HDR(ptr); +} + + +static void +icvWriteMatND( CvFileStorage* fs, const char* name, + const void* struct_ptr, CvAttrList /*attr*/ ) +{ + CvMatND* mat = (CvMatND*)struct_ptr; + CvMatND stub; + CvNArrayIterator iterator; + int dims, sizes[CV_MAX_DIM]; + char dt[16]; + + assert( CV_IS_MATND_HDR(mat) ); + + cvStartWriteStruct( fs, name, CV_NODE_MAP, CV_TYPE_NAME_MATND ); + dims = cvGetDims( mat, sizes ); + cvStartWriteStruct( fs, "sizes", CV_NODE_SEQ + CV_NODE_FLOW ); + cvWriteRawData( fs, sizes, dims, "i" ); + cvEndWriteStruct( fs ); + cvWriteString( fs, "dt", icvEncodeFormat( cvGetElemType(mat), dt ), 0 ); + cvStartWriteStruct( fs, "data", CV_NODE_SEQ + CV_NODE_FLOW ); + + if( mat->dim[0].size > 0 && mat->data.ptr ) + { + 
cvInitNArrayIterator( 1, (CvArr**)&mat, 0, &stub, &iterator ); + + do + cvWriteRawData( fs, iterator.ptr[0], iterator.size.width, dt ); + while( cvNextNArraySlice( &iterator )); + } + cvEndWriteStruct( fs ); + cvEndWriteStruct( fs ); +} + + +static void* +icvReadMatND( CvFileStorage* fs, CvFileNode* node ) +{ + void* ptr = 0; + CvMatND* mat; + const char* dt; + CvFileNode* data; + CvFileNode* sizes_node; + int sizes[CV_MAX_DIM], dims, elem_type; + int i, total_size; + + sizes_node = cvGetFileNodeByName( fs, node, "sizes" ); + dt = cvReadStringByName( fs, node, "dt", 0 ); + + if( !sizes_node || !dt ) + CV_Error( CV_StsError, "Some of essential matrix attributes are absent" ); + + dims = CV_NODE_IS_SEQ(sizes_node->tag) ? sizes_node->data.seq->total : + CV_NODE_IS_INT(sizes_node->tag) ? 1 : -1; + + if( dims <= 0 || dims > CV_MAX_DIM ) + CV_Error( CV_StsParseError, "Could not determine the matrix dimensionality" ); + + cvReadRawData( fs, sizes_node, sizes, "i" ); + elem_type = icvDecodeSimpleFormat( dt ); + + data = cvGetFileNodeByName( fs, node, "data" ); + if( !data ) + CV_Error( CV_StsError, "The matrix data is not found in file storage" ); + + + + for( total_size = CV_MAT_CN(elem_type), i = 0; i < dims; i++ ) + total_size *= sizes[i]; + + int nelems = icvFileNodeSeqLen( data ); + + if( nelems > 0 && nelems != total_size ) + CV_Error( CV_StsUnmatchedSizes, + "The matrix size does not match to the number of stored elements" ); + + if( nelems > 0 ) + { + mat = cvCreateMatND( dims, sizes, elem_type ); + cvReadRawData( fs, data, mat->data.ptr, dt ); + } + else + mat = cvCreateMatNDHeader( dims, sizes, elem_type ); + + ptr = mat; + return ptr; +} + + +/******************************* CvSparseMat ******************************/ + +static int +icvIsSparseMat( const void* ptr ) +{ + return CV_IS_SPARSE_MAT(ptr); +} + + +static int +icvSortIdxCmpFunc( const void* _a, const void* _b, void* userdata ) +{ + int i, dims = *(int*)userdata; + const int* a = *(const int**)_a; + 
const int* b = *(const int**)_b; + + for( i = 0; i < dims; i++ ) + { + int delta = a[i] - b[i]; + if( delta ) + return delta; + } + + return 0; +} + + +static void +icvWriteSparseMat( CvFileStorage* fs, const char* name, + const void* struct_ptr, CvAttrList /*attr*/ ) +{ + CvMemStorage* memstorage = 0; + const CvSparseMat* mat = (const CvSparseMat*)struct_ptr; + CvSparseMatIterator iterator; + CvSparseNode* node; + CvSeq* elements; + CvSeqReader reader; + int i, dims; + int *prev_idx = 0; + char dt[16]; + + assert( CV_IS_SPARSE_MAT(mat) ); + + memstorage = cvCreateMemStorage(); + + cvStartWriteStruct( fs, name, CV_NODE_MAP, CV_TYPE_NAME_SPARSE_MAT ); + dims = cvGetDims( mat, 0 ); + + cvStartWriteStruct( fs, "sizes", CV_NODE_SEQ + CV_NODE_FLOW ); + cvWriteRawData( fs, mat->size, dims, "i" ); + cvEndWriteStruct( fs ); + cvWriteString( fs, "dt", icvEncodeFormat( CV_MAT_TYPE(mat->type), dt ), 0 ); + cvStartWriteStruct( fs, "data", CV_NODE_SEQ + CV_NODE_FLOW ); + + elements = cvCreateSeq( CV_SEQ_ELTYPE_PTR, sizeof(CvSeq), sizeof(int*), memstorage ); + + node = cvInitSparseMatIterator( mat, &iterator ); + while( node ) + { + int* idx = CV_NODE_IDX( mat, node ); + cvSeqPush( elements, &idx ); + node = cvGetNextSparseNode( &iterator ); + } + + cvSeqSort( elements, icvSortIdxCmpFunc, &dims ); + cvStartReadSeq( elements, &reader, 0 ); + + for( i = 0; i < elements->total; i++ ) + { + int* idx; + void* val; + int k = 0; + + CV_READ_SEQ_ELEM( idx, reader ); + if( i > 0 ) + { + for( ; idx[k] == prev_idx[k]; k++ ) + assert( k < dims ); + if( k < dims - 1 ) + fs->write_int( fs, 0, k - dims + 1 ); + } + for( ; k < dims; k++ ) + fs->write_int( fs, 0, idx[k] ); + prev_idx = idx; + + node = (CvSparseNode*)((uchar*)idx - mat->idxoffset ); + val = CV_NODE_VAL( mat, node ); + + cvWriteRawData( fs, val, 1, dt ); + } + + cvEndWriteStruct( fs ); + cvEndWriteStruct( fs ); + cvReleaseMemStorage( &memstorage ); +} + + +static void* +icvReadSparseMat( CvFileStorage* fs, CvFileNode* node ) +{ + 
void* ptr = 0; + CvSparseMat* mat; + const char* dt; + CvFileNode* data; + CvFileNode* sizes_node; + CvSeqReader reader; + CvSeq* elements; + int sizes[CV_MAX_DIM_HEAP], dims, elem_type, cn; + int i; + + sizes_node = cvGetFileNodeByName( fs, node, "sizes" ); + dt = cvReadStringByName( fs, node, "dt", 0 ); + + if( !sizes_node || !dt ) + CV_Error( CV_StsError, "Some of essential matrix attributes are absent" ); + + dims = CV_NODE_IS_SEQ(sizes_node->tag) ? sizes_node->data.seq->total : + CV_NODE_IS_INT(sizes_node->tag) ? 1 : -1; + + if( dims <= 0 || dims > CV_MAX_DIM_HEAP ) + CV_Error( CV_StsParseError, "Could not determine sparse matrix dimensionality" ); + + cvReadRawData( fs, sizes_node, sizes, "i" ); + elem_type = icvDecodeSimpleFormat( dt ); + + data = cvGetFileNodeByName( fs, node, "data" ); + if( !data || !CV_NODE_IS_SEQ(data->tag) ) + CV_Error( CV_StsError, "The matrix data is not found in file storage" ); + + mat = cvCreateSparseMat( dims, sizes, elem_type ); + + cn = CV_MAT_CN(elem_type); + int idx[CV_MAX_DIM_HEAP]; + elements = data->data.seq; + cvStartReadRawData( fs, data, &reader ); + + for( i = 0; i < elements->total; ) + { + CvFileNode* elem = (CvFileNode*)reader.ptr; + uchar* val; + int k; + if( !CV_NODE_IS_INT(elem->tag )) + CV_Error( CV_StsParseError, "Sparse matrix data is corrupted" ); + k = elem->data.i; + if( i > 0 && k >= 0 ) + idx[dims-1] = k; + else + { + if( i > 0 ) + k = dims + k - 1; + else + idx[0] = k, k = 1; + for( ; k < dims; k++ ) + { + CV_NEXT_SEQ_ELEM( elements->elem_size, reader ); + i++; + elem = (CvFileNode*)reader.ptr; + if( !CV_NODE_IS_INT(elem->tag ) || elem->data.i < 0 ) + CV_Error( CV_StsParseError, "Sparse matrix data is corrupted" ); + idx[k] = elem->data.i; + } + } + CV_NEXT_SEQ_ELEM( elements->elem_size, reader ); + i++; + val = cvPtrND( mat, idx, 0, 1, 0 ); + cvReadRawDataSlice( fs, &reader, cn, val, dt ); + i += cn; + } + + ptr = mat; + return ptr; +} + + +/******************************* IplImage 
******************************/ + +static int +icvIsImage( const void* ptr ) +{ + return CV_IS_IMAGE_HDR(ptr); +} + +static void +icvWriteImage( CvFileStorage* fs, const char* name, + const void* struct_ptr, CvAttrList /*attr*/ ) +{ + const IplImage* image = (const IplImage*)struct_ptr; + char dt_buf[16], *dt; + CvSize size; + int y, depth; + + assert( CV_IS_IMAGE(image) ); + + if( image->dataOrder == IPL_DATA_ORDER_PLANE ) + CV_Error( CV_StsUnsupportedFormat, + "Images with planar data layout are not supported" ); + + cvStartWriteStruct( fs, name, CV_NODE_MAP, CV_TYPE_NAME_IMAGE ); + cvWriteInt( fs, "width", image->width ); + cvWriteInt( fs, "height", image->height ); + cvWriteString( fs, "origin", image->origin == IPL_ORIGIN_TL + ? "top-left" : "bottom-left", 0 ); + cvWriteString( fs, "layout", image->dataOrder == IPL_DATA_ORDER_PLANE + ? "planar" : "interleaved", 0 ); + if( image->roi ) + { + cvStartWriteStruct( fs, "roi", CV_NODE_MAP + CV_NODE_FLOW ); + cvWriteInt( fs, "x", image->roi->xOffset ); + cvWriteInt( fs, "y", image->roi->yOffset ); + cvWriteInt( fs, "width", image->roi->width ); + cvWriteInt( fs, "height", image->roi->height ); + cvWriteInt( fs, "coi", image->roi->coi ); + cvEndWriteStruct( fs ); + } + + depth = IPL2CV_DEPTH(image->depth); + sprintf( dt_buf, "%d%c", image->nChannels, icvTypeSymbol[depth] ); + dt = dt_buf + (dt_buf[2] == '\0' && dt_buf[0] == '1'); + cvWriteString( fs, "dt", dt, 0 ); + + size = cvSize(image->width, image->height); + if( size.width*image->nChannels*CV_ELEM_SIZE(depth) == image->widthStep ) + { + size.width *= size.height; + size.height = 1; + } + + cvStartWriteStruct( fs, "data", CV_NODE_SEQ + CV_NODE_FLOW ); + for( y = 0; y < size.height; y++ ) + cvWriteRawData( fs, image->imageData + y*image->widthStep, size.width, dt ); + cvEndWriteStruct( fs ); + cvEndWriteStruct( fs ); +} + + +static void* +icvReadImage( CvFileStorage* fs, CvFileNode* node ) +{ + void* ptr = 0; + IplImage* image; + const char* dt; + CvFileNode* 
data; + CvFileNode* roi_node; + CvSeqReader reader; + CvRect roi; + int y, width, height, elem_type, coi, depth; + const char* origin, *data_order; + + width = cvReadIntByName( fs, node, "width", 0 ); + height = cvReadIntByName( fs, node, "height", 0 ); + dt = cvReadStringByName( fs, node, "dt", 0 ); + origin = cvReadStringByName( fs, node, "origin", 0 ); + + if( width == 0 || height == 0 || dt == 0 || origin == 0 ) + CV_Error( CV_StsError, "Some of essential image attributes are absent" ); + + elem_type = icvDecodeSimpleFormat( dt ); + data_order = cvReadStringByName( fs, node, "layout", "interleaved" ); + if( strcmp( data_order, "interleaved" ) != 0 ) + CV_Error( CV_StsError, "Only interleaved images can be read" ); + + data = cvGetFileNodeByName( fs, node, "data" ); + if( !data ) + CV_Error( CV_StsError, "The image data is not found in file storage" ); + + if( icvFileNodeSeqLen( data ) != width*height*CV_MAT_CN(elem_type) ) + CV_Error( CV_StsUnmatchedSizes, + "The matrix size does not match to the number of stored elements" ); + + depth = cvIplDepth(elem_type); + image = cvCreateImage( cvSize(width,height), depth, CV_MAT_CN(elem_type) ); + + roi_node = cvGetFileNodeByName( fs, node, "roi" ); + if( roi_node ) + { + roi.x = cvReadIntByName( fs, roi_node, "x", 0 ); + roi.y = cvReadIntByName( fs, roi_node, "y", 0 ); + roi.width = cvReadIntByName( fs, roi_node, "width", 0 ); + roi.height = cvReadIntByName( fs, roi_node, "height", 0 ); + coi = cvReadIntByName( fs, roi_node, "coi", 0 ); + + cvSetImageROI( image, roi ); + cvSetImageCOI( image, coi ); + } + + if( width*CV_ELEM_SIZE(elem_type) == image->widthStep ) + { + width *= height; + height = 1; + } + + width *= CV_MAT_CN(elem_type); + cvStartReadRawData( fs, data, &reader ); + for( y = 0; y < height; y++ ) + { + cvReadRawDataSlice( fs, &reader, width, + image->imageData + y*image->widthStep, dt ); + } + + ptr = image; + return ptr; +} + + +/******************************* CvSeq ******************************/ + 
+static int +icvIsSeq( const void* ptr ) +{ + return CV_IS_SEQ(ptr); +} + + +static void +icvReleaseSeq( void** ptr ) +{ + if( !ptr ) + CV_Error( CV_StsNullPtr, "NULL double pointer" ); + *ptr = 0; // it's impossible now to release seq, so just clear the pointer +} + + +static void* +icvCloneSeq( const void* ptr ) +{ + return cvSeqSlice( (CvSeq*)ptr, CV_WHOLE_SEQ, + 0 /* use the same storage as for the original sequence */, 1 ); +} + + +static void +icvWriteHeaderData( CvFileStorage* fs, const CvSeq* seq, + CvAttrList* attr, int initial_header_size ) +{ + char header_dt_buf[128]; + const char* header_dt = cvAttrValue( attr, "header_dt" ); + + if( header_dt ) + { + int dt_header_size; + dt_header_size = icvCalcElemSize( header_dt, initial_header_size ); + if( dt_header_size > seq->header_size ) + CV_Error( CV_StsUnmatchedSizes, + "The size of header calculated from \"header_dt\" is greater than header_size" ); + } + else if( seq->header_size > initial_header_size ) + { + if( CV_IS_SEQ(seq) && CV_IS_SEQ_POINT_SET(seq) && + seq->header_size == sizeof(CvPoint2DSeq) && + seq->elem_size == sizeof(int)*2 ) + { + CvPoint2DSeq* point_seq = (CvPoint2DSeq*)seq; + + cvStartWriteStruct( fs, "rect", CV_NODE_MAP + CV_NODE_FLOW ); + cvWriteInt( fs, "x", point_seq->rect.x ); + cvWriteInt( fs, "y", point_seq->rect.y ); + cvWriteInt( fs, "width", point_seq->rect.width ); + cvWriteInt( fs, "height", point_seq->rect.height ); + cvEndWriteStruct( fs ); + cvWriteInt( fs, "color", point_seq->color ); + } + else if( CV_IS_SEQ(seq) && CV_IS_SEQ_CHAIN(seq) && + CV_MAT_TYPE(seq->flags) == CV_8UC1 ) + { + CvChain* chain = (CvChain*)seq; + + cvStartWriteStruct( fs, "origin", CV_NODE_MAP + CV_NODE_FLOW ); + cvWriteInt( fs, "x", chain->origin.x ); + cvWriteInt( fs, "y", chain->origin.y ); + cvEndWriteStruct( fs ); + } + else + { + unsigned extra_size = seq->header_size - initial_header_size; + // a heuristic to provide nice defaults for sequences of int's & float's + if( extra_size % sizeof(int) 
== 0 ) + sprintf( header_dt_buf, "%ui", (unsigned)(extra_size/sizeof(int)) ); + else + sprintf( header_dt_buf, "%uu", extra_size ); + header_dt = header_dt_buf; + } + } + + if( header_dt ) + { + cvWriteString( fs, "header_dt", header_dt, 0 ); + cvStartWriteStruct( fs, "header_user_data", CV_NODE_SEQ + CV_NODE_FLOW ); + cvWriteRawData( fs, (uchar*)seq + sizeof(CvSeq), 1, header_dt ); + cvEndWriteStruct( fs ); + } +} + + +static char* +icvGetFormat( const CvSeq* seq, const char* dt_key, CvAttrList* attr, + int initial_elem_size, char* dt_buf ) +{ + char* dt = 0; + dt = (char*)cvAttrValue( attr, dt_key ); + + if( dt ) + { + int dt_elem_size; + dt_elem_size = icvCalcElemSize( dt, initial_elem_size ); + if( dt_elem_size != seq->elem_size ) + CV_Error( CV_StsUnmatchedSizes, + "The size of element calculated from \"dt\" and " + "the elem_size do not match" ); + } + else if( CV_MAT_TYPE(seq->flags) != 0 || seq->elem_size == 1 ) + { + if( CV_ELEM_SIZE(seq->flags) != seq->elem_size ) + CV_Error( CV_StsUnmatchedSizes, + "Size of sequence element (elem_size) is inconsistent with seq->flags" ); + dt = icvEncodeFormat( CV_MAT_TYPE(seq->flags), dt_buf ); + } + else if( seq->elem_size > initial_elem_size ) + { + unsigned extra_elem_size = seq->elem_size - initial_elem_size; + // a heuristic to provide nice defaults for sequences of int's & float's + if( extra_elem_size % sizeof(int) == 0 ) + sprintf( dt_buf, "%ui", (unsigned)(extra_elem_size/sizeof(int)) ); + else + sprintf( dt_buf, "%uu", extra_elem_size ); + dt = dt_buf; + } + + return dt; +} + + +static void +icvWriteSeq( CvFileStorage* fs, const char* name, + const void* struct_ptr, + CvAttrList attr, int level ) +{ + const CvSeq* seq = (CvSeq*)struct_ptr; + CvSeqBlock* block; + char buf[128]; + char dt_buf[128], *dt; + + assert( CV_IS_SEQ( seq )); + cvStartWriteStruct( fs, name, CV_NODE_MAP, CV_TYPE_NAME_SEQ ); + + if( level >= 0 ) + cvWriteInt( fs, "level", level ); + + dt = icvGetFormat( seq, "dt", &attr, 0, dt_buf ); + + 
strcpy(buf, ""); + if( CV_IS_SEQ_CLOSED(seq) ) + strcat(buf, " closed"); + if( CV_IS_SEQ_HOLE(seq) ) + strcat(buf, " hole"); + if( CV_IS_SEQ_CURVE(seq) ) + strcat(buf, " curve"); + if( CV_SEQ_ELTYPE(seq) == 0 && seq->elem_size != 1 ) + strcat(buf, " untyped"); + + cvWriteString( fs, "flags", buf + (buf[0] ? 1 : 0), 1 ); + + cvWriteInt( fs, "count", seq->total ); + + cvWriteString( fs, "dt", dt, 0 ); + + icvWriteHeaderData( fs, seq, &attr, sizeof(CvSeq) ); + cvStartWriteStruct( fs, "data", CV_NODE_SEQ + CV_NODE_FLOW ); + + for( block = seq->first; block; block = block->next ) + { + cvWriteRawData( fs, block->data, block->count, dt ); + if( block == seq->first->prev ) + break; + } + cvEndWriteStruct( fs ); + cvEndWriteStruct( fs ); +} + + +static void +icvWriteSeqTree( CvFileStorage* fs, const char* name, + const void* struct_ptr, CvAttrList attr ) +{ + const CvSeq* seq = (CvSeq*)struct_ptr; + const char* recursive_value = cvAttrValue( &attr, "recursive" ); + int is_recursive = recursive_value && + strcmp(recursive_value,"0") != 0 && + strcmp(recursive_value,"false") != 0 && + strcmp(recursive_value,"False") != 0 && + strcmp(recursive_value,"FALSE") != 0; + + assert( CV_IS_SEQ( seq )); + + if( !is_recursive ) + { + icvWriteSeq( fs, name, seq, attr, -1 ); + } + else + { + CvTreeNodeIterator tree_iterator; + + cvStartWriteStruct( fs, name, CV_NODE_MAP, CV_TYPE_NAME_SEQ_TREE ); + cvStartWriteStruct( fs, "sequences", CV_NODE_SEQ ); + cvInitTreeNodeIterator( &tree_iterator, seq, INT_MAX ); + + for(;;) + { + if( !tree_iterator.node ) + break; + icvWriteSeq( fs, 0, tree_iterator.node, attr, tree_iterator.level ); + cvNextTreeNode( &tree_iterator ); + } + + cvEndWriteStruct( fs ); + cvEndWriteStruct( fs ); + } +} + + +static void* +icvReadSeq( CvFileStorage* fs, CvFileNode* node ) +{ + void* ptr = 0; + CvSeq* seq; + CvSeqBlock* block; + CvFileNode *data, *header_node, *rect_node, *origin_node; + CvSeqReader reader; + int total, flags; + int elem_size, header_size = 
sizeof(CvSeq); + int fmt_pairs[CV_FS_MAX_FMT_PAIRS], i, fmt_pair_count; + int items_per_elem = 0; + const char* flags_str; + const char* header_dt; + const char* dt; + char* endptr = 0; + + flags_str = cvReadStringByName( fs, node, "flags", 0 ); + total = cvReadIntByName( fs, node, "count", -1 ); + dt = cvReadStringByName( fs, node, "dt", 0 ); + + if( !flags_str || total == -1 || !dt ) + CV_Error( CV_StsError, "Some of essential sequence attributes are absent" ); + + flags = CV_SEQ_MAGIC_VAL; + + if( cv_isdigit(flags_str[0]) ) + { + const int OLD_SEQ_ELTYPE_BITS = 9; + const int OLD_SEQ_ELTYPE_MASK = (1 << OLD_SEQ_ELTYPE_BITS) - 1; + const int OLD_SEQ_KIND_BITS = 3; + const int OLD_SEQ_KIND_MASK = ((1 << OLD_SEQ_KIND_BITS) - 1) << OLD_SEQ_ELTYPE_BITS; + const int OLD_SEQ_KIND_CURVE = 1 << OLD_SEQ_ELTYPE_BITS; + const int OLD_SEQ_FLAG_SHIFT = OLD_SEQ_KIND_BITS + OLD_SEQ_ELTYPE_BITS; + const int OLD_SEQ_FLAG_CLOSED = 1 << OLD_SEQ_FLAG_SHIFT; + const int OLD_SEQ_FLAG_HOLE = 8 << OLD_SEQ_FLAG_SHIFT; + + int flags0 = (int)strtol( flags_str, &endptr, 16 ); + if( endptr == flags_str || (flags0 & CV_MAGIC_MASK) != CV_SEQ_MAGIC_VAL ) + CV_Error( CV_StsError, "The sequence flags are invalid" ); + if( (flags0 & OLD_SEQ_KIND_MASK) == OLD_SEQ_KIND_CURVE ) + flags |= CV_SEQ_KIND_CURVE; + if( flags0 & OLD_SEQ_FLAG_CLOSED ) + flags |= CV_SEQ_FLAG_CLOSED; + if( flags0 & OLD_SEQ_FLAG_HOLE ) + flags |= CV_SEQ_FLAG_HOLE; + flags |= flags0 & OLD_SEQ_ELTYPE_MASK; + } + else + { + if( strstr(flags_str, "curve") ) + flags |= CV_SEQ_KIND_CURVE; + if( strstr(flags_str, "closed") ) + flags |= CV_SEQ_FLAG_CLOSED; + if( strstr(flags_str, "hole") ) + flags |= CV_SEQ_FLAG_HOLE; + if( !strstr(flags_str, "untyped") ) + { + try + { + flags |= icvDecodeSimpleFormat(dt); + } + catch(...) 
+ { + } + } + } + + header_dt = cvReadStringByName( fs, node, "header_dt", 0 ); + header_node = cvGetFileNodeByName( fs, node, "header_user_data" ); + + if( (header_dt != 0) ^ (header_node != 0) ) + CV_Error( CV_StsError, + "One of \"header_dt\" and \"header_user_data\" is there, while the other is not" ); + + rect_node = cvGetFileNodeByName( fs, node, "rect" ); + origin_node = cvGetFileNodeByName( fs, node, "origin" ); + + if( (header_node != 0) + (rect_node != 0) + (origin_node != 0) > 1 ) + CV_Error( CV_StsError, "Only one of \"header_user_data\", \"rect\" and \"origin\" tags may occur" ); + + if( header_dt ) + { + header_size = icvCalcElemSize( header_dt, header_size ); + } + else if( rect_node ) + header_size = sizeof(CvPoint2DSeq); + else if( origin_node ) + header_size = sizeof(CvChain); + + elem_size = icvCalcElemSize( dt, 0 ); + seq = cvCreateSeq( flags, header_size, elem_size, fs->dststorage ); + + if( header_node ) + { + cvReadRawData( fs, header_node, (char*)seq + sizeof(CvSeq), header_dt ); + } + else if( rect_node ) + { + CvPoint2DSeq* point_seq = (CvPoint2DSeq*)seq; + point_seq->rect.x = cvReadIntByName( fs, rect_node, "x", 0 ); + point_seq->rect.y = cvReadIntByName( fs, rect_node, "y", 0 ); + point_seq->rect.width = cvReadIntByName( fs, rect_node, "width", 0 ); + point_seq->rect.height = cvReadIntByName( fs, rect_node, "height", 0 ); + point_seq->color = cvReadIntByName( fs, node, "color", 0 ); + } + else if( origin_node ) + { + CvChain* chain = (CvChain*)seq; + chain->origin.x = cvReadIntByName( fs, origin_node, "x", 0 ); + chain->origin.y = cvReadIntByName( fs, origin_node, "y", 0 ); + } + + cvSeqPushMulti( seq, 0, total, 0 ); + fmt_pair_count = icvDecodeFormat( dt, fmt_pairs, CV_FS_MAX_FMT_PAIRS ); + fmt_pair_count *= 2; + for( i = 0; i < fmt_pair_count; i += 2 ) + items_per_elem += fmt_pairs[i]; + + data = cvGetFileNodeByName( fs, node, "data" ); + if( !data ) + CV_Error( CV_StsError, "The image data is not found in file storage" ); + + if( 
icvFileNodeSeqLen( data ) != total*items_per_elem ) + CV_Error( CV_StsError, "The number of stored elements does not match to \"count\"" ); + + cvStartReadRawData( fs, data, &reader ); + for( block = seq->first; block; block = block->next ) + { + int delta = block->count*items_per_elem; + cvReadRawDataSlice( fs, &reader, delta, block->data, dt ); + if( block == seq->first->prev ) + break; + } + + ptr = seq; + return ptr; +} + + +static void* +icvReadSeqTree( CvFileStorage* fs, CvFileNode* node ) +{ + void* ptr = 0; + CvFileNode *sequences_node = cvGetFileNodeByName( fs, node, "sequences" ); + CvSeq* sequences; + CvSeq* root = 0; + CvSeq* parent = 0; + CvSeq* prev_seq = 0; + CvSeqReader reader; + int i, total; + int prev_level = 0; + + if( !sequences_node || !CV_NODE_IS_SEQ(sequences_node->tag) ) + CV_Error( CV_StsParseError, + "opencv-sequence-tree instance should contain a field \"sequences\" that should be a sequence" ); + + sequences = sequences_node->data.seq; + total = sequences->total; + + cvStartReadSeq( sequences, &reader, 0 ); + for( i = 0; i < total; i++ ) + { + CvFileNode* elem = (CvFileNode*)reader.ptr; + CvSeq* seq; + int level; + seq = (CvSeq*)cvRead( fs, elem ); + level = cvReadIntByName( fs, elem, "level", -1 ); + if( level < 0 ) + CV_Error( CV_StsParseError, "All the sequence tree nodes should contain \"level\" field" ); + if( !root ) + root = seq; + if( level > prev_level ) + { + assert( level == prev_level + 1 ); + parent = prev_seq; + prev_seq = 0; + if( parent ) + parent->v_next = seq; + } + else if( level < prev_level ) + { + for( ; prev_level > level; prev_level-- ) + prev_seq = prev_seq->v_prev; + parent = prev_seq->v_prev; + } + seq->h_prev = prev_seq; + if( prev_seq ) + prev_seq->h_next = seq; + seq->v_prev = parent; + prev_seq = seq; + prev_level = level; + CV_NEXT_SEQ_ELEM( sequences->elem_size, reader ); + } + + ptr = root; + return ptr; +} + +/******************************* CvGraph ******************************/ + +static int 
+icvIsGraph( const void* ptr ) +{ + return CV_IS_GRAPH(ptr); +} + + +static void +icvReleaseGraph( void** ptr ) +{ + if( !ptr ) + CV_Error( CV_StsNullPtr, "NULL double pointer" ); + + *ptr = 0; // it's impossible now to release graph, so just clear the pointer +} + + +static void* +icvCloneGraph( const void* ptr ) +{ + return cvCloneGraph( (const CvGraph*)ptr, 0 ); +} + + +static void +icvWriteGraph( CvFileStorage* fs, const char* name, + const void* struct_ptr, CvAttrList attr ) +{ + int* flag_buf = 0; + char* write_buf = 0; + const CvGraph* graph = (const CvGraph*)struct_ptr; + CvSeqReader reader; + char buf[128]; + int i, k, vtx_count, edge_count; + char vtx_dt_buf[128], *vtx_dt; + char edge_dt_buf[128], *edge_dt; + int write_buf_size; + + assert( CV_IS_GRAPH(graph) ); + vtx_count = cvGraphGetVtxCount( graph ); + edge_count = cvGraphGetEdgeCount( graph ); + flag_buf = (int*)cvAlloc( vtx_count*sizeof(flag_buf[0])); + + // count vertices + cvStartReadSeq( (CvSeq*)graph, &reader ); + for( i = 0, k = 0; i < graph->total; i++ ) + { + if( CV_IS_SET_ELEM( reader.ptr )) + { + CvGraphVtx* vtx = (CvGraphVtx*)reader.ptr; + flag_buf[k] = vtx->flags; + vtx->flags = k++; + } + CV_NEXT_SEQ_ELEM( graph->elem_size, reader ); + } + + // write header + cvStartWriteStruct( fs, name, CV_NODE_MAP, CV_TYPE_NAME_GRAPH ); + + cvWriteString(fs, "flags", CV_IS_GRAPH_ORIENTED(graph) ? "oriented" : "", 1); + + cvWriteInt( fs, "vertex_count", vtx_count ); + vtx_dt = icvGetFormat( (CvSeq*)graph, "vertex_dt", + &attr, sizeof(CvGraphVtx), vtx_dt_buf ); + if( vtx_dt ) + cvWriteString( fs, "vertex_dt", vtx_dt, 0 ); + + cvWriteInt( fs, "edge_count", edge_count ); + edge_dt = icvGetFormat( (CvSeq*)graph->edges, "edge_dt", + &attr, sizeof(CvGraphEdge), buf ); + sprintf( edge_dt_buf, "2if%s", edge_dt ? 
edge_dt : "" ); + edge_dt = edge_dt_buf; + cvWriteString( fs, "edge_dt", edge_dt, 0 ); + + icvWriteHeaderData( fs, (CvSeq*)graph, &attr, sizeof(CvGraph) ); + + write_buf_size = MAX( 3*graph->elem_size, 1 << 16 ); + write_buf_size = MAX( 3*graph->edges->elem_size, write_buf_size ); + write_buf = (char*)cvAlloc( write_buf_size ); + + // as vertices and edges are written in similar way, + // do it as a parametrized 2-iteration loop + for( k = 0; k < 2; k++ ) + { + const char* dt = k == 0 ? vtx_dt : edge_dt; + if( dt ) + { + CvSet* data = k == 0 ? (CvSet*)graph : graph->edges; + int elem_size = data->elem_size; + int write_elem_size = icvCalcElemSize( dt, 0 ); + char* src_ptr = write_buf; + int write_max = write_buf_size / write_elem_size, write_count = 0; + + // alignment of user part of the edge data following 2if + int edge_user_align = sizeof(float); + + if( k == 1 ) + { + int fmt_pairs[CV_FS_MAX_FMT_PAIRS], fmt_pair_count; + fmt_pair_count = icvDecodeFormat( dt, fmt_pairs, CV_FS_MAX_FMT_PAIRS ); + if( fmt_pair_count > 2 && CV_ELEM_SIZE(fmt_pairs[2*2+1]) >= (int)sizeof(double)) + edge_user_align = sizeof(double); + } + + cvStartWriteStruct( fs, k == 0 ? 
"vertices" : "edges", + CV_NODE_SEQ + CV_NODE_FLOW ); + cvStartReadSeq( (CvSeq*)data, &reader ); + for( i = 0; i < data->total; i++ ) + { + if( CV_IS_SET_ELEM( reader.ptr )) + { + if( k == 0 ) // vertices + memcpy( src_ptr, reader.ptr + sizeof(CvGraphVtx), write_elem_size ); + else + { + CvGraphEdge* edge = (CvGraphEdge*)reader.ptr; + src_ptr = (char*)cvAlignPtr( src_ptr, sizeof(int) ); + ((int*)src_ptr)[0] = edge->vtx[0]->flags; + ((int*)src_ptr)[1] = edge->vtx[1]->flags; + *(float*)(src_ptr + sizeof(int)*2) = edge->weight; + if( elem_size > (int)sizeof(CvGraphEdge) ) + { + char* src_ptr2 = (char*)cvAlignPtr( src_ptr + 2*sizeof(int) + + sizeof(float), edge_user_align ); + memcpy( src_ptr2, edge + 1, elem_size - sizeof(CvGraphEdge) ); + } + } + src_ptr += write_elem_size; + if( ++write_count >= write_max ) + { + cvWriteRawData( fs, write_buf, write_count, dt ); + write_count = 0; + src_ptr = write_buf; + } + } + CV_NEXT_SEQ_ELEM( data->elem_size, reader ); + } + + if( write_count > 0 ) + cvWriteRawData( fs, write_buf, write_count, dt ); + cvEndWriteStruct( fs ); + } + } + + cvEndWriteStruct( fs ); + + // final stage. 
restore the graph flags + cvStartReadSeq( (CvSeq*)graph, &reader ); + vtx_count = 0; + for( i = 0; i < graph->total; i++ ) + { + if( CV_IS_SET_ELEM( reader.ptr )) + ((CvGraphVtx*)reader.ptr)->flags = flag_buf[vtx_count++]; + CV_NEXT_SEQ_ELEM( graph->elem_size, reader ); + } + + cvFree( &write_buf ); + cvFree( &flag_buf ); +} + + +static void* +icvReadGraph( CvFileStorage* fs, CvFileNode* node ) +{ + void* ptr = 0; + char* read_buf = 0; + CvGraphVtx** vtx_buf = 0; + CvGraph* graph; + CvFileNode *header_node, *vtx_node, *edge_node; + int flags, vtx_count, edge_count; + int vtx_size = sizeof(CvGraphVtx), edge_size, header_size = sizeof(CvGraph); + int src_vtx_size = 0, src_edge_size; + int fmt_pairs[CV_FS_MAX_FMT_PAIRS], fmt_pair_count; + int vtx_items_per_elem = 0, edge_items_per_elem = 0; + int edge_user_align = sizeof(float); + int read_buf_size; + int i, k; + const char* flags_str; + const char* header_dt; + const char* vtx_dt; + const char* edge_dt; + char* endptr = 0; + + flags_str = cvReadStringByName( fs, node, "flags", 0 ); + vtx_dt = cvReadStringByName( fs, node, "vertex_dt", 0 ); + edge_dt = cvReadStringByName( fs, node, "edge_dt", 0 ); + vtx_count = cvReadIntByName( fs, node, "vertex_count", -1 ); + edge_count = cvReadIntByName( fs, node, "edge_count", -1 ); + + if( !flags_str || vtx_count == -1 || edge_count == -1 || !edge_dt ) + CV_Error( CV_StsError, "Some of essential graph attributes are absent" ); + + flags = CV_SET_MAGIC_VAL + CV_GRAPH; + + if( isxdigit(flags_str[0]) ) + { + const int OLD_SEQ_ELTYPE_BITS = 9; + const int OLD_SEQ_KIND_BITS = 3; + const int OLD_SEQ_FLAG_SHIFT = OLD_SEQ_KIND_BITS + OLD_SEQ_ELTYPE_BITS; + const int OLD_GRAPH_FLAG_ORIENTED = 1 << OLD_SEQ_FLAG_SHIFT; + + int flags0 = (int)strtol( flags_str, &endptr, 16 ); + if( endptr == flags_str || (flags0 & CV_MAGIC_MASK) != CV_SET_MAGIC_VAL ) + CV_Error( CV_StsError, "The sequence flags are invalid" ); + if( flags0 & OLD_GRAPH_FLAG_ORIENTED ) + flags |= CV_GRAPH_FLAG_ORIENTED; + } + 
else + { + if( strstr(flags_str, "oriented") ) + flags |= CV_GRAPH_FLAG_ORIENTED; + } + + header_dt = cvReadStringByName( fs, node, "header_dt", 0 ); + header_node = cvGetFileNodeByName( fs, node, "header_user_data" ); + + if( (header_dt != 0) ^ (header_node != 0) ) + CV_Error( CV_StsError, + "One of \"header_dt\" and \"header_user_data\" is there, while the other is not" ); + + if( header_dt ) + header_size = icvCalcElemSize( header_dt, header_size ); + + if( vtx_dt > 0 ) + { + src_vtx_size = icvCalcElemSize( vtx_dt, 0 ); + vtx_size = icvCalcElemSize( vtx_dt, vtx_size ); + fmt_pair_count = icvDecodeFormat( edge_dt, + fmt_pairs, CV_FS_MAX_FMT_PAIRS ); + fmt_pair_count *= 2; + for( i = 0; i < fmt_pair_count; i += 2 ) + vtx_items_per_elem += fmt_pairs[i]; + } + + { + char dst_edge_dt_buf[128]; + const char* dst_edge_dt = 0; + + fmt_pair_count = icvDecodeFormat( edge_dt, + fmt_pairs, CV_FS_MAX_FMT_PAIRS ); + if( fmt_pair_count < 2 || + fmt_pairs[0] != 2 || fmt_pairs[1] != CV_32S || + fmt_pairs[2] < 1 || fmt_pairs[3] != CV_32F ) + CV_Error( CV_StsBadArg, + "Graph edges should start with 2 integers and a float" ); + + // alignment of user part of the edge data following 2if + if( fmt_pair_count > 2 && CV_ELEM_SIZE(fmt_pairs[5]) >= (int)sizeof(double)) + edge_user_align = sizeof(double); + + fmt_pair_count *= 2; + for( i = 0; i < fmt_pair_count; i += 2 ) + edge_items_per_elem += fmt_pairs[i]; + + if( edge_dt[2] == 'f' || (edge_dt[2] == '1' && edge_dt[3] == 'f') ) + dst_edge_dt = edge_dt + 3 + cv_isdigit(edge_dt[2]); + else + { + int val = (int)strtol( edge_dt + 2, &endptr, 10 ); + sprintf( dst_edge_dt_buf, "%df%s", val-1, endptr ); + dst_edge_dt = dst_edge_dt_buf; + } + + edge_size = icvCalcElemSize( dst_edge_dt, sizeof(CvGraphEdge) ); + src_edge_size = icvCalcElemSize( edge_dt, 0 ); + } + + graph = cvCreateGraph( flags, header_size, vtx_size, edge_size, fs->dststorage ); + + if( header_node ) + cvReadRawData( fs, header_node, (char*)graph + sizeof(CvGraph), header_dt ); 
+ + read_buf_size = MAX( src_vtx_size*3, 1 << 16 ); + read_buf_size = MAX( src_edge_size*3, read_buf_size ); + read_buf = (char*)cvAlloc( read_buf_size ); + vtx_buf = (CvGraphVtx**)cvAlloc( vtx_count * sizeof(vtx_buf[0]) ); + + vtx_node = cvGetFileNodeByName( fs, node, "vertices" ); + edge_node = cvGetFileNodeByName( fs, node, "edges" ); + if( !edge_node ) + CV_Error( CV_StsBadArg, "No edges data" ); + if( vtx_dt && !vtx_node ) + CV_Error( CV_StsBadArg, "No vertices data" ); + + // as vertices and edges are read in similar way, + // do it as a parametrized 2-iteration loop + for( k = 0; k < 2; k++ ) + { + const char* dt = k == 0 ? vtx_dt : edge_dt; + int elem_size = k == 0 ? vtx_size : edge_size; + int src_elem_size = k == 0 ? src_vtx_size : src_edge_size; + int items_per_elem = k == 0 ? vtx_items_per_elem : edge_items_per_elem; + int elem_count = k == 0 ? vtx_count : edge_count; + char* dst_ptr = read_buf; + int read_max = read_buf_size /MAX(src_elem_size, 1), read_count = 0; + CvSeqReader reader; + if(dt) + cvStartReadRawData( fs, k == 0 ? 
vtx_node : edge_node, &reader ); + + for( i = 0; i < elem_count; i++ ) + { + if( read_count == 0 && dt ) + { + int count = MIN( elem_count - i, read_max )*items_per_elem; + cvReadRawDataSlice( fs, &reader, count, read_buf, dt ); + read_count = count; + dst_ptr = read_buf; + } + + if( k == 0 ) + { + CvGraphVtx* vtx; + cvGraphAddVtx( graph, 0, &vtx ); + vtx_buf[i] = vtx; + if( dt ) + memcpy( vtx + 1, dst_ptr, src_elem_size ); + } + else + { + CvGraphEdge* edge = 0; + int vtx1 = ((int*)dst_ptr)[0]; + int vtx2 = ((int*)dst_ptr)[1]; + int result; + + if( (unsigned)vtx1 >= (unsigned)vtx_count || + (unsigned)vtx2 >= (unsigned)vtx_count ) + CV_Error( CV_StsOutOfRange, + "Some of stored vertex indices are out of range" ); + + result = cvGraphAddEdgeByPtr( graph, + vtx_buf[vtx1], vtx_buf[vtx2], 0, &edge ); + + if( result == 0 ) + CV_Error( CV_StsBadArg, "Duplicated edge has occured" ); + + edge->weight = *(float*)(dst_ptr + sizeof(int)*2); + if( elem_size > (int)sizeof(CvGraphEdge) ) + { + char* dst_ptr2 = (char*)cvAlignPtr( dst_ptr + sizeof(int)*2 + + sizeof(float), edge_user_align ); + memcpy( edge + 1, dst_ptr2, elem_size - sizeof(CvGraphEdge) ); + } + } + + dst_ptr += src_elem_size; + read_count--; + } + } + + ptr = graph; + cvFree( &read_buf ); + cvFree( &vtx_buf ); + + return ptr; +} + +/****************************************************************************************\ +* RTTI Functions * +\****************************************************************************************/ + +CvTypeInfo *CvType::first = 0, *CvType::last = 0; + +CvType::CvType( const char* type_name, + CvIsInstanceFunc is_instance, CvReleaseFunc release, + CvReadFunc read, CvWriteFunc write, CvCloneFunc clone ) +{ + CvTypeInfo _info; + _info.flags = 0; + _info.header_size = sizeof(_info); + _info.type_name = type_name; + _info.prev = _info.next = 0; + _info.is_instance = is_instance; + _info.release = release; + _info.clone = clone; + _info.read = read; + _info.write = write; + + 
cvRegisterType( &_info ); + info = first; +} + + +CvType::~CvType() +{ + cvUnregisterType( info->type_name ); +} + + +CvType seq_type( CV_TYPE_NAME_SEQ, icvIsSeq, icvReleaseSeq, icvReadSeq, + icvWriteSeqTree /* this is the entry point for + writing a single sequence too */, icvCloneSeq ); + +CvType seq_tree_type( CV_TYPE_NAME_SEQ_TREE, icvIsSeq, icvReleaseSeq, + icvReadSeqTree, icvWriteSeqTree, icvCloneSeq ); + +CvType seq_graph_type( CV_TYPE_NAME_GRAPH, icvIsGraph, icvReleaseGraph, + icvReadGraph, icvWriteGraph, icvCloneGraph ); + +CvType sparse_mat_type( CV_TYPE_NAME_SPARSE_MAT, icvIsSparseMat, + (CvReleaseFunc)cvReleaseSparseMat, icvReadSparseMat, + icvWriteSparseMat, (CvCloneFunc)cvCloneSparseMat ); + +CvType image_type( CV_TYPE_NAME_IMAGE, icvIsImage, (CvReleaseFunc)cvReleaseImage, + icvReadImage, icvWriteImage, (CvCloneFunc)cvCloneImage ); + +CvType mat_type( CV_TYPE_NAME_MAT, icvIsMat, (CvReleaseFunc)cvReleaseMat, + icvReadMat, icvWriteMat, (CvCloneFunc)cvCloneMat ); + +CvType matnd_type( CV_TYPE_NAME_MATND, icvIsMatND, (CvReleaseFunc)cvReleaseMatND, + icvReadMatND, icvWriteMatND, (CvCloneFunc)cvCloneMatND ); + +CV_IMPL void +cvRegisterType( const CvTypeInfo* _info ) +{ + CvTypeInfo* info = 0; + int i, len; + char c; + + //if( !CvType::first ) + // icvCreateStandardTypes(); + + if( !_info || _info->header_size != sizeof(CvTypeInfo) ) + CV_Error( CV_StsBadSize, "Invalid type info" ); + + if( !_info->is_instance || !_info->release || + !_info->read || !_info->write ) + CV_Error( CV_StsNullPtr, + "Some of required function pointers " + "(is_instance, release, read or write) are NULL"); + + c = _info->type_name[0]; + if( !cv_isalpha(c) && c != '_' ) + CV_Error( CV_StsBadArg, "Type name should start with a letter or _" ); + + len = (int)strlen(_info->type_name); + + for( i = 0; i < len; i++ ) + { + c = _info->type_name[i]; + if( !cv_isalnum(c) && c != '-' && c != '_' ) + CV_Error( CV_StsBadArg, + "Type name should contain only letters, digits, - and _" ); + } + + 
info = (CvTypeInfo*)malloc( sizeof(*info) + len + 1 ); + + *info = *_info; + info->type_name = (char*)(info + 1); + memcpy( (char*)info->type_name, _info->type_name, len + 1 ); + + info->flags = 0; + info->next = CvType::first; + info->prev = 0; + if( CvType::first ) + CvType::first->prev = info; + else + CvType::last = info; + CvType::first = info; +} + + +CV_IMPL void +cvUnregisterType( const char* type_name ) +{ + CvTypeInfo* info; + + info = cvFindType( type_name ); + if( info ) + { + if( info->prev ) + info->prev->next = info->next; + else + CvType::first = info->next; + + if( info->next ) + info->next->prev = info->prev; + else + CvType::last = info->prev; + + if( !CvType::first || !CvType::last ) + CvType::first = CvType::last = 0; + + free( info ); + } +} + + +CV_IMPL CvTypeInfo* +cvFirstType( void ) +{ + return CvType::first; +} + + +CV_IMPL CvTypeInfo* +cvFindType( const char* type_name ) +{ + CvTypeInfo* info = 0; + + if (type_name) + for( info = CvType::first; info != 0; info = info->next ) + if( strcmp( info->type_name, type_name ) == 0 ) + break; + + return info; +} + + +CV_IMPL CvTypeInfo* +cvTypeOf( const void* struct_ptr ) +{ + CvTypeInfo* info = 0; + + if( struct_ptr ) + { + for( info = CvType::first; info != 0; info = info->next ) + if( info->is_instance( struct_ptr )) + break; + } + + return info; +} + + +/* universal functions */ +CV_IMPL void +cvRelease( void** struct_ptr ) +{ + CvTypeInfo* info; + + if( !struct_ptr ) + CV_Error( CV_StsNullPtr, "NULL double pointer" ); + + if( *struct_ptr ) + { + info = cvTypeOf( *struct_ptr ); + if( !info ) + CV_Error( CV_StsError, "Unknown object type" ); + if( !info->release ) + CV_Error( CV_StsError, "release function pointer is NULL" ); + + info->release( struct_ptr ); + *struct_ptr = 0; + } +} + + +void* cvClone( const void* struct_ptr ) +{ + void* struct_copy = 0; + CvTypeInfo* info; + + if( !struct_ptr ) + CV_Error( CV_StsNullPtr, "NULL structure pointer" ); + + info = cvTypeOf( struct_ptr ); + if( 
!info ) + CV_Error( CV_StsError, "Unknown object type" ); + if( !info->clone ) + CV_Error( CV_StsError, "clone function pointer is NULL" ); + + struct_copy = info->clone( struct_ptr ); + return struct_copy; +} + + +/* reads matrix, image, sequence, graph etc. */ +CV_IMPL void* +cvRead( CvFileStorage* fs, CvFileNode* node, CvAttrList* list ) +{ + void* obj = 0; + CV_CHECK_FILE_STORAGE( fs ); + + if( !node ) + return 0; + + if( !CV_NODE_IS_USER(node->tag) || !node->info ) + CV_Error( CV_StsError, "The node does not represent a user object (unknown type?)" ); + + obj = node->info->read( fs, node ); + if( list ) + *list = cvAttrList(0,0); + + return obj; +} + + +/* writes matrix, image, sequence, graph etc. */ +CV_IMPL void +cvWrite( CvFileStorage* fs, const char* name, + const void* ptr, CvAttrList attributes ) +{ + CvTypeInfo* info; + + CV_CHECK_OUTPUT_FILE_STORAGE( fs ); + + if( !ptr ) + CV_Error( CV_StsNullPtr, "Null pointer to the written object" ); + + info = cvTypeOf( ptr ); + if( !info ) + CV_Error( CV_StsBadArg, "Unknown object" ); + + if( !info->write ) + CV_Error( CV_StsBadArg, "The object does not have write function" ); + + info->write( fs, name, ptr, attributes ); +} + + +/* simple API for reading/writing data */ +CV_IMPL void +cvSave( const char* filename, const void* struct_ptr, + const char* _name, const char* comment, CvAttrList attributes ) +{ + CvFileStorage* fs = 0; + + if( !struct_ptr ) + CV_Error( CV_StsNullPtr, "NULL object pointer" ); + + fs = cvOpenFileStorage( filename, 0, CV_STORAGE_WRITE ); + if( !fs ) + CV_Error( CV_StsError, "Could not open the file storage. Check the path and permissions" ); + + cv::string name = _name ? 
cv::string(_name) : cv::FileStorage::getDefaultObjectName(filename); + + if( comment ) + cvWriteComment( fs, comment, 0 ); + cvWrite( fs, name.c_str(), struct_ptr, attributes ); + cvReleaseFileStorage( &fs ); +} + +CV_IMPL void* +cvLoad( const char* filename, CvMemStorage* memstorage, + const char* name, const char** _real_name ) +{ + void* ptr = 0; + const char* real_name = 0; + cv::FileStorage fs(cvOpenFileStorage(filename, memstorage, CV_STORAGE_READ)); + + CvFileNode* node = 0; + + if( !fs.isOpened() ) + return 0; + + if( name ) + { + node = cvGetFileNodeByName( *fs, 0, name ); + } + else + { + int i, k; + for( k = 0; k < (*fs)->roots->total; k++ ) + { + CvSeq* seq; + CvSeqReader reader; + + node = (CvFileNode*)cvGetSeqElem( (*fs)->roots, k ); + if( !CV_NODE_IS_MAP( node->tag )) + return 0; + seq = node->data.seq; + node = 0; + + cvStartReadSeq( seq, &reader, 0 ); + + // find the first element in the map + for( i = 0; i < seq->total; i++ ) + { + if( CV_IS_SET_ELEM( reader.ptr )) + { + node = (CvFileNode*)reader.ptr; + goto stop_search; + } + CV_NEXT_SEQ_ELEM( seq->elem_size, reader ); + } + } + +stop_search: + ; + } + + if( !node ) + CV_Error( CV_StsObjectNotFound, "Could not find the/an object in file storage" ); + + real_name = cvGetFileNodeName( node ); + ptr = cvRead( *fs, node, 0 ); + + // sanity check + if( !memstorage && (CV_IS_SEQ( ptr ) || CV_IS_SET( ptr )) ) + CV_Error( CV_StsNullPtr, + "NULL memory storage is passed - the loaded dynamic structure can not be stored" ); + + if( cvGetErrStatus() < 0 ) + { + cvRelease( (void**)&ptr ); + real_name = 0; + } + + if( _real_name) + { + if (real_name) + { + *_real_name = (const char*)cvAlloc(strlen(real_name)); + memcpy((void*)*_real_name, real_name, strlen(real_name)); + } else { + *_real_name = 0; + } + } + + return ptr; +} + + +///////////////////////// new C++ interface for CvFileStorage /////////////////////////// + +namespace cv +{ + +static void getElemSize( const string& fmt, size_t& elemSize, size_t& 
cn ) +{ + const char* dt = fmt.c_str(); + cn = 1; + if( cv_isdigit(dt[0]) ) + { + cn = dt[0] - '0'; + dt++; + } + char c = dt[0]; + elemSize = cn*(c == 'u' || c == 'c' ? sizeof(uchar) : c == 'w' || c == 's' ? sizeof(ushort) : + c == 'i' ? sizeof(int) : c == 'f' ? sizeof(float) : c == 'd' ? sizeof(double) : + c == 'r' ? sizeof(void*) : (size_t)0); +} + +FileStorage::FileStorage() +{ + state = UNDEFINED; +} + +FileStorage::FileStorage(const string& filename, int flags, const string& encoding) +{ + state = UNDEFINED; + open( filename, flags, encoding ); +} + +FileStorage::FileStorage(CvFileStorage* _fs) +{ + fs = Ptr(_fs); + state = _fs ? NAME_EXPECTED + INSIDE_MAP : UNDEFINED; +} + +FileStorage::~FileStorage() +{ + while( structs.size() > 0 ) + { + cvEndWriteStruct(fs); + structs.pop_back(); + } +} + +bool FileStorage::open(const string& filename, int flags, const string& encoding) +{ + release(); + fs = Ptr(cvOpenFileStorage( filename.c_str(), 0, flags, + !encoding.empty() ? encoding.c_str() : 0)); + bool ok = isOpened(); + state = ok ? NAME_EXPECTED + INSIDE_MAP : UNDEFINED; + return ok; +} + +bool FileStorage::isOpened() const +{ + return !fs.empty(); +} + +void FileStorage::release() +{ + fs.release(); + structs.clear(); + state = UNDEFINED; +} + +FileNode FileStorage::root(int streamidx) const +{ + return isOpened() ? FileNode(fs, cvGetRootFileNode(fs, streamidx)) : FileNode(); +} + +FileStorage& operator << (FileStorage& fs, const string& str) +{ + enum { NAME_EXPECTED = FileStorage::NAME_EXPECTED, + VALUE_EXPECTED = FileStorage::VALUE_EXPECTED, + INSIDE_MAP = FileStorage::INSIDE_MAP }; + const char* _str = str.c_str(); + if( !fs.isOpened() || !_str ) + return fs; + if( *_str == '}' || *_str == ']' ) + { + if( fs.structs.empty() ) + CV_Error_( CV_StsError, ("Extra closing '%c'", *_str) ); + if( (*_str == ']' ? 
'[' : '{') != fs.structs.back() ) + CV_Error_( CV_StsError, + ("The closing '%c' does not match the opening '%c'", *_str, fs.structs.back())); + fs.structs.pop_back(); + fs.state = fs.structs.empty() || fs.structs.back() == '{' ? + INSIDE_MAP + NAME_EXPECTED : VALUE_EXPECTED; + cvEndWriteStruct( *fs ); + fs.elname = string(); + } + else if( fs.state == NAME_EXPECTED + INSIDE_MAP ) + { + if( !cv_isalpha(*_str) ) + CV_Error_( CV_StsError, ("Incorrect element name %s", _str) ); + fs.elname = str; + fs.state = VALUE_EXPECTED + INSIDE_MAP; + } + else if( (fs.state & 3) == VALUE_EXPECTED ) + { + if( *_str == '{' || *_str == '[' ) + { + fs.structs.push_back(*_str); + int flags = *_str++ == '{' ? CV_NODE_MAP : CV_NODE_SEQ; + fs.state = flags == CV_NODE_MAP ? INSIDE_MAP + + NAME_EXPECTED : VALUE_EXPECTED; + if( *_str == ':' ) + { + flags |= CV_NODE_FLOW; + _str++; + } + cvStartWriteStruct( *fs, fs.elname.size() > 0 ? fs.elname.c_str() : 0, + flags, *_str ? _str : 0 ); + fs.elname = string(); + } + else + { + write( fs, fs.elname, (_str[0] == '\\' && (_str[1] == '{' || _str[1] == '}' || + _str[1] == '[' || _str[1] == ']')) ? string(_str+1) : str ); + if( fs.state == INSIDE_MAP + VALUE_EXPECTED ) + fs.state = INSIDE_MAP + NAME_EXPECTED; + } + } + else + CV_Error( CV_StsError, "Invalid fs.state" ); + return fs; +} + + +void FileStorage::writeRaw( const string& fmt, const uchar* vec, size_t len ) +{ + if( !isOpened() ) + return; + size_t elemSize, cn; + getElemSize( fmt, elemSize, cn ); + CV_Assert( len % elemSize == 0 ); + cvWriteRawData( fs, vec, (int)(len/elemSize), fmt.c_str()); +} + + +void FileStorage::writeObj( const string& name, const void* obj ) +{ + if( !isOpened() ) + return; + cvWrite( fs, name.size() > 0 ? 
name.c_str() : 0, obj ); +} + + +FileNode FileStorage::operator[](const string& nodename) const +{ + return FileNode(fs, cvGetFileNodeByName(fs, 0, nodename.c_str())); +} + +FileNode FileStorage::operator[](const char* nodename) const +{ + return FileNode(fs, cvGetFileNodeByName(fs, 0, nodename)); +} + +FileNode FileNode::operator[](const string& nodename) const +{ + return FileNode(fs, cvGetFileNodeByName(fs, node, nodename.c_str())); +} + +FileNode FileNode::operator[](const char* nodename) const +{ + return FileNode(fs, cvGetFileNodeByName(fs, node, nodename)); +} + +FileNode FileNode::operator[](int i) const +{ + return isSeq() ? FileNode(fs, (CvFileNode*)cvGetSeqElem(node->data.seq, i)) : + i == 0 ? *this : FileNode(); +} + +string FileNode::name() const +{ + const char* str; + return !node || (str = cvGetFileNodeName(node)) == 0 ? string() : string(str); +} + +void* FileNode::readObj() const +{ + if( !fs || !node ) + return 0; + return cvRead( (CvFileStorage*)fs, (CvFileNode*)node ); +} + +FileNodeIterator::FileNodeIterator() +{ + fs = 0; + container = 0; + reader.ptr = 0; + remaining = 0; +} + +FileNodeIterator::FileNodeIterator(const CvFileStorage* _fs, + const CvFileNode* _node, size_t _ofs) +{ + if( _fs && _node ) + { + int node_type = _node->tag & FileNode::TYPE_MASK; + fs = _fs; + container = _node; + if( !(_node->tag & FileNode::USER) && (node_type == FileNode::SEQ || node_type == FileNode::MAP) ) + { + cvStartReadSeq( _node->data.seq, &reader ); + remaining = FileNode(_fs, _node).size(); + } + else + { + reader.ptr = (schar*)_node; + reader.seq = 0; + remaining = 1; + } + (*this) += (int)_ofs; + } + else + { + fs = 0; + container = 0; + reader.ptr = 0; + remaining = 0; + } +} + +FileNodeIterator::FileNodeIterator(const FileNodeIterator& it) +{ + fs = it.fs; + container = it.container; + reader = it.reader; + remaining = it.remaining; +} + +FileNodeIterator& FileNodeIterator::operator ++() +{ + if( remaining > 0 ) + { + if( reader.seq ) + 
CV_NEXT_SEQ_ELEM( reader.seq->elem_size, reader ); + remaining--; + } + return *this; +} + +FileNodeIterator FileNodeIterator::operator ++(int) +{ + FileNodeIterator it = *this; + ++(*this); + return it; +} + +FileNodeIterator& FileNodeIterator::operator --() +{ + if( remaining < FileNode(fs, container).size() ) + { + if( reader.seq ) + CV_PREV_SEQ_ELEM( reader.seq->elem_size, reader ); + remaining++; + } + return *this; +} + +FileNodeIterator FileNodeIterator::operator --(int) +{ + FileNodeIterator it = *this; + --(*this); + return it; +} + +FileNodeIterator& FileNodeIterator::operator += (int ofs) +{ + if( ofs == 0 ) + return *this; + if( ofs > 0 ) + ofs = std::min(ofs, (int)remaining); + else + { + size_t count = FileNode(fs, container).size(); + ofs = (int)(remaining - std::min(remaining - ofs, count)); + } + remaining -= ofs; + if( reader.seq ) + cvSetSeqReaderPos( &reader, ofs, 1 ); + return *this; +} + +FileNodeIterator& FileNodeIterator::operator -= (int ofs) +{ + return operator += (-ofs); +} + + +FileNodeIterator& FileNodeIterator::readRaw( const string& fmt, uchar* vec, size_t maxCount ) +{ + if( fs && container && remaining > 0 ) + { + size_t elem_size, cn; + getElemSize( fmt, elem_size, cn ); + CV_Assert( elem_size > 0 ); + size_t count = std::min(remaining, maxCount); + + if( reader.seq ) + { + cvReadRawDataSlice( fs, &reader, (int)count, vec, fmt.c_str() ); + remaining -= count*cn; + } + else + { + cvReadRawData( fs, container, vec, fmt.c_str() ); + remaining = 0; + } + } + return *this; +} + + +void write( FileStorage& fs, const string& name, int value ) +{ cvWriteInt( *fs, name.size() ? name.c_str() : 0, value ); } + +void write( FileStorage& fs, const string& name, float value ) +{ cvWriteReal( *fs, name.size() ? name.c_str() : 0, value ); } + +void write( FileStorage& fs, const string& name, double value ) +{ cvWriteReal( *fs, name.size() ? 
name.c_str() : 0, value ); } + +void write( FileStorage& fs, const string& name, const string& value ) +{ cvWriteString( *fs, name.size() ? name.c_str() : 0, value.c_str() ); } + +void writeScalar(FileStorage& fs, int value ) +{ cvWriteInt( *fs, 0, value ); } + +void writeScalar(FileStorage& fs, float value ) +{ cvWriteReal( *fs, 0, value ); } + +void writeScalar(FileStorage& fs, double value ) +{ cvWriteReal( *fs, 0, value ); } + +void writeScalar(FileStorage& fs, const string& value ) +{ cvWriteString( *fs, 0, value.c_str() ); } + + +void write( FileStorage& fs, const string& name, const Mat& value ) +{ + if( value.dims <= 2 ) + { + CvMat mat = value; + cvWrite( *fs, name.size() ? name.c_str() : 0, &mat ); + } + else + { + CvMatND mat = value; + cvWrite( *fs, name.size() ? name.c_str() : 0, &mat ); + } +} + +// TODO: the 4 functions below need to be implemented more efficiently +void write( FileStorage& fs, const string& name, const SparseMat& value ) +{ + Ptr mat = (CvSparseMat*)value; + cvWrite( *fs, name.size() ? name.c_str() : 0, mat ); +} + + +WriteStructContext::WriteStructContext(FileStorage& _fs, const string& name, + int flags, const string& typeName) : fs(&_fs) +{ + cvStartWriteStruct(**fs, !name.empty() ? name.c_str() : 0, flags, + !typeName.empty() ? 
typeName.c_str() : 0); +} + +WriteStructContext::~WriteStructContext() { cvEndWriteStruct(**fs); } + + +void read( const FileNode& node, Mat& mat, const Mat& default_mat ) +{ + if( node.empty() ) + { + default_mat.copyTo(mat); + return; + } + void* obj = cvRead((CvFileStorage*)node.fs, (CvFileNode*)*node); + if(CV_IS_MAT_HDR_Z(obj)) + { + Mat((const CvMat*)obj).copyTo(mat); + cvReleaseMat((CvMat**)&obj); + } + else if(CV_IS_MATND_HDR(obj)) + { + Mat((const CvMatND*)obj).copyTo(mat); + cvReleaseMatND((CvMatND**)&obj); + } + else + { + cvRelease(&obj); + CV_Error(CV_StsBadArg, "Unknown array type"); + } +} + +void read( const FileNode& node, SparseMat& mat, const SparseMat& default_mat ) +{ + if( node.empty() ) + { + default_mat.copyTo(mat); + return; + } + Ptr m = (CvSparseMat*)cvRead((CvFileStorage*)node.fs, (CvFileNode*)*node); + CV_Assert(CV_IS_SPARSE_MAT(m)); + SparseMat(m).copyTo(mat); +} + +} + +/* End of file. */ diff --git a/opencv/core/precomp.cpp b/opencv/core/precomp.cpp new file mode 100644 index 0000000..e540cc5 --- /dev/null +++ b/opencv/core/precomp.cpp @@ -0,0 +1,45 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" + +/* End of file. */ diff --git a/opencv/core/precomp.hpp b/opencv/core/precomp.hpp new file mode 100644 index 0000000..efd8c61 --- /dev/null +++ b/opencv/core/precomp.hpp @@ -0,0 +1,216 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_PRECOMP_H__ +#define __OPENCV_PRECOMP_H__ + +#if defined _MSC_VER && _MSC_VER >= 1200 + // disable warnings related to inline functions + #pragma warning( disable: 4251 4711 4710 4514 ) +#endif + +#ifdef HAVE_CVCONFIG_H +#include "cvconfig.h" +#endif + +#include "opencv2/core/core.hpp" +#include "opencv2/core/core_c.h" +#include "opencv2/core/internal.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef HAVE_TEGRA_OPTIMIZATION +#include "opencv2/core/core_tegra.hpp" +#endif + +namespace cv +{ + +// -128.f ... 255.f +extern const float g_8x32fTab[]; +#define CV_8TO32F(x) cv::g_8x32fTab[(x)+128] + +extern const ushort g_8x16uSqrTab[]; +#define CV_SQR_8U(x) cv::g_8x16uSqrTab[(x)+255] + +extern const char* g_HersheyGlyphs[]; + +extern const uchar g_Saturate8u[]; +#define CV_FAST_CAST_8U(t) (assert(-256 <= (t) && (t) <= 512), cv::g_Saturate8u[(t)+256]) +#define CV_MIN_8U(a,b) ((a) - CV_FAST_CAST_8U((a) - (b))) +#define CV_MAX_8U(a,b) ((a) + CV_FAST_CAST_8U((b) - (a))) + + +#if defined WIN32 || defined _WIN32 +void deleteThreadAllocData(); +void deleteThreadRNGData(); +#endif + +template struct OpAdd +{ + typedef T1 type1; + typedef T2 type2; + typedef T3 rtype; + T3 operator ()(T1 a, T2 b) const { return saturate_cast(a + b); } +}; + +template struct OpSub +{ + typedef T1 type1; + typedef T2 type2; + typedef T3 rtype; + T3 operator ()(T1 a, T2 b) const { return saturate_cast(a - b); } +}; + +template struct OpRSub +{ + typedef T1 type1; + typedef T2 type2; + typedef T3 rtype; + T3 operator ()(T1 a, T2 b) const { return saturate_cast(b - a); } +}; + +template struct OpMin +{ + typedef T type1; + typedef T type2; + typedef T rtype; + T operator ()(T a, T b) const { return std::min(a, b); } +}; + +template struct OpMax +{ + typedef T type1; + typedef T type2; + typedef T rtype; + T operator ()(T a, T b) const { return std::max(a, b); } +}; + +inline Size getContinuousSize( const Mat& m1, int 
widthScale=1 ) +{ + return m1.isContinuous() ? Size(m1.cols*m1.rows*widthScale, 1) : + Size(m1.cols*widthScale, m1.rows); +} + +inline Size getContinuousSize( const Mat& m1, const Mat& m2, int widthScale=1 ) +{ + return (m1.flags & m2.flags & Mat::CONTINUOUS_FLAG) != 0 ? + Size(m1.cols*m1.rows*widthScale, 1) : Size(m1.cols*widthScale, m1.rows); +} + +inline Size getContinuousSize( const Mat& m1, const Mat& m2, + const Mat& m3, int widthScale=1 ) +{ + return (m1.flags & m2.flags & m3.flags & Mat::CONTINUOUS_FLAG) != 0 ? + Size(m1.cols*m1.rows*widthScale, 1) : Size(m1.cols*widthScale, m1.rows); +} + +inline Size getContinuousSize( const Mat& m1, const Mat& m2, + const Mat& m3, const Mat& m4, + int widthScale=1 ) +{ + return (m1.flags & m2.flags & m3.flags & m4.flags & Mat::CONTINUOUS_FLAG) != 0 ? + Size(m1.cols*m1.rows*widthScale, 1) : Size(m1.cols*widthScale, m1.rows); +} + +inline Size getContinuousSize( const Mat& m1, const Mat& m2, + const Mat& m3, const Mat& m4, + const Mat& m5, int widthScale=1 ) +{ + return (m1.flags & m2.flags & m3.flags & m4.flags & m5.flags & Mat::CONTINUOUS_FLAG) != 0 ? 
+ Size(m1.cols*m1.rows*widthScale, 1) : Size(m1.cols*widthScale, m1.rows); +} + +struct NoVec +{ + size_t operator()(const void*, const void*, void*, size_t) const { return 0; } +}; + +extern volatile bool USE_SSE2; + +typedef void (*BinaryFunc)(const uchar* src1, size_t step1, + const uchar* src2, size_t step2, + uchar* dst, size_t step, Size sz, + void*); + +BinaryFunc getConvertFunc(int sdepth, int ddepth); +BinaryFunc getConvertScaleFunc(int sdepth, int ddepth); +BinaryFunc getCopyMaskFunc(size_t esz); + +enum { BLOCK_SIZE = 1024 }; + +#ifdef HAVE_IPP +static inline IppiSize ippiSize(int width, int height) { IppiSize sz = { width, height}; return sz; } +static inline IppiSize ippiSize(Size _sz) { IppiSize sz = { _sz.width, _sz.height}; return sz; } +#endif + +#if defined HAVE_IPP && (IPP_VERSION_MAJOR >= 7) +#define ARITHM_USE_IPP 1 +#define IF_IPP(then_call, else_call) then_call +#else +#define ARITHM_USE_IPP 0 +#define IF_IPP(then_call, else_call) else_call +#endif + +inline bool checkScalar(const Mat& sc, int atype, int sckind, int akind) +{ + if( sc.dims > 2 || (sc.cols != 1 && sc.rows != 1) || !sc.isContinuous() ) + return false; + int cn = CV_MAT_CN(atype); + if( akind == _InputArray::MATX && sckind != _InputArray::MATX ) + return false; + return sc.size() == Size(1, 1) || sc.size() == Size(1, cn) || sc.size() == Size(cn, 1) || + (sc.size() == Size(1, 4) && sc.type() == CV_64F && cn <= 4); +} + +void convertAndUnrollScalar( const Mat& sc, int buftype, uchar* scbuf, size_t blocksize ); + +} + +#endif /*_CXCORE_INTERNAL_H_*/ diff --git a/opencv/core/rand.cpp b/opencv/core/rand.cpp new file mode 100644 index 0000000..e143256 --- /dev/null +++ b/opencv/core/rand.cpp @@ -0,0 +1,837 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +/* //////////////////////////////////////////////////////////////////// +// +// Filling CvMat/IplImage instances with random numbers +// +// */ + +#include "precomp.hpp" + +namespace cv +{ + +///////////////////////////// Functions Declaration ////////////////////////////////////// + +/* + Multiply-with-carry generator is used here: + temp = ( A*X(n) + carry ) + X(n+1) = temp mod (2^32) + carry = temp / (2^32) +*/ + +#define RNG_NEXT(x) ((uint64)(unsigned)(x)*CV_RNG_COEFF + ((x) >> 32)) + +/***************************************************************************************\ +* Pseudo-Random Number Generators (PRNGs) * +\***************************************************************************************/ + +template static void +randBits_( T* arr, int len, uint64* state, const Vec2i* p, bool small_flag ) +{ + uint64 temp = *state; + int i; + + if( !small_flag ) + { + for( i = 0; i <= len - 4; i += 4 ) + { + int t0, t1; + + temp = RNG_NEXT(temp); + t0 = ((int)temp & p[i][0]) + p[i][1]; + temp = RNG_NEXT(temp); + t1 = ((int)temp & p[i+1][0]) + p[i+1][1]; + arr[i] = saturate_cast(t0); + arr[i+1] = saturate_cast(t1); + + temp = RNG_NEXT(temp); + t0 = ((int)temp & p[i+2][0]) + p[i+2][1]; + temp = RNG_NEXT(temp); + t1 = ((int)temp & p[i+3][0]) + p[i+3][1]; + arr[i+2] = saturate_cast(t0); + arr[i+3] = saturate_cast(t1); + } + } + else + { + for( i = 0; i <= len - 4; i += 4 ) + { + int t0, t1, t; + temp = RNG_NEXT(temp); + t = (int)temp; + t0 = (t & p[i][0]) + p[i][1]; + t1 = ((t >> 8) & p[i+1][0]) + p[i+1][1]; + arr[i] = saturate_cast(t0); + arr[i+1] = saturate_cast(t1); + + t0 = ((t >> 16) & p[i+2][0]) + p[i+2][1]; + t1 = ((t >> 24) & p[i+3][0]) + p[i+3][1]; + arr[i+2] = saturate_cast(t0); + arr[i+3] = saturate_cast(t1); + } + } + + for( ; i < len; i++ ) + { + int t0; + temp = RNG_NEXT(temp); + + t0 = ((int)temp & p[i][0]) + p[i][1]; + arr[i] = saturate_cast(t0); + } + + *state = temp; +} + +struct DivStruct +{ + unsigned d; + unsigned M; + int sh1, 
sh2; + int delta; +}; + +template static void +randi_( T* arr, int len, uint64* state, const DivStruct* p ) +{ + uint64 temp = *state; + int i = 0; + unsigned t0, t1, v0, v1; + + for( i = 0; i <= len - 4; i += 4 ) + { + temp = RNG_NEXT(temp); + t0 = (unsigned)temp; + temp = RNG_NEXT(temp); + t1 = (unsigned)temp; + v0 = (unsigned)(((uint64)t0 * p[i].M) >> 32); + v1 = (unsigned)(((uint64)t1 * p[i+1].M) >> 32); + v0 = (v0 + ((t0 - v0) >> p[i].sh1)) >> p[i].sh2; + v1 = (v1 + ((t1 - v1) >> p[i+1].sh1)) >> p[i+1].sh2; + v0 = t0 - v0*p[i].d + p[i].delta; + v1 = t1 - v1*p[i+1].d + p[i+1].delta; + arr[i] = saturate_cast((int)v0); + arr[i+1] = saturate_cast((int)v1); + + temp = RNG_NEXT(temp); + t0 = (unsigned)temp; + temp = RNG_NEXT(temp); + t1 = (unsigned)temp; + v0 = (unsigned)(((uint64)t0 * p[i+2].M) >> 32); + v1 = (unsigned)(((uint64)t1 * p[i+3].M) >> 32); + v0 = (v0 + ((t0 - v0) >> p[i+2].sh1)) >> p[i+2].sh2; + v1 = (v1 + ((t1 - v1) >> p[i+3].sh1)) >> p[i+3].sh2; + v0 = t0 - v0*p[i+2].d + p[i+2].delta; + v1 = t1 - v1*p[i+3].d + p[i+3].delta; + arr[i+2] = saturate_cast((int)v0); + arr[i+3] = saturate_cast((int)v1); + } + + for( ; i < len; i++ ) + { + temp = RNG_NEXT(temp); + t0 = (unsigned)temp; + v0 = (unsigned)(((uint64)t0 * p[i].M) >> 32); + v0 = (v0 + ((t0 - v0) >> p[i].sh1)) >> p[i].sh2; + v0 = t0 - v0*p[i].d + p[i].delta; + arr[i] = saturate_cast((int)v0); + } + + *state = temp; +} + + +#define DEF_RANDI_FUNC(suffix, type) \ +static void randBits_##suffix(type* arr, int len, uint64* state, \ + const Vec2i* p, bool small_flag) \ +{ randBits_(arr, len, state, p, small_flag); } \ +\ +static void randi_##suffix(type* arr, int len, uint64* state, \ + const DivStruct* p, bool ) \ +{ randi_(arr, len, state, p); } + +DEF_RANDI_FUNC(8u, uchar) +DEF_RANDI_FUNC(8s, schar) +DEF_RANDI_FUNC(16u, ushort) +DEF_RANDI_FUNC(16s, short) +DEF_RANDI_FUNC(32s, int) + +static void randf_32f( float* arr, int len, uint64* state, const Vec2f* p, bool ) +{ + uint64 temp = *state; + int i; + 
+ for( i = 0; i <= len - 4; i += 4 ) + { + float f0, f1; + + temp = RNG_NEXT(temp); + f0 = (int)temp*p[i][0] + p[i][1]; + temp = RNG_NEXT(temp); + f1 = (int)temp*p[i+1][0] + p[i+1][1]; + arr[i] = f0; arr[i+1] = f1; + + temp = RNG_NEXT(temp); + f0 = (int)temp*p[i+2][0] + p[i+2][1]; + temp = RNG_NEXT(temp); + f1 = (int)temp*p[i+3][0] + p[i+3][1]; + arr[i+2] = f0; arr[i+3] = f1; + } + + for( ; i < len; i++ ) + { + temp = RNG_NEXT(temp); + arr[i] = (int)temp*p[i][0] + p[i][1]; + } + + *state = temp; +} + + +static void +randf_64f( double* arr, int len, uint64* state, const Vec2d* p, bool ) +{ + uint64 temp = *state; + int64 v = 0; + int i; + + for( i = 0; i <= len - 4; i += 4 ) + { + double f0, f1; + + temp = RNG_NEXT(temp); + v = (temp >> 32)|(temp << 32); + f0 = v*p[i][0] + p[i][1]; + temp = RNG_NEXT(temp); + v = (temp >> 32)|(temp << 32); + f1 = v*p[i+1][0] + p[i+1][1]; + arr[i] = f0; arr[i+1] = f1; + + temp = RNG_NEXT(temp); + v = (temp >> 32)|(temp << 32); + f0 = v*p[i+2][0] + p[i+2][1]; + temp = RNG_NEXT(temp); + v = (temp >> 32)|(temp << 32); + f1 = v*p[i+3][0] + p[i+3][1]; + arr[i+2] = f0; arr[i+3] = f1; + } + + for( ; i < len; i++ ) + { + temp = RNG_NEXT(temp); + v = (temp >> 32)|(temp << 32); + arr[i] = v*p[i][0] + p[i][1]; + } + + *state = temp; +} + +typedef void (*RandFunc)(uchar* arr, int len, uint64* state, const void* p, bool small_flag); + + +static RandFunc randTab[][8] = +{ + { + (RandFunc)randi_8u, (RandFunc)randi_8s, (RandFunc)randi_16u, (RandFunc)randi_16s, + (RandFunc)randi_32s, (RandFunc)randf_32f, (RandFunc)randf_64f, 0 + }, + { + (RandFunc)randBits_8u, (RandFunc)randBits_8s, (RandFunc)randBits_16u, (RandFunc)randBits_16s, + (RandFunc)randBits_32s, 0, 0, 0 + } +}; + +/* + The code below implements the algorithm described in + "The Ziggurat Method for Generating Random Variables" + by Marsaglia and Tsang, Journal of Statistical Software. 
+*/ +static void +randn_0_1_32f( float* arr, int len, uint64* state ) +{ + const float r = 3.442620f; // The start of the right tail + const float rng_flt = 2.3283064365386962890625e-10f; // 2^-32 + static unsigned kn[128]; + static float wn[128], fn[128]; + uint64 temp = *state; + static bool initialized=false; + int i; + + if( !initialized ) + { + const double m1 = 2147483648.0; + double dn = 3.442619855899, tn = dn, vn = 9.91256303526217e-3; + + // Set up the tables + double q = vn/std::exp(-.5*dn*dn); + kn[0] = (unsigned)((dn/q)*m1); + kn[1] = 0; + + wn[0] = (float)(q/m1); + wn[127] = (float)(dn/m1); + + fn[0] = 1.f; + fn[127] = (float)std::exp(-.5*dn*dn); + + for(i=126;i>=1;i--) + { + dn = std::sqrt(-2.*std::log(vn/dn+std::exp(-.5*dn*dn))); + kn[i+1] = (unsigned)((dn/tn)*m1); + tn = dn; + fn[i] = (float)std::exp(-.5*dn*dn); + wn[i] = (float)(dn/m1); + } + initialized = true; + } + + for( i = 0; i < len; i++ ) + { + float x, y; + for(;;) + { + int hz = (int)temp; + temp = RNG_NEXT(temp); + int iz = hz & 127; + x = hz*wn[iz]; + if( (unsigned)std::abs(hz) < kn[iz] ) + break; + if( iz == 0) // iz==0, handles the base strip + { + do + { + x = (unsigned)temp*rng_flt; + temp = RNG_NEXT(temp); + y = (unsigned)temp*rng_flt; + temp = RNG_NEXT(temp); + x = (float)(-std::log(x+FLT_MIN)*0.2904764); + y = (float)-std::log(y+FLT_MIN); + } // .2904764 is 1/r + while( y + y < x*x ); + x = hz > 0 ? 
r + x : -r - x; + break; + } + // iz > 0, handle the wedges of other strips + y = (unsigned)temp*rng_flt; + temp = RNG_NEXT(temp); + if( fn[iz] + y*(fn[iz - 1] - fn[iz]) < std::exp(-.5*x*x) ) + break; + } + arr[i] = x; + } + *state = temp; +} + + +double RNG::gaussian(double sigma) +{ + float temp; + randn_0_1_32f( &temp, 1, &state ); + return temp*sigma; +} + + +template static void +randnScale_( const float* src, T* dst, int len, int cn, const PT* mean, const PT* stddev, bool stdmtx ) +{ + int i, j, k; + if( !stdmtx ) + { + if( cn == 1 ) + { + PT b = mean[0], a = stddev[0]; + for( i = 0; i < len; i++ ) + dst[i] = saturate_cast(src[i]*a + b); + } + else + { + for( i = 0; i < len; i++, src += cn, dst += cn ) + for( k = 0; k < cn; k++ ) + dst[k] = saturate_cast(src[k]*stddev[k] + mean[k]); + } + } + else + { + for( i = 0; i < len; i++, src += cn, dst += cn ) + { + for( j = 0; j < cn; j++ ) + { + PT s = mean[j]; + for( k = 0; k < cn; k++ ) + s += src[k]*stddev[j*cn + k]; + dst[j] = saturate_cast(s); + } + } + } +} + +static void randnScale_8u( const float* src, uchar* dst, int len, int cn, + const float* mean, const float* stddev, bool stdmtx ) +{ randnScale_(src, dst, len, cn, mean, stddev, stdmtx); } + +static void randnScale_8s( const float* src, schar* dst, int len, int cn, + const float* mean, const float* stddev, bool stdmtx ) +{ randnScale_(src, dst, len, cn, mean, stddev, stdmtx); } + +static void randnScale_16u( const float* src, ushort* dst, int len, int cn, + const float* mean, const float* stddev, bool stdmtx ) +{ randnScale_(src, dst, len, cn, mean, stddev, stdmtx); } + +static void randnScale_16s( const float* src, short* dst, int len, int cn, + const float* mean, const float* stddev, bool stdmtx ) +{ randnScale_(src, dst, len, cn, mean, stddev, stdmtx); } + +static void randnScale_32s( const float* src, int* dst, int len, int cn, + const float* mean, const float* stddev, bool stdmtx ) +{ randnScale_(src, dst, len, cn, mean, stddev, stdmtx); } + +static 
void randnScale_32f( const float* src, float* dst, int len, int cn, + const float* mean, const float* stddev, bool stdmtx ) +{ randnScale_(src, dst, len, cn, mean, stddev, stdmtx); } + +static void randnScale_64f( const float* src, double* dst, int len, int cn, + const double* mean, const double* stddev, bool stdmtx ) +{ randnScale_(src, dst, len, cn, mean, stddev, stdmtx); } + +typedef void (*RandnScaleFunc)(const float* src, uchar* dst, int len, int cn, + const uchar*, const uchar*, bool); + +static RandnScaleFunc randnScaleTab[] = +{ + (RandnScaleFunc)randnScale_8u, (RandnScaleFunc)randnScale_8s, (RandnScaleFunc)randnScale_16u, + (RandnScaleFunc)randnScale_16s, (RandnScaleFunc)randnScale_32s, (RandnScaleFunc)randnScale_32f, + (RandnScaleFunc)randnScale_64f, 0 +}; + +void RNG::fill( InputOutputArray _mat, int disttype, InputArray _param1arg, InputArray _param2arg ) +{ + Mat mat = _mat.getMat(), _param1 = _param1arg.getMat(), _param2 = _param2arg.getMat(); + int depth = mat.depth(), cn = mat.channels(); + AutoBuffer _parambuf; + int j, k, fast_int_mode = 0, smallFlag = 1; + RandFunc func = 0; + RandnScaleFunc scaleFunc = 0; + + CV_Assert(_param1.channels() == 1 && (_param1.rows == 1 || _param1.cols == 1) && + (_param1.rows + _param1.cols - 1 == cn || _param1.rows + _param1.cols - 1 == 1 || + (_param1.size() == Size(1, 4) && _param1.type() == CV_64F && cn <= 4))); + CV_Assert( _param2.channels() == 1 && + (((_param2.rows == 1 || _param2.cols == 1) && + (_param2.rows + _param2.cols - 1 == cn || _param2.rows + _param2.cols - 1 == 1 || + (_param1.size() == Size(1, 4) && _param1.type() == CV_64F && cn <= 4))) || + (_param2.rows == cn && _param2.cols == cn && disttype == NORMAL))); + + Vec2i* ip = 0; + Vec2d* dp = 0; + Vec2f* fp = 0; + DivStruct* ds = 0; + uchar* mean = 0; + uchar* stddev = 0; + bool stdmtx = false; + int n1 = (int)_param1.total(); + int n2 = (int)_param2.total(); + + if( disttype == UNIFORM ) + { + _parambuf.allocate(cn*8 + n1 + n2); + double* parambuf 
= _parambuf; + double* p1 = (double*)_param1.data; + double* p2 = (double*)_param2.data; + + if( !_param1.isContinuous() || _param1.type() != CV_64F || n1 != cn ) + { + Mat tmp(_param1.size(), CV_64F, parambuf); + _param1.convertTo(tmp, CV_64F); + p1 = parambuf; + if( n1 < cn ) + for( j = n1; j < cn; j++ ) + p1[j] = p1[j-n1]; + } + + if( !_param2.isContinuous() || _param2.type() != CV_64F || n2 != cn ) + { + Mat tmp(_param2.size(), CV_64F, parambuf + cn); + _param2.convertTo(tmp, CV_64F); + p2 = parambuf + cn; + if( n2 < cn ) + for( j = n2; j < cn; j++ ) + p2[j] = p2[j-n2]; + } + + if( depth <= CV_32S ) + { + ip = (Vec2i*)(parambuf + cn*2); + for( j = 0, fast_int_mode = 1; j < cn; j++ ) + { + double a = min(p1[j], p2[j]); + double b = max(p1[j], p2[j]); + ip[j][1] = cvCeil(a); + int idiff = ip[j][0] = cvFloor(b) - ip[j][1] - 1; + double diff = b - a; + + fast_int_mode &= diff <= 4294967296. && (idiff & (idiff+1)) == 0; + if( fast_int_mode ) + smallFlag &= idiff <= 255; + } + + if( !fast_int_mode ) + { + ds = (DivStruct*)(ip + cn); + for( j = 0; j < cn; j++ ) + { + ds[j].delta = ip[j][1]; + unsigned d = ds[j].d = (unsigned)(ip[j][0]+1); + int l = 0; + while(((uint64)1 << l) < d) + l++; + ds[j].M = (unsigned)(((uint64)1 << 32)*(((uint64)1 << l) - d)/d) + 1; + ds[j].sh1 = min(l, 1); + ds[j].sh2 = max(l - 1, 0); + } + } + + func = randTab[fast_int_mode][depth]; + } + else + { + double scale = depth == CV_64F ? 
+ 5.4210108624275221700372640043497e-20 : // 2**-64 + 2.3283064365386962890625e-10; // 2**-32 + + // for each channel i compute such dparam[0][i] & dparam[1][i], + // so that a signed 32/64-bit integer X is transformed to + // the range [param1.val[i], param2.val[i]) using + // dparam[1][i]*X + dparam[0][i] + if( depth == CV_32F ) + { + fp = (Vec2f*)(parambuf + cn*2); + for( j = 0; j < cn; j++ ) + { + fp[j][0] = (float)((p2[j] - p1[j])*scale); + fp[j][1] = (float)((p2[j] + p1[j])*0.5); + } + } + else + { + dp = (Vec2d*)(parambuf + cn*2); + for( j = 0; j < cn; j++ ) + { + dp[j][0] = ((p2[j] - p1[j])*scale); + dp[j][1] = ((p2[j] + p1[j])*0.5); + } + } + + func = randTab[0][depth]; + } + CV_Assert( func != 0 ); + } + else if( disttype == CV_RAND_NORMAL ) + { + _parambuf.allocate(MAX(n1, cn) + MAX(n2, cn)); + double* parambuf = _parambuf; + + int ptype = depth == CV_64F ? CV_64F : CV_32F; + int esz = (int)CV_ELEM_SIZE(ptype); + + if( _param1.isContinuous() && _param1.type() == ptype ) + mean = _param1.data; + else + { + Mat tmp(_param1.size(), ptype, parambuf); + _param1.convertTo(tmp, ptype); + mean = (uchar*)parambuf; + } + + if( n1 < cn ) + for( j = n1*esz; j < cn*esz; j++ ) + mean[j] = mean[j - n1*esz]; + + if( _param2.isContinuous() && _param2.type() == ptype ) + stddev = _param2.data; + else + { + Mat tmp(_param2.size(), ptype, parambuf + cn); + _param2.convertTo(tmp, ptype); + stddev = (uchar*)(parambuf + cn); + } + + if( n1 < cn ) + for( j = n1*esz; j < cn*esz; j++ ) + stddev[j] = stddev[j - n1*esz]; + + stdmtx = _param2.rows == cn && _param2.cols == cn; + scaleFunc = randnScaleTab[depth]; + CV_Assert( scaleFunc != 0 ); + } + else + CV_Error( CV_StsBadArg, "Unknown distribution type" ); + + const Mat* arrays[] = {&mat, 0}; + uchar* ptr; + NAryMatIterator it(arrays, &ptr); + int total = (int)it.size, blockSize = std::min((BLOCK_SIZE + cn - 1)/cn, total); + size_t esz = mat.elemSize(); + AutoBuffer buf; + uchar* param = 0; + float* nbuf = 0; + + if( disttype == 
UNIFORM ) + { + buf.allocate(blockSize*cn*4); + param = (uchar*)(double*)buf; + + if( ip ) + { + if( ds ) + { + DivStruct* p = (DivStruct*)param; + for( j = 0; j < blockSize*cn; j += cn ) + for( k = 0; k < cn; k++ ) + p[j + k] = ds[k]; + } + else + { + Vec2i* p = (Vec2i*)param; + for( j = 0; j < blockSize*cn; j += cn ) + for( k = 0; k < cn; k++ ) + p[j + k] = ip[k]; + } + } + else if( fp ) + { + Vec2f* p = (Vec2f*)param; + for( j = 0; j < blockSize*cn; j += cn ) + for( k = 0; k < cn; k++ ) + p[j + k] = fp[k]; + } + else + { + Vec2d* p = (Vec2d*)param; + for( j = 0; j < blockSize*cn; j += cn ) + for( k = 0; k < cn; k++ ) + p[j + k] = dp[k]; + } + } + else + { + buf.allocate((blockSize*cn+1)/2); + nbuf = (float*)(double*)buf; + } + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + { + for( j = 0; j < total; j += blockSize ) + { + int len = std::min(total - j, blockSize); + + if( disttype == CV_RAND_UNI ) + func( ptr, len*cn, &state, param, smallFlag != 0 ); + else + { + randn_0_1_32f(nbuf, len*cn, &state); + scaleFunc(nbuf, ptr, len, cn, mean, stddev, stdmtx); + } + ptr += len*esz; + } + } +} + +#ifdef WIN32 +#ifdef WINCE +# define TLS_OUT_OF_INDEXES ((DWORD)0xFFFFFFFF) +#endif +static DWORD tlsRNGKey = TLS_OUT_OF_INDEXES; + +void deleteThreadRNGData() +{ + if( tlsRNGKey != TLS_OUT_OF_INDEXES ) + delete (RNG*)TlsGetValue( tlsRNGKey ); +} + +RNG& theRNG() +{ + if( tlsRNGKey == TLS_OUT_OF_INDEXES ) + { + tlsRNGKey = TlsAlloc(); + CV_Assert(tlsRNGKey != TLS_OUT_OF_INDEXES); + } + RNG* rng = (RNG*)TlsGetValue( tlsRNGKey ); + if( !rng ) + { + rng = new RNG; + TlsSetValue( tlsRNGKey, rng ); + } + return *rng; +} + +#else + +static pthread_key_t tlsRNGKey = 0; +static pthread_once_t tlsRNGKeyOnce = PTHREAD_ONCE_INIT; + +static void deleteRNG(void* data) +{ + delete (RNG*)data; +} + +static void makeRNGKey() +{ + int errcode = pthread_key_create(&tlsRNGKey, deleteRNG); + CV_Assert(errcode == 0); +} + +RNG& theRNG() +{ + pthread_once(&tlsRNGKeyOnce, makeRNGKey); + RNG* rng = 
(RNG*)pthread_getspecific(tlsRNGKey); + if( !rng ) + { + rng = new RNG; + pthread_setspecific(tlsRNGKey, rng); + } + return *rng; +} + +#endif + +} + +void cv::randu(InputOutputArray dst, InputArray low, InputArray high) +{ + theRNG().fill(dst, RNG::UNIFORM, low, high); +} + +void cv::randn(InputOutputArray dst, InputArray mean, InputArray stddev) +{ + theRNG().fill(dst, RNG::NORMAL, mean, stddev); +} + +namespace cv +{ + +template static void +randShuffle_( Mat& _arr, RNG& rng, double iterFactor ) +{ + int sz = _arr.rows*_arr.cols, iters = cvRound(iterFactor*sz); + if( _arr.isContinuous() ) + { + T* arr = (T*)_arr.data; + for( int i = 0; i < iters; i++ ) + { + int j = (unsigned)rng % sz, k = (unsigned)rng % sz; + std::swap( arr[j], arr[k] ); + } + } + else + { + uchar* data = _arr.data; + size_t step = _arr.step; + int cols = _arr.cols; + for( int i = 0; i < iters; i++ ) + { + int j1 = (unsigned)rng % sz, k1 = (unsigned)rng % sz; + int j0 = j1/cols, k0 = k1/cols; + j1 -= j0*cols; k1 -= k0*cols; + std::swap( ((T*)(data + step*j0))[j1], ((T*)(data + step*k0))[k1] ); + } + } +} + +typedef void (*RandShuffleFunc)( Mat& dst, RNG& rng, double iterFactor ); + +} + +void cv::randShuffle( InputOutputArray _dst, double iterFactor, RNG* _rng ) +{ + RandShuffleFunc tab[] = + { + 0, + randShuffle_, // 1 + randShuffle_, // 2 + randShuffle_ >, // 3 + randShuffle_, // 4 + 0, + randShuffle_ >, // 6 + 0, + randShuffle_ >, // 8 + 0, 0, 0, + randShuffle_ >, // 12 + 0, 0, 0, + randShuffle_ >, // 16 + 0, 0, 0, 0, 0, 0, 0, + randShuffle_ >, // 24 + 0, 0, 0, 0, 0, 0, 0, + randShuffle_ > // 32 + }; + + Mat dst = _dst.getMat(); + RNG& rng = _rng ? 
*_rng : theRNG(); + CV_Assert( dst.elemSize() <= 32 ); + RandShuffleFunc func = tab[dst.elemSize()]; + CV_Assert( func != 0 ); + func( dst, rng, iterFactor ); +} + +void cv::randShuffle_( InputOutputArray _dst, double iterFactor ) +{ + randShuffle(_dst, iterFactor); +} + +CV_IMPL void +cvRandArr( CvRNG* _rng, CvArr* arr, int disttype, CvScalar param1, CvScalar param2 ) +{ + cv::Mat mat = cv::cvarrToMat(arr); + // !!! this will only work for current 64-bit MWC RNG !!! + cv::RNG& rng = _rng ? (cv::RNG&)*_rng : cv::theRNG(); + rng.fill(mat, disttype == CV_RAND_NORMAL ? + cv::RNG::NORMAL : cv::RNG::UNIFORM, cv::Scalar(param1), cv::Scalar(param2) ); +} + +CV_IMPL void cvRandShuffle( CvArr* arr, CvRNG* _rng, double iter_factor ) +{ + cv::Mat dst = cv::cvarrToMat(arr); + cv::RNG& rng = _rng ? (cv::RNG&)*_rng : cv::theRNG(); + cv::randShuffle( dst, iter_factor, &rng ); +} + +/* End of file. */ diff --git a/opencv/core/stat.cpp b/opencv/core/stat.cpp new file mode 100644 index 0000000..307a68b --- /dev/null +++ b/opencv/core/stat.cpp @@ -0,0 +1,1340 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#include "precomp.hpp" + +namespace cv +{ + +template static inline Scalar rawToScalar(const T& v) +{ + Scalar s; + typedef typename DataType::channel_type T1; + int i, n = DataType::channels; + for( i = 0; i < n; i++ ) + s.val[i] = ((T1*)&v)[i]; + return s; +} + +/****************************************************************************************\ +* sum * +\****************************************************************************************/ + +template +static int sum_(const T* src0, const uchar* mask, ST* dst, int len, int cn ) +{ + const T* src = src0; + if( !mask ) + { + int i; + int k = cn % 4; + if( k == 1 ) + { + ST s0 = dst[0]; + for( i = 0; i <= len - 4; i += 4, src += cn*4 ) + s0 += src[0] + src[cn] + src[cn*2] + src[cn*3]; + for( ; i < len; i++, src += cn ) + s0 += src[0]; + dst[0] = s0; + } + else if( k == 2 ) + { + ST s0 = dst[0], s1 = dst[1]; + for( i = 0; i < len; i++, src += cn ) + { + s0 += src[0]; + s1 += src[1]; + } + dst[0] = s0; + dst[1] = s1; + } + else if( k == 3 ) + { + ST s0 = dst[0], s1 = dst[1], s2 = dst[2]; + for( i = 0; i < len; i++, src += cn ) + { + s0 += src[0]; + s1 += src[1]; + s2 += src[2]; + } + dst[0] = s0; + dst[1] = s1; + dst[2] = s2; + } + + for( ; k < cn; k += 4 ) + { + src = src0 + k; + ST s0 = dst[k], s1 = dst[k+1], s2 = dst[k+2], s3 = dst[k+3]; + for( i = 0; i < len; i++, src += cn ) + { + s0 += src[0]; s1 += src[1]; + s2 += src[2]; s3 += src[3]; + } + dst[k] = s0; + dst[k+1] = s1; + dst[k+2] = s2; + dst[k+3] = s3; + } + return len; + } + + int i, nzm = 0; + if( cn == 1 ) + { + ST s = dst[0]; + for( i = 0; i < len; i++ ) + if( mask[i] ) + { + s += src[i]; + nzm++; + } + dst[0] = s; + } + else if( cn == 3 ) + { + ST s0 = dst[0], s1 = dst[1], s2 = dst[2]; + for( i = 0; i < len; i++, src += 3 ) + if( mask[i] ) + { + s0 += src[0]; + s1 += src[1]; + s2 += src[2]; + nzm++; + } + dst[0] = s0; + dst[1] = s1; + dst[2] = s2; + } + else + { + for( i = 0; i < len; i++, src += cn ) + if( mask[i] ) + { + int k 
= 0; + for( ; k <= cn - 4; k += 4 ) + { + ST s0, s1; + s0 = dst[k] + src[k]; + s1 = dst[k+1] + src[k+1]; + dst[k] = s0; dst[k+1] = s1; + s0 = dst[k+2] + src[k+2]; + s1 = dst[k+3] + src[k+3]; + dst[k+2] = s0; dst[k+3] = s1; + } + for( ; k < cn; k++ ) + dst[k] += src[k]; + nzm++; + } + } + return nzm; +} + + +static int sum8u( const uchar* src, const uchar* mask, int* dst, int len, int cn ) +{ return sum_(src, mask, dst, len, cn); } + +static int sum8s( const schar* src, const uchar* mask, int* dst, int len, int cn ) +{ return sum_(src, mask, dst, len, cn); } + +static int sum16u( const ushort* src, const uchar* mask, int* dst, int len, int cn ) +{ return sum_(src, mask, dst, len, cn); } + +static int sum16s( const short* src, const uchar* mask, int* dst, int len, int cn ) +{ return sum_(src, mask, dst, len, cn); } + +static int sum32s( const int* src, const uchar* mask, double* dst, int len, int cn ) +{ return sum_(src, mask, dst, len, cn); } + +static int sum32f( const float* src, const uchar* mask, double* dst, int len, int cn ) +{ return sum_(src, mask, dst, len, cn); } + +static int sum64f( const double* src, const uchar* mask, double* dst, int len, int cn ) +{ return sum_(src, mask, dst, len, cn); } + +typedef int (*SumFunc)(const uchar*, const uchar* mask, uchar*, int, int); + +static SumFunc sumTab[] = +{ + (SumFunc)sum8u, (SumFunc)sum8s, (SumFunc)sum16u, (SumFunc)sum16s, + (SumFunc)sum32s, (SumFunc)sum32f, (SumFunc)sum64f, 0 +}; + + +template +static int countNonZero_(const T* src, int len ) +{ + int i, nz = 0; + for( i = 0; i <= len - 4; i += 4 ) + nz += (src[i] != 0) + (src[i+1] != 0) + (src[i+2] != 0) + (src[i+3] != 0); + for( ; i < len; i++ ) + nz += src[i] != 0; + return nz; +} + +static int countNonZero8u( const uchar* src, int len ) +{ return countNonZero_(src, len); } + +static int countNonZero16u( const ushort* src, int len ) +{ return countNonZero_(src, len); } + +static int countNonZero32s( const int* src, int len ) +{ return countNonZero_(src, 
len); } + +static int countNonZero32f( const float* src, int len ) +{ return countNonZero_(src, len); } + +static int countNonZero64f( const double* src, int len ) +{ return countNonZero_(src, len); } + +typedef int (*CountNonZeroFunc)(const uchar*, int); + +static CountNonZeroFunc countNonZeroTab[] = +{ + (CountNonZeroFunc)countNonZero8u, (CountNonZeroFunc)countNonZero8u, + (CountNonZeroFunc)countNonZero16u, (CountNonZeroFunc)countNonZero16u, + (CountNonZeroFunc)countNonZero32s, (CountNonZeroFunc)countNonZero32f, + (CountNonZeroFunc)countNonZero64f, 0 +}; + + +template +static int sumsqr_(const T* src0, const uchar* mask, ST* sum, SQT* sqsum, int len, int cn ) +{ + const T* src = src0; + + if( !mask ) + { + int i; + int k = cn % 4; + + if( k == 1 ) + { + ST s0 = sum[0]; + SQT sq0 = sqsum[0]; + for( i = 0; i < len; i++, src += cn ) + { + T v = src[0]; + s0 += v; sq0 += (SQT)v*v; + } + sum[0] = s0; + sqsum[0] = sq0; + } + else if( k == 2 ) + { + ST s0 = sum[0], s1 = sum[1]; + SQT sq0 = sqsum[0], sq1 = sqsum[1]; + for( i = 0; i < len; i++, src += cn ) + { + T v0 = src[0], v1 = src[1]; + s0 += v0; sq0 += (SQT)v0*v0; + s1 += v1; sq1 += (SQT)v1*v1; + } + sum[0] = s0; sum[1] = s1; + sqsum[0] = sq0; sqsum[1] = sq1; + } + else if( k == 3 ) + { + ST s0 = sum[0], s1 = sum[1], s2 = sum[2]; + SQT sq0 = sqsum[0], sq1 = sqsum[1], sq2 = sqsum[2]; + for( i = 0; i < len; i++, src += cn ) + { + T v0 = src[0], v1 = src[1], v2 = src[2]; + s0 += v0; sq0 += (SQT)v0*v0; + s1 += v1; sq1 += (SQT)v1*v1; + s2 += v2; sq2 += (SQT)v2*v2; + } + sum[0] = s0; sum[1] = s1; sum[2] = s2; + sqsum[0] = sq0; sqsum[1] = sq1; sqsum[2] = sq2; + } + + for( ; k < cn; k += 4 ) + { + src = src0 + k; + ST s0 = sum[k], s1 = sum[k+1], s2 = sum[k+2], s3 = sum[k+3]; + SQT sq0 = sqsum[k], sq1 = sqsum[k+1], sq2 = sqsum[k+2], sq3 = sqsum[k+3]; + for( i = 0; i < len; i++, src += cn ) + { + T v0, v1; + v0 = src[0], v1 = src[1]; + s0 += v0; sq0 += (SQT)v0*v0; + s1 += v1; sq1 += (SQT)v1*v1; + v0 = src[2], v1 = src[3]; + 
s2 += v0; sq2 += (SQT)v0*v0; + s3 += v1; sq3 += (SQT)v1*v1; + } + sum[k] = s0; sum[k+1] = s1; + sum[k+2] = s2; sum[k+3] = s3; + sqsum[k] = sq0; sqsum[k+1] = sq1; + sqsum[k+2] = sq2; sqsum[k+3] = sq3; + } + return len; + } + + int i, nzm = 0; + + if( cn == 1 ) + { + ST s0 = sum[0]; + SQT sq0 = sqsum[0]; + for( i = 0; i < len; i++ ) + if( mask[i] ) + { + T v = src[i]; + s0 += v; sq0 += (SQT)v*v; + nzm++; + } + sum[0] = s0; + sqsum[0] = sq0; + } + else if( cn == 3 ) + { + ST s0 = sum[0], s1 = sum[1], s2 = sum[2]; + SQT sq0 = sqsum[0], sq1 = sqsum[1], sq2 = sqsum[2]; + for( i = 0; i < len; i++, src += 3 ) + if( mask[i] ) + { + T v0 = src[0], v1 = src[1], v2 = src[2]; + s0 += v0; sq0 += (SQT)v0*v0; + s1 += v1; sq1 += (SQT)v1*v1; + s2 += v2; sq2 += (SQT)v2*v2; + nzm++; + } + sum[0] = s0; sum[1] = s1; sum[2] = s2; + sqsum[0] = sq0; sqsum[1] = sq1; sqsum[2] = sq2; + } + else + { + for( i = 0; i < len; i++, src += cn ) + if( mask[i] ) + { + for( int k = 0; k < cn; k++ ) + { + T v = src[k]; + ST s = sum[k] + v; + SQT sq = sqsum[k] + (SQT)v*v; + sum[k] = s; sqsum[k] = sq; + } + nzm++; + } + } + return nzm; +} + + +static int sqsum8u( const uchar* src, const uchar* mask, int* sum, int* sqsum, int len, int cn ) +{ return sumsqr_(src, mask, sum, sqsum, len, cn); } + +static int sqsum8s( const schar* src, const uchar* mask, int* sum, int* sqsum, int len, int cn ) +{ return sumsqr_(src, mask, sum, sqsum, len, cn); } + +static int sqsum16u( const ushort* src, const uchar* mask, int* sum, double* sqsum, int len, int cn ) +{ return sumsqr_(src, mask, sum, sqsum, len, cn); } + +static int sqsum16s( const short* src, const uchar* mask, int* sum, double* sqsum, int len, int cn ) +{ return sumsqr_(src, mask, sum, sqsum, len, cn); } + +static int sqsum32s( const int* src, const uchar* mask, double* sum, double* sqsum, int len, int cn ) +{ return sumsqr_(src, mask, sum, sqsum, len, cn); } + +static int sqsum32f( const float* src, const uchar* mask, double* sum, double* sqsum, int len, int 
cn ) +{ return sumsqr_(src, mask, sum, sqsum, len, cn); } + +static int sqsum64f( const double* src, const uchar* mask, double* sum, double* sqsum, int len, int cn ) +{ return sumsqr_(src, mask, sum, sqsum, len, cn); } + +typedef int (*SumSqrFunc)(const uchar*, const uchar* mask, uchar*, uchar*, int, int); + +static SumSqrFunc sumSqrTab[] = +{ + (SumSqrFunc)sqsum8u, (SumSqrFunc)sqsum8s, (SumSqrFunc)sqsum16u, (SumSqrFunc)sqsum16s, + (SumSqrFunc)sqsum32s, (SumSqrFunc)sqsum32f, (SumSqrFunc)sqsum64f, 0 +}; + +} + +cv::Scalar cv::sum( InputArray _src ) +{ + Mat src = _src.getMat(); + int k, cn = src.channels(), depth = src.depth(); + SumFunc func = sumTab[depth]; + + CV_Assert( cn <= 4 && func != 0 ); + + const Mat* arrays[] = {&src, 0}; + uchar* ptrs[1]; + NAryMatIterator it(arrays, ptrs); + Scalar s; + int total = (int)it.size, blockSize = total, intSumBlockSize = 0; + int j, count = 0; + AutoBuffer _buf; + int* buf = (int*)&s[0]; + size_t esz = 0; + bool blockSum = depth < CV_32S; + + if( blockSum ) + { + intSumBlockSize = depth <= CV_8S ? 
(1 << 23) : (1 << 15); + blockSize = std::min(blockSize, intSumBlockSize); + _buf.allocate(cn); + buf = _buf; + + for( k = 0; k < cn; k++ ) + buf[k] = 0; + esz = src.elemSize(); + } + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + { + for( j = 0; j < total; j += blockSize ) + { + int bsz = std::min(total - j, blockSize); + func( ptrs[0], 0, (uchar*)buf, bsz, cn ); + count += bsz; + if( blockSum && (count + blockSize >= intSumBlockSize || (i+1 >= it.nplanes && j+bsz >= total)) ) + { + for( k = 0; k < cn; k++ ) + { + s[k] += buf[k]; + buf[k] = 0; + } + count = 0; + } + ptrs[0] += bsz*esz; + } + } + return s; +} + +int cv::countNonZero( InputArray _src ) +{ + Mat src = _src.getMat(); + CountNonZeroFunc func = countNonZeroTab[src.depth()]; + + CV_Assert( src.channels() == 1 && func != 0 ); + + const Mat* arrays[] = {&src, 0}; + uchar* ptrs[1]; + NAryMatIterator it(arrays, ptrs); + int total = (int)it.size, nz = 0; + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + nz += func( ptrs[0], total ); + + return nz; +} + +cv::Scalar cv::mean( InputArray _src, InputArray _mask ) +{ + Mat src = _src.getMat(), mask = _mask.getMat(); + CV_Assert( mask.empty() || mask.type() == CV_8U ); + + int k, cn = src.channels(), depth = src.depth(); + SumFunc func = sumTab[depth]; + + CV_Assert( cn <= 4 && func != 0 ); + + const Mat* arrays[] = {&src, &mask, 0}; + uchar* ptrs[2]; + NAryMatIterator it(arrays, ptrs); + Scalar s; + int total = (int)it.size, blockSize = total, intSumBlockSize = 0; + int j, count = 0; + AutoBuffer _buf; + int* buf = (int*)&s[0]; + bool blockSum = depth <= CV_16S; + size_t esz = 0, nz0 = 0; + + if( blockSum ) + { + intSumBlockSize = depth <= CV_8S ? 
(1 << 23) : (1 << 15); + blockSize = std::min(blockSize, intSumBlockSize); + _buf.allocate(cn); + buf = _buf; + + for( k = 0; k < cn; k++ ) + buf[k] = 0; + esz = src.elemSize(); + } + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + { + for( j = 0; j < total; j += blockSize ) + { + int bsz = std::min(total - j, blockSize); + int nz = func( ptrs[0], ptrs[1], (uchar*)buf, bsz, cn ); + count += nz; + nz0 += nz; + if( blockSum && (count + blockSize >= intSumBlockSize || (i+1 >= it.nplanes && j+bsz >= total)) ) + { + for( k = 0; k < cn; k++ ) + { + s[k] += buf[k]; + buf[k] = 0; + } + count = 0; + } + ptrs[0] += bsz*esz; + if( ptrs[1] ) + ptrs[1] += bsz; + } + } + return s*(nz0 ? 1./nz0 : 0); +} + + +void cv::meanStdDev( InputArray _src, OutputArray _mean, OutputArray _sdv, InputArray _mask ) +{ + Mat src = _src.getMat(), mask = _mask.getMat(); + CV_Assert( mask.empty() || mask.type() == CV_8U ); + + int k, cn = src.channels(), depth = src.depth(); + SumSqrFunc func = sumSqrTab[depth]; + + CV_Assert( func != 0 ); + + const Mat* arrays[] = {&src, &mask, 0}; + uchar* ptrs[2]; + NAryMatIterator it(arrays, ptrs); + int total = (int)it.size, blockSize = total, intSumBlockSize = 0; + int j, count = 0, nz0 = 0; + AutoBuffer _buf(cn*4); + double *s = (double*)_buf, *sq = s + cn; + int *sbuf = (int*)s, *sqbuf = (int*)sq; + bool blockSum = depth <= CV_16S, blockSqSum = depth <= CV_8S; + size_t esz = 0; + + for( k = 0; k < cn; k++ ) + s[k] = sq[k] = 0; + + if( blockSum ) + { + intSumBlockSize = 1 << 15; + blockSize = std::min(blockSize, intSumBlockSize); + sbuf = (int*)(sq + cn); + if( blockSqSum ) + sqbuf = sbuf + cn; + for( k = 0; k < cn; k++ ) + sbuf[k] = sqbuf[k] = 0; + esz = src.elemSize(); + } + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + { + for( j = 0; j < total; j += blockSize ) + { + int bsz = std::min(total - j, blockSize); + int nz = func( ptrs[0], ptrs[1], (uchar*)sbuf, (uchar*)sqbuf, bsz, cn ); + count += nz; + nz0 += nz; + if( blockSum && (count + blockSize 
>= intSumBlockSize || (i+1 >= it.nplanes && j+bsz >= total)) ) + { + for( k = 0; k < cn; k++ ) + { + s[k] += sbuf[k]; + sbuf[k] = 0; + } + if( blockSqSum ) + { + for( k = 0; k < cn; k++ ) + { + sq[k] += sqbuf[k]; + sqbuf[k] = 0; + } + } + count = 0; + } + ptrs[0] += bsz*esz; + if( ptrs[1] ) + ptrs[1] += bsz; + } + } + + double scale = nz0 ? 1./nz0 : 0.; + for( k = 0; k < cn; k++ ) + { + s[k] *= scale; + sq[k] = std::sqrt(std::max(sq[k]*scale - s[k]*s[k], 0.)); + } + + for( j = 0; j < 2; j++ ) + { + const double* sptr = j == 0 ? s : sq; + _OutputArray _dst = j == 0 ? _mean : _sdv; + if( !_dst.needed() ) + continue; + + if( !_dst.fixedSize() ) + _dst.create(cn, 1, CV_64F, -1, true); + Mat dst = _dst.getMat(); + int dcn = (int)dst.total(); + CV_Assert( dst.type() == CV_64F && dst.isContinuous() && + (dst.cols == 1 || dst.rows == 1) && dcn >= cn ); + double* dptr = dst.ptr(); + for( k = 0; k < cn; k++ ) + dptr[k] = sptr[k]; + for( ; k < dcn; k++ ) + dptr[k] = 0; + } +} + +/****************************************************************************************\ +* minMaxLoc * +\****************************************************************************************/ + +namespace cv +{ + +template static void +minMaxIdx_( const T* src, const uchar* mask, WT* _minVal, WT* _maxVal, + size_t* _minIdx, size_t* _maxIdx, int len, size_t startIdx ) +{ + WT minVal = *_minVal, maxVal = *_maxVal; + size_t minIdx = *_minIdx, maxIdx = *_maxIdx; + + if( !mask ) + { + for( int i = 0; i < len; i++ ) + { + T val = src[i]; + if( val < minVal ) + { + minVal = val; + minIdx = startIdx + i; + } + if( val > maxVal ) + { + maxVal = val; + maxIdx = startIdx + i; + } + } + } + else + { + for( int i = 0; i < len; i++ ) + { + T val = src[i]; + if( mask[i] && val < minVal ) + { + minVal = val; + minIdx = startIdx + i; + } + if( mask[i] && val > maxVal ) + { + maxVal = val; + maxIdx = startIdx + i; + } + } + } + + *_minIdx = minIdx; + *_maxIdx = maxIdx; + *_minVal = minVal; + *_maxVal = maxVal; +} 
+ +static void minMaxIdx_8u(const uchar* src, const uchar* mask, int* minval, int* maxval, + size_t* minidx, size_t* maxidx, int len, size_t startidx ) +{ minMaxIdx_(src, mask, minval, maxval, minidx, maxidx, len, startidx ); } + +static void minMaxIdx_8s(const schar* src, const uchar* mask, int* minval, int* maxval, + size_t* minidx, size_t* maxidx, int len, size_t startidx ) +{ minMaxIdx_(src, mask, minval, maxval, minidx, maxidx, len, startidx ); } + +static void minMaxIdx_16u(const ushort* src, const uchar* mask, int* minval, int* maxval, + size_t* minidx, size_t* maxidx, int len, size_t startidx ) +{ minMaxIdx_(src, mask, minval, maxval, minidx, maxidx, len, startidx ); } + +static void minMaxIdx_16s(const short* src, const uchar* mask, int* minval, int* maxval, + size_t* minidx, size_t* maxidx, int len, size_t startidx ) +{ minMaxIdx_(src, mask, minval, maxval, minidx, maxidx, len, startidx ); } + +static void minMaxIdx_32s(const int* src, const uchar* mask, int* minval, int* maxval, + size_t* minidx, size_t* maxidx, int len, size_t startidx ) +{ minMaxIdx_(src, mask, minval, maxval, minidx, maxidx, len, startidx ); } + +static void minMaxIdx_32f(const float* src, const uchar* mask, float* minval, float* maxval, + size_t* minidx, size_t* maxidx, int len, size_t startidx ) +{ minMaxIdx_(src, mask, minval, maxval, minidx, maxidx, len, startidx ); } + +static void minMaxIdx_64f(const double* src, const uchar* mask, double* minval, double* maxval, + size_t* minidx, size_t* maxidx, int len, size_t startidx ) +{ minMaxIdx_(src, mask, minval, maxval, minidx, maxidx, len, startidx ); } + +typedef void (*MinMaxIdxFunc)(const uchar*, const uchar*, int*, int*, size_t*, size_t*, int, size_t); + +static MinMaxIdxFunc minmaxTab[] = +{ + (MinMaxIdxFunc)minMaxIdx_8u, (MinMaxIdxFunc)minMaxIdx_8s, (MinMaxIdxFunc)minMaxIdx_16u, + (MinMaxIdxFunc)minMaxIdx_16s, (MinMaxIdxFunc)minMaxIdx_32s, (MinMaxIdxFunc)minMaxIdx_32f, + (MinMaxIdxFunc)minMaxIdx_64f, 0 +}; + +static void 
ofs2idx(const Mat& a, size_t ofs, int* idx) +{ + int i, d = a.dims; + if( ofs > 0 ) + { + ofs--; + for( i = d-1; i >= 0; i-- ) + { + int sz = a.size[i]; + idx[i] = (int)(ofs % sz); + ofs /= sz; + } + } + else + { + for( i = d-1; i >= 0; i-- ) + idx[i] = -1; + } +} + +} + +void cv::minMaxIdx(InputArray _src, double* minVal, + double* maxVal, int* minIdx, int* maxIdx, + InputArray _mask) +{ + Mat src = _src.getMat(), mask = _mask.getMat(); + int depth = src.depth(), cn = src.channels(); + + CV_Assert( (cn == 1 && (mask.empty() || mask.type() == CV_8U)) || + (cn >= 1 && mask.empty() && !minIdx && !maxIdx) ); + MinMaxIdxFunc func = minmaxTab[depth]; + CV_Assert( func != 0 ); + + const Mat* arrays[] = {&src, &mask, 0}; + uchar* ptrs[2]; + NAryMatIterator it(arrays, ptrs); + + size_t minidx = 0, maxidx = 0; + int iminval = INT_MAX, imaxval = INT_MIN; + float fminval = FLT_MAX, fmaxval = -FLT_MAX; + double dminval = DBL_MAX, dmaxval = -DBL_MAX; + size_t startidx = 1; + int *minval = &iminval, *maxval = &imaxval; + int planeSize = (int)it.size*cn; + + if( depth == CV_32F ) + minval = (int*)&fminval, maxval = (int*)&fmaxval; + else if( depth == CV_64F ) + minval = (int*)&dminval, maxval = (int*)&dmaxval; + + for( size_t i = 0; i < it.nplanes; i++, ++it, startidx += planeSize ) + func( ptrs[0], ptrs[1], minval, maxval, &minidx, &maxidx, planeSize, startidx ); + + if( minidx == 0 ) + dminval = dmaxval = 0; + else if( depth == CV_32F ) + dminval = fminval, dmaxval = fmaxval; + else if( depth <= CV_32S ) + dminval = iminval, dmaxval = imaxval; + + if( minVal ) + *minVal = dminval; + if( maxVal ) + *maxVal = dmaxval; + + if( minIdx ) + ofs2idx(src, minidx, minIdx); + if( maxIdx ) + ofs2idx(src, maxidx, maxIdx); +} + +void cv::minMaxLoc( InputArray _img, double* minVal, double* maxVal, + Point* minLoc, Point* maxLoc, InputArray mask ) +{ + Mat img = _img.getMat(); + CV_Assert(img.dims <= 2); + + minMaxIdx(_img, minVal, maxVal, (int*)minLoc, (int*)maxLoc, mask); + if( minLoc ) + 
std::swap(minLoc->x, minLoc->y); + if( maxLoc ) + std::swap(maxLoc->x, maxLoc->y); +} + +/****************************************************************************************\ +* norm * +\****************************************************************************************/ + +namespace cv +{ + +template int +normInf_(const T* src, const uchar* mask, ST* _result, int len, int cn) +{ + ST result = *_result; + if( !mask ) + { + len *= cn; + for( int i = 0; i < len; i++ ) + result = std::max(result, ST(std::abs(src[i]))); + } + else + { + for( int i = 0; i < len; i++, src += cn ) + if( mask[i] ) + { + for( int k = 0; k < cn; k++ ) + result = std::max(result, ST(std::abs(src[k]))); + } + } + *_result = result; + return 0; +} + +template int +normL1_(const T* src, const uchar* mask, ST* _result, int len, int cn) +{ + ST result = *_result; + if( !mask ) + { + len *= cn; + for( int i = 0; i < len; i++ ) + result += std::abs(src[i]); + } + else + { + for( int i = 0; i < len; i++, src += cn ) + if( mask[i] ) + { + for( int k = 0; k < cn; k++ ) + result += std::abs(src[k]); + } + } + *_result = result; + return 0; +} + +template int +normL2_(const T* src, const uchar* mask, ST* _result, int len, int cn) +{ + ST result = *_result; + if( !mask ) + { + len *= cn; + for( int i = 0; i < len; i++ ) + { + T v = src[i]; + result += (ST)v*v; + } + } + else + { + for( int i = 0; i < len; i++, src += cn ) + if( mask[i] ) + { + for( int k = 0; k < cn; k++ ) + { + T v = src[k]; + result += (ST)v*v; + } + } + } + *_result = result; + return 0; +} + +template int +normDiffInf_(const T* src1, const T* src2, const uchar* mask, ST* _result, int len, int cn) +{ + ST result = *_result; + if( !mask ) + { + len *= cn; + for( int i = 0; i < len; i++ ) + result = std::max(result, (ST)std::abs(src1[i] - src2[i])); + } + else + { + for( int i = 0; i < len; i++, src1 += cn, src2 += cn ) + if( mask[i] ) + { + for( int k = 0; k < cn; k++ ) + result = std::max(result, (ST)std::abs(src1[k] - 
src2[k])); + } + } + *_result = result; + return 0; +} + +template int +normDiffL1_(const T* src1, const T* src2, const uchar* mask, ST* _result, int len, int cn) +{ + ST result = *_result; + if( !mask ) + { + len *= cn; + for( int i = 0; i < len; i++ ) + result += std::abs(src1[i] - src2[i]); + } + else + { + for( int i = 0; i < len; i++, src1 += cn, src2 += cn ) + if( mask[i] ) + { + for( int k = 0; k < cn; k++ ) + result += std::abs(src1[k] - src2[k]); + } + } + *_result = result; + return 0; +} + +template int +normDiffL2_(const T* src1, const T* src2, const uchar* mask, ST* _result, int len, int cn) +{ + ST result = *_result; + if( !mask ) + { + len *= cn; + for( int i = 0; i < len; i++ ) + { + ST v = src1[i] - src2[i]; + result += v*v; + } + } + else + { + for( int i = 0; i < len; i++, src1 += cn, src2 += cn ) + if( mask[i] ) + { + for( int k = 0; k < cn; k++ ) + { + ST v = src1[k] - src2[k]; + result += v*v; + } + } + } + *_result = result; + return 0; +} + + +#define CV_DEF_NORM_FUNC(L, suffix, type, ntype) \ +static int norm##L##_##suffix(const type* src, const uchar* mask, ntype* r, int len, int cn) \ +{ return norm##L##_(src, mask, r, len, cn); } \ +static int normDiff##L##_##suffix(const type* src1, const type* src2, \ + const uchar* mask, ntype* r, int len, int cn) \ +{ return normDiff##L##_(src1, src2, mask, r, (int)len, cn); } + +#define CV_DEF_NORM_ALL(suffix, type, inftype, l1type, l2type) \ +CV_DEF_NORM_FUNC(Inf, suffix, type, inftype) \ +CV_DEF_NORM_FUNC(L1, suffix, type, l1type) \ +CV_DEF_NORM_FUNC(L2, suffix, type, l2type) + +CV_DEF_NORM_ALL(8u, uchar, int, int, int) +CV_DEF_NORM_ALL(8s, schar, int, int, int) +CV_DEF_NORM_ALL(16u, ushort, int, int, double) +CV_DEF_NORM_ALL(16s, short, int, int, double) +CV_DEF_NORM_ALL(32s, int, int, double, double) +CV_DEF_NORM_ALL(32f, float, float, double, double) +CV_DEF_NORM_ALL(64f, double, double, double, double) + + +typedef int (*NormFunc)(const uchar*, const uchar*, uchar*, int, int); +typedef int 
(*NormDiffFunc)(const uchar*, const uchar*, const uchar*, uchar*, int, int); + +static NormFunc normTab[3][8] = +{ + { + (NormFunc)normInf_8u, (NormFunc)normInf_8s, (NormFunc)normInf_16u, (NormFunc)normInf_16s, + (NormFunc)normInf_32s, (NormFunc)normInf_32f, (NormFunc)normInf_64f, 0 + }, + { + (NormFunc)normL1_8u, (NormFunc)normL1_8s, (NormFunc)normL1_16u, (NormFunc)normL1_16s, + (NormFunc)normL1_32s, (NormFunc)normL1_32f, (NormFunc)normL1_64f, 0 + }, + { + (NormFunc)normL2_8u, (NormFunc)normL2_8s, (NormFunc)normL2_16u, (NormFunc)normL2_16s, + (NormFunc)normL2_32s, (NormFunc)normL2_32f, (NormFunc)normL2_64f, 0 + } +}; + +static NormDiffFunc normDiffTab[3][8] = +{ + { + (NormDiffFunc)normDiffInf_8u, (NormDiffFunc)normDiffInf_8s, + (NormDiffFunc)normDiffInf_16u, (NormDiffFunc)normDiffInf_16s, + (NormDiffFunc)normDiffInf_32s, (NormDiffFunc)normDiffInf_32f, + (NormDiffFunc)normDiffInf_64f, 0 + }, + { + (NormDiffFunc)normDiffL1_8u, (NormDiffFunc)normDiffL1_8s, + (NormDiffFunc)normDiffL1_16u, (NormDiffFunc)normDiffL1_16s, + (NormDiffFunc)normDiffL1_32s, (NormDiffFunc)normDiffL1_32f, + (NormDiffFunc)normDiffL1_64f, 0 + }, + { + (NormDiffFunc)normDiffL2_8u, (NormDiffFunc)normDiffL2_8s, + (NormDiffFunc)normDiffL2_16u, (NormDiffFunc)normDiffL2_16s, + (NormDiffFunc)normDiffL2_32s, (NormDiffFunc)normDiffL2_32f, + (NormDiffFunc)normDiffL2_64f, 0 + } +}; + +} + +double cv::norm( InputArray _src, int normType, InputArray _mask ) +{ + Mat src = _src.getMat(), mask = _mask.getMat(); + int depth = src.depth(), cn = src.channels(); + + normType &= 7; + CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 ); + + if( depth == CV_32F && src.isContinuous() && mask.empty() ) + { + size_t len = src.total()*cn; + if( len == (size_t)(int)len ) + { + const float* data = src.ptr(); + + if( normType == NORM_L2 ) + { + double result = 0; + normL2_32f(data, 0, &result, (int)len, 1); + return std::sqrt(result); + } + if( normType == NORM_L1 ) + { + double result = 0; + 
normL1_32f(data, 0, &result, (int)len, 1); + return result; + } + { + float result = 0; + normInf_32f(data, 0, &result, (int)len, 1); + return result; + } + } + } + + CV_Assert( mask.empty() || mask.type() == CV_8U ); + + NormFunc func = normTab[normType >> 1][depth]; + CV_Assert( func != 0 ); + + const Mat* arrays[] = {&src, &mask, 0}; + uchar* ptrs[2]; + union + { + double d; + int i; + float f; + } + result; + result.d = 0; + NAryMatIterator it(arrays, ptrs); + int j, total = (int)it.size, blockSize = total, intSumBlockSize = 0, count = 0; + bool blockSum = (normType == NORM_L1 && depth <= CV_16S) || + (normType == NORM_L2 && depth <= CV_8S); + int isum = 0; + int *ibuf = &result.i; + size_t esz = 0; + + if( blockSum ) + { + intSumBlockSize = (normType == NORM_L1 && depth <= CV_8S ? (1 << 23) : (1 << 15))/cn; + blockSize = std::min(blockSize, intSumBlockSize); + ibuf = &isum; + esz = src.elemSize(); + } + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + { + for( j = 0; j < total; j += blockSize ) + { + int bsz = std::min(total - j, blockSize); + func( ptrs[0], ptrs[1], (uchar*)ibuf, bsz, cn ); + count += bsz; + if( blockSum && (count + blockSize >= intSumBlockSize || (i+1 >= it.nplanes && j+bsz >= total)) ) + { + result.d += isum; + isum = 0; + count = 0; + } + ptrs[0] += bsz*esz; + if( ptrs[1] ) + ptrs[1] += bsz; + } + } + + if( normType == NORM_INF ) + { + if( depth == CV_64F ) + ; + else if( depth == CV_32F ) + result.d = result.f; + else + result.d = result.i; + } + else if( normType == NORM_L2 ) + result.d = std::sqrt(result.d); + + return result.d; +} + + +double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _mask ) +{ + if( normType & CV_RELATIVE ) + return norm(_src1, _src2, normType & ~CV_RELATIVE, _mask)/(norm(_src2, normType, _mask) + DBL_EPSILON); + + Mat src1 = _src1.getMat(), src2 = _src2.getMat(), mask = _mask.getMat(); + int depth = src1.depth(), cn = src1.channels(); + + CV_Assert( src1.size == src2.size && src1.type() 
== src2.type() ); + + normType &= 7; + CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 ); + + if( src1.depth() == CV_32F && src1.isContinuous() && src2.isContinuous() && mask.empty() ) + { + size_t len = src1.total()*src1.channels(); + if( len == (size_t)(int)len ) + { + const float* data1 = src1.ptr(); + const float* data2 = src2.ptr(); + + if( normType == NORM_L2 ) + { + double result = 0; + normDiffL2_32f(data1, data2, 0, &result, (int)len, 1); + return std::sqrt(result); + } + if( normType == NORM_L1 ) + { + double result = 0; + normDiffL1_32f(data1, data2, 0, &result, (int)len, 1); + return result; + } + { + float result = 0; + normDiffInf_32f(data1, data2, 0, &result, (int)len, 1); + return result; + } + } + } + + CV_Assert( mask.empty() || mask.type() == CV_8U ); + + NormDiffFunc func = normDiffTab[normType >> 1][depth]; + CV_Assert( func != 0 ); + + const Mat* arrays[] = {&src1, &src2, &mask, 0}; + uchar* ptrs[3]; + union + { + double d; + float f; + int i; + unsigned u; + } + result; + result.d = 0; + NAryMatIterator it(arrays, ptrs); + int j, total = (int)it.size, blockSize = total, intSumBlockSize = 0, count = 0; + bool blockSum = (normType == NORM_L1 && depth <= CV_16S) || + (normType == NORM_L2 && depth <= CV_8S); + unsigned isum = 0; + unsigned *ibuf = &result.u; + size_t esz = 0; + + if( blockSum ) + { + intSumBlockSize = normType == NORM_L1 && depth <= CV_8S ? 
(1 << 23) : (1 << 15); + blockSize = std::min(blockSize, intSumBlockSize); + ibuf = &isum; + esz = src1.elemSize(); + } + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + { + for( j = 0; j < total; j += blockSize ) + { + int bsz = std::min(total - j, blockSize); + func( ptrs[0], ptrs[1], ptrs[2], (uchar*)ibuf, bsz, cn ); + count += bsz; + if( blockSum && (count + blockSize >= intSumBlockSize || (i+1 >= it.nplanes && j+bsz >= total)) ) + { + result.d += isum; + isum = 0; + count = 0; + } + ptrs[0] += bsz*esz; + ptrs[1] += bsz*esz; + if( ptrs[2] ) + ptrs[2] += bsz; + } + } + + if( normType == NORM_INF ) + { + if( depth == CV_64F ) + ; + else if( depth == CV_32F ) + result.d = result.f; + else + result.d = result.u; + } + else if( normType == NORM_L2 ) + result.d = std::sqrt(result.d); + + return result.d; +} + + +CV_IMPL CvScalar cvSum( const CvArr* srcarr ) +{ + cv::Scalar sum = cv::sum(cv::cvarrToMat(srcarr, false, true, 1)); + if( CV_IS_IMAGE(srcarr) ) + { + int coi = cvGetImageCOI((IplImage*)srcarr); + if( coi ) + { + CV_Assert( 0 < coi && coi <= 4 ); + sum = cv::Scalar(sum[coi-1]); + } + } + return sum; +} + +CV_IMPL int cvCountNonZero( const CvArr* imgarr ) +{ + cv::Mat img = cv::cvarrToMat(imgarr, false, true, 1); + if( img.channels() > 1 ) + cv::extractImageCOI(imgarr, img); + return countNonZero(img); +} + + +CV_IMPL CvScalar +cvAvg( const void* imgarr, const void* maskarr ) +{ + cv::Mat img = cv::cvarrToMat(imgarr, false, true, 1); + cv::Scalar mean = !maskarr ? 
cv::mean(img) : cv::mean(img, cv::cvarrToMat(maskarr)); + if( CV_IS_IMAGE(imgarr) ) + { + int coi = cvGetImageCOI((IplImage*)imgarr); + if( coi ) + { + CV_Assert( 0 < coi && coi <= 4 ); + mean = cv::Scalar(mean[coi-1]); + } + } + return mean; +} + + +CV_IMPL void +cvAvgSdv( const CvArr* imgarr, CvScalar* _mean, CvScalar* _sdv, const void* maskarr ) +{ + cv::Scalar mean, sdv; + + cv::Mat mask; + if( maskarr ) + mask = cv::cvarrToMat(maskarr); + + cv::meanStdDev(cv::cvarrToMat(imgarr, false, true, 1), mean, sdv, mask ); + + if( CV_IS_IMAGE(imgarr) ) + { + int coi = cvGetImageCOI((IplImage*)imgarr); + if( coi ) + { + CV_Assert( 0 < coi && coi <= 4 ); + mean = cv::Scalar(mean[coi-1]); + sdv = cv::Scalar(sdv[coi-1]); + } + } + + if( _mean ) + *(cv::Scalar*)_mean = mean; + if( _sdv ) + *(cv::Scalar*)_sdv = sdv; +} + + +CV_IMPL void +cvMinMaxLoc( const void* imgarr, double* _minVal, double* _maxVal, + CvPoint* _minLoc, CvPoint* _maxLoc, const void* maskarr ) +{ + cv::Mat mask, img = cv::cvarrToMat(imgarr, false, true, 1); + if( maskarr ) + mask = cv::cvarrToMat(maskarr); + if( img.channels() > 1 ) + cv::extractImageCOI(imgarr, img); + + cv::minMaxLoc( img, _minVal, _maxVal, + (cv::Point*)_minLoc, (cv::Point*)_maxLoc, mask ); +} + + +CV_IMPL double +cvNorm( const void* imgA, const void* imgB, int normType, const void* maskarr ) +{ + cv::Mat a, mask; + if( !imgA ) + { + imgA = imgB; + imgB = 0; + } + + a = cv::cvarrToMat(imgA, false, true, 1); + if( maskarr ) + mask = cv::cvarrToMat(maskarr); + + if( a.channels() > 1 && CV_IS_IMAGE(imgA) && cvGetImageCOI((const IplImage*)imgA) > 0 ) + cv::extractImageCOI(imgA, a); + + if( !imgB ) + return !maskarr ? cv::norm(a, normType) : cv::norm(a, normType, mask); + + cv::Mat b = cv::cvarrToMat(imgB, false, true, 1); + if( b.channels() > 1 && CV_IS_IMAGE(imgB) && cvGetImageCOI((const IplImage*)imgB) > 0 ) + cv::extractImageCOI(imgB, b); + + return !maskarr ? 
cv::norm(a, b, normType) : cv::norm(a, b, normType, mask); +} diff --git a/opencv/core/system.cpp b/opencv/core/system.cpp new file mode 100644 index 0000000..c7b3451 --- /dev/null +++ b/opencv/core/system.cpp @@ -0,0 +1,835 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" +#if defined _MSC_VER && (defined _M_IX86 || defined _M_X64) + //#include +#endif + +#if defined WIN32 || defined _WIN32 || defined WINCE +#include +#if defined _MSC_VER + #if _MSC_VER >= 1400 + #include + #elif defined _M_IX86 + static void __cpuid(int* cpuid_data, int) + { + __asm + { + push ebx + push edi + mov edi, cpuid_data + mov eax, 1 + cpuid + mov [edi], eax + mov [edi + 4], ebx + mov [edi + 8], ecx + mov [edi + 12], edx + pop edi + pop ebx + } + } + #endif +#endif +#else +#include +#include +#include + +#if defined __MACH__ && defined __APPLE__ +#include +#include +#endif + +#endif + +#ifdef _OPENMP +#include "omp.h" +#endif + +#include + +#if defined __linux__ || defined __APPLE__ +#include +#include +#include +#if defined ANDROID +#include +#else +#include +#endif +#endif + +namespace cv +{ + +Exception::Exception() { code = 0; line = 0; } + +Exception::Exception(int _code, const string& _err, const string& _func, const string& _file, int _line) +: code(_code), err(_err), func(_func), file(_file), line(_line) +{ + formatMessage(); +} + +Exception::~Exception() throw() {} + +/*! + \return the error description and the context as a text string. 
+ */ +const char* Exception::what() const throw() { return msg.c_str(); } + +void Exception::formatMessage() +{ + if( func.size() > 0 ) + msg = format("%s:%d: error: (%d) %s in function %s\n", file.c_str(), line, code, err.c_str(), func.c_str()); + else + msg = format("%s:%d: error: (%d) %s\n", file.c_str(), line, code, err.c_str()); +} + +struct HWFeatures +{ + enum { MAX_FEATURE = CV_HARDWARE_MAX_FEATURE }; + + HWFeatures(void) + { + memset( have, 0, sizeof(have) ); + x86_family = 0; + } + + static HWFeatures initialize(void) + { + HWFeatures f; + int cpuid_data[4] = { 0, 0, 0, 0 }; + + #if defined _MSC_VER && (defined _M_IX86 || defined _M_X64) + //__cpuid(cpuid_data, 1); ///XXXX - Lior lgorithms.com - do not work for + #elif defined __GNUC__ && (defined __i386__ || defined __x86_64__) + #ifdef __x86_64__ + asm __volatile__ + ( + "movl $1, %%eax\n\t" + "cpuid\n\t" + :[eax]"=a"(cpuid_data[0]),[ebx]"=b"(cpuid_data[1]),[ecx]"=c"(cpuid_data[2]),[edx]"=d"(cpuid_data[3]) + : + : "cc" + ); + #else + asm volatile + ( + "pushl %%ebx\n\t" + "movl $1,%%eax\n\t" + "cpuid\n\t" + "popl %%ebx\n\t" + : "=a"(cpuid_data[0]), "=c"(cpuid_data[2]), "=d"(cpuid_data[3]) + : + : "cc" + ); + #endif + #endif + + f.x86_family = (cpuid_data[0] >> 8) & 15; + if( f.x86_family >= 6 ) + { + f.have[CV_CPU_MMX] = (cpuid_data[3] & (1 << 23)) != 0; + f.have[CV_CPU_SSE] = (cpuid_data[3] & (1<<25)) != 0; + f.have[CV_CPU_SSE2] = (cpuid_data[3] & (1<<26)) != 0; + f.have[CV_CPU_SSE3] = (cpuid_data[2] & (1<<0)) != 0; + f.have[CV_CPU_SSSE3] = (cpuid_data[2] & (1<<9)) != 0; + f.have[CV_CPU_SSE4_1] = (cpuid_data[2] & (1<<19)) != 0; + f.have[CV_CPU_SSE4_2] = (cpuid_data[2] & (1<<20)) != 0; + f.have[CV_CPU_POPCNT] = (cpuid_data[2] & (1<<23)) != 0; + f.have[CV_CPU_AVX] = (cpuid_data[2] & (1<<28)) != 0; + } + + return f; + } + + int x86_family; + bool have[MAX_FEATURE+1]; +}; + +static HWFeatures featuresEnabled = HWFeatures::initialize(), featuresDisabled = HWFeatures(); +static HWFeatures* currentFeatures = 
&featuresEnabled; + +bool checkHardwareSupport(int feature) +{ + CV_DbgAssert( 0 <= feature && feature <= CV_HARDWARE_MAX_FEATURE ); + return currentFeatures->have[feature]; +} + +#ifdef HAVE_IPP +volatile bool useOptimizedFlag = true; + +struct IPPInitializer +{ + IPPInitializer(void) { ippStaticInit(); } +}; + +IPPInitializer ippInitializer; +#else +volatile bool useOptimizedFlag = true; +#endif + +volatile bool USE_SSE2 = false; + +void setUseOptimized( bool flag ) +{ + useOptimizedFlag = flag; + currentFeatures = flag ? &featuresEnabled : &featuresDisabled; + USE_SSE2 = currentFeatures->have[CV_CPU_SSE2]; +} + +bool useOptimized(void) +{ + return useOptimizedFlag; +} + +int64 getTickCount(void) +{ +#if defined WIN32 || defined _WIN32 || defined WINCE + LARGE_INTEGER counter; + QueryPerformanceCounter( &counter ); + return (int64)counter.QuadPart; +#elif defined __linux || defined __linux__ + struct timespec tp; + clock_gettime(CLOCK_MONOTONIC, &tp); + return (int64)tp.tv_sec*1000000000 + tp.tv_nsec; +#elif defined __MACH__ && defined __APPLE__ + return (int64)mach_absolute_time(); +#else + struct timeval tv; + struct timezone tz; + gettimeofday( &tv, &tz ); + return (int64)tv.tv_sec*1000000 + tv.tv_usec; +#endif +} + +double getTickFrequency(void) +{ +#if defined WIN32 || defined _WIN32 || defined WINCE + LARGE_INTEGER freq; + QueryPerformanceFrequency(&freq); + return (double)freq.QuadPart; +#elif defined __linux || defined __linux__ + return 1e9; +#elif defined __MACH__ && defined __APPLE__ + static double freq = 0; + if( freq == 0 ) + { + mach_timebase_info_data_t sTimebaseInfo; + mach_timebase_info(&sTimebaseInfo); + freq = sTimebaseInfo.denom*1e9/sTimebaseInfo.numer; + } + return freq; +#else + return 1e6; +#endif +} + +#if defined __GNUC__ && (defined __i386__ || defined __x86_64__ || defined __ppc__) +#if defined(__i386__) + +int64 getCPUTickCount(void) +{ + int64 x; + __asm__ volatile (".byte 0x0f, 0x31" : "=A" (x)); + return x; +} +#elif 
defined(__x86_64__) + +int64 getCPUTickCount(void) +{ + unsigned hi, lo; + __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi)); + return (int64)lo | ((int64)hi << 32); +} + +#elif defined(__ppc__) + +int64 getCPUTickCount(void) +{ + int64 result = 0; + unsigned upper, lower, tmp; + __asm__ volatile( + "0: \n" + "\tmftbu %0 \n" + "\tmftb %1 \n" + "\tmftbu %2 \n" + "\tcmpw %2,%0 \n" + "\tbne 0b \n" + : "=r"(upper),"=r"(lower),"=r"(tmp) + ); + return lower | ((int64)upper << 32); +} + +#else + +#error "RDTSC not defined" + +#endif + +#elif defined _MSC_VER && defined WIN32 && defined _M_IX86 + +int64 getCPUTickCount(void) +{ + __asm _emit 0x0f; + __asm _emit 0x31; +} + +#else + +#ifdef HAVE_IPP +int64 getCPUTickCount(void) +{ + return ippGetCpuClocks(); +} +#else +int64 getCPUTickCount(void) +{ + return getTickCount(); +} +#endif + +#endif + + +static int numThreads = 0; +static int numProcs = 0; + +int getNumThreads(void) +{ + if( !numProcs ) + setNumThreads(0); + return numThreads; +} + +void setNumThreads( int +#ifdef _OPENMP + threads +#endif + ) +{ + if( !numProcs ) + { +#ifdef _OPENMP + numProcs = omp_get_num_procs(); +#else + numProcs = 1; +#endif + } + +#ifdef _OPENMP + if( threads <= 0 ) + threads = numProcs; + else + threads = MIN( threads, numProcs ); + + numThreads = threads; +#else + numThreads = 1; +#endif +} + + +int getThreadNum(void) +{ +#ifdef _OPENMP + return omp_get_thread_num(); +#else + return 0; +#endif +} + +int getNumberOfCPUs(void) +{ +#if defined WIN32 || defined _WIN32 + SYSTEM_INFO sysinfo; + GetSystemInfo( &sysinfo ); + + return (int)sysinfo.dwNumberOfProcessors; +#elif defined __linux__ + return (int)sysconf( _SC_NPROCESSORS_ONLN ); +#elif defined __APPLE__ + int numCPU=0; + int mib[4]; + size_t len = sizeof(numCPU); + + /* set the mib for hw.ncpu */ + mib[0] = CTL_HW; + mib[1] = HW_AVAILCPU; // alternatively, try HW_NCPU; + + /* get the number of CPUs from the system */ + sysctl(mib, 2, &numCPU, &len, NULL, 0); + + if( numCPU < 1 ) + { 
+ mib[1] = HW_NCPU; + sysctl( mib, 2, &numCPU, &len, NULL, 0 ); + + if( numCPU < 1 ) + numCPU = 1; + } + + return (int)numCPU; +#else + return 1; +#endif +} + +string format( const char* fmt, ... ) +{ + char buf[1 << 16]; + va_list args; + va_start( args, fmt ); + vsprintf( buf, fmt, args ); + return string(buf); +} + +string tempfile( const char* suffix ) +{ + char buf[L_tmpnam]; + char* name = 0; +#if ANDROID + strcpy(buf, "/sdcard/__opencv_temp_XXXXXX"); + name = mktemp(buf); +#else + name = tmpnam(buf); +#endif + if (*name == '\\') + ++name; + string n(name); + if (suffix != 0) + n += (n[n.size()-1] == '.' && suffix[0] == '.' ? suffix + 1 : suffix); + return n; +} + +static CvErrorCallback customErrorCallback = 0; +static void* customErrorCallbackData = 0; +static bool breakOnError = false; + +bool setBreakOnError(bool value) +{ + bool prevVal = breakOnError; + breakOnError = value; + return prevVal; +} + +void error( const Exception& exc ) +{ + if (customErrorCallback != 0) + customErrorCallback(exc.code, exc.func.c_str(), exc.err.c_str(), + exc.file.c_str(), exc.line, customErrorCallbackData); + else + { + const char* errorStr = cvErrorStr(exc.code); + char buf[1 << 16]; + + sprintf( buf, "OpenCV Error: %s (%s) in %s, file %s, line %d", + errorStr, exc.err.c_str(), exc.func.size() > 0 ? 
+ exc.func.c_str() : "unknown function", exc.file.c_str(), exc.line ); + fprintf( stderr, "%s\n", buf ); + fflush( stderr ); + } + + if(breakOnError) + { + static volatile int* p = 0; + *p = 0; + } + + throw exc; +} + +CvErrorCallback +redirectError( CvErrorCallback errCallback, void* userdata, void** prevUserdata) +{ + if( prevUserdata ) + *prevUserdata = customErrorCallbackData; + + CvErrorCallback prevCallback = customErrorCallback; + + customErrorCallback = errCallback; + customErrorCallbackData = userdata; + + return prevCallback; +} + +} + +/*CV_IMPL int +cvGuiBoxReport( int code, const char *func_name, const char *err_msg, + const char *file, int line, void* ) +{ +#if (!defined WIN32 && !defined _WIN32) || defined WINCE + return cvStdErrReport( code, func_name, err_msg, file, line, 0 ); +#else + if( code != CV_StsBackTrace && code != CV_StsAutoTrace ) + { + size_t msg_len = strlen(err_msg ? err_msg : "") + 1024; + char* message = (char*)alloca(msg_len); + char title[100]; + + wsprintf( message, "%s (%s)\nin function %s, %s(%d)\n\n" + "Press \"Abort\" to terminate application.\n" + "Press \"Retry\" to debug (if the app is running under debugger).\n" + "Press \"Ignore\" to continue (this is not safe).\n", + cvErrorStr(code), err_msg ? 
err_msg : "no description", + func_name, file, line ); + + wsprintf( title, "OpenCV GUI Error Handler" ); + + int answer = MessageBox( NULL, message, title, MB_ICONERROR|MB_ABORTRETRYIGNORE|MB_SYSTEMMODAL ); + + if( answer == IDRETRY ) + { + CV_DBG_BREAK(); + } + return answer != IDIGNORE; + } + return 0; +#endif +}*/ + +CV_IMPL int cvCheckHardwareSupport(int feature) +{ + CV_DbgAssert( 0 <= feature && feature <= CV_HARDWARE_MAX_FEATURE ); + return cv::currentFeatures->have[feature]; +} + +CV_IMPL int cvUseOptimized( int flag ) +{ + int prevMode = cv::useOptimizedFlag; + cv::setUseOptimized( flag != 0 ); + return prevMode; +} + +CV_IMPL int64 cvGetTickCount(void) +{ + return cv::getTickCount(); +} + +CV_IMPL double cvGetTickFrequency(void) +{ + return cv::getTickFrequency()*1e-6; +} + +CV_IMPL void cvSetNumThreads(int nt) +{ + cv::setNumThreads(nt); +} + +CV_IMPL int cvGetNumThreads() +{ + return cv::getNumThreads(); +} + +CV_IMPL int cvGetThreadNum() +{ + return cv::getThreadNum(); +} + + +CV_IMPL CvErrorCallback +cvRedirectError( CvErrorCallback errCallback, void* userdata, void** prevUserdata) +{ + return cv::redirectError(errCallback, userdata, prevUserdata); +} + +CV_IMPL int cvNulDevReport( int, const char*, const char*, + const char*, int, void* ) +{ + return 0; +} + +CV_IMPL int cvStdErrReport( int, const char*, const char*, + const char*, int, void* ) +{ + return 0; +} + +CV_IMPL int cvGuiBoxReport( int, const char*, const char*, + const char*, int, void* ) +{ + return 0; +} + +CV_IMPL int cvGetErrInfo( const char**, const char**, const char**, int* ) +{ + return 0; +} + + +CV_IMPL const char* cvErrorStr( int status ) +{ + static char buf[256]; + + switch (status) + { + case CV_StsOk : return "No Error"; + case CV_StsBackTrace : return "Backtrace"; + case CV_StsError : return "Unspecified error"; + case CV_StsInternal : return "Internal error"; + case CV_StsNoMem : return "Insufficient memory"; + case CV_StsBadArg : return "Bad argument"; + case 
CV_StsNoConv : return "Iterations do not converge"; + case CV_StsAutoTrace : return "Autotrace call"; + case CV_StsBadSize : return "Incorrect size of input array"; + case CV_StsNullPtr : return "Null pointer"; + case CV_StsDivByZero : return "Division by zero occured"; + case CV_BadStep : return "Image step is wrong"; + case CV_StsInplaceNotSupported : return "Inplace operation is not supported"; + case CV_StsObjectNotFound : return "Requested object was not found"; + case CV_BadDepth : return "Input image depth is not supported by function"; + case CV_StsUnmatchedFormats : return "Formats of input arguments do not match"; + case CV_StsUnmatchedSizes : return "Sizes of input arguments do not match"; + case CV_StsOutOfRange : return "One of arguments\' values is out of range"; + case CV_StsUnsupportedFormat : return "Unsupported format or combination of formats"; + case CV_BadCOI : return "Input COI is not supported"; + case CV_BadNumChannels : return "Bad number of channels"; + case CV_StsBadFlag : return "Bad flag (parameter or structure field)"; + case CV_StsBadPoint : return "Bad parameter of type CvPoint"; + case CV_StsBadMask : return "Bad type of mask argument"; + case CV_StsParseError : return "Parsing error"; + case CV_StsNotImplemented : return "The function/feature is not implemented"; + case CV_StsBadMemBlock : return "Memory block has been corrupted"; + case CV_StsAssert : return "Assertion failed"; + case CV_GpuNotSupported : return "No GPU support"; + case CV_GpuApiCallError : return "Gpu Api call"; + case CV_GpuNppCallError : return "Npp Api call"; + }; + + sprintf(buf, "Unknown %s code %d", status >= 0 ? 
"status":"error", status); + return buf; +} + +CV_IMPL int cvGetErrMode(void) +{ + return 0; +} + +CV_IMPL int cvSetErrMode(int) +{ + return 0; +} + +CV_IMPL int cvGetErrStatus(void) +{ + return 0; +} + +CV_IMPL void cvSetErrStatus(int) +{ +} + + +CV_IMPL void cvError( int code, const char* func_name, + const char* err_msg, + const char* file_name, int line ) +{ + cv::error(cv::Exception(code, err_msg, func_name, file_name, line)); +} + +/* function, which converts int to int */ +CV_IMPL int +cvErrorFromIppStatus( int status ) +{ + switch (status) + { + case CV_BADSIZE_ERR: return CV_StsBadSize; + case CV_BADMEMBLOCK_ERR: return CV_StsBadMemBlock; + case CV_NULLPTR_ERR: return CV_StsNullPtr; + case CV_DIV_BY_ZERO_ERR: return CV_StsDivByZero; + case CV_BADSTEP_ERR: return CV_BadStep; + case CV_OUTOFMEM_ERR: return CV_StsNoMem; + case CV_BADARG_ERR: return CV_StsBadArg; + case CV_NOTDEFINED_ERR: return CV_StsError; + case CV_INPLACE_NOT_SUPPORTED_ERR: return CV_StsInplaceNotSupported; + case CV_NOTFOUND_ERR: return CV_StsObjectNotFound; + case CV_BADCONVERGENCE_ERR: return CV_StsNoConv; + case CV_BADDEPTH_ERR: return CV_BadDepth; + case CV_UNMATCHED_FORMATS_ERR: return CV_StsUnmatchedFormats; + case CV_UNSUPPORTED_COI_ERR: return CV_BadCOI; + case CV_UNSUPPORTED_CHANNELS_ERR: return CV_BadNumChannels; + case CV_BADFLAG_ERR: return CV_StsBadFlag; + case CV_BADRANGE_ERR: return CV_StsBadArg; + case CV_BADCOEF_ERR: return CV_StsBadArg; + case CV_BADFACTOR_ERR: return CV_StsBadArg; + case CV_BADPOINT_ERR: return CV_StsBadPoint; + + default: + return CV_StsError; + } +} + +static CvModuleInfo cxcore_info = { 0, "cxcore", CV_VERSION, 0 }; + +CvModuleInfo* CvModule::first = 0, *CvModule::last = 0; + +CvModule::CvModule( CvModuleInfo* _info ) +{ + cvRegisterModule( _info ); + info = last; +} + +CvModule::~CvModule(void) +{ + if( info ) + { + CvModuleInfo* p = first; + for( ; p != 0 && p->next != info; p = p->next ) + ; + + if( p ) + p->next = info->next; + + if( first == 
info ) + first = info->next; + + if( last == info ) + last = p; + + free( info ); + info = 0; + } +} + +CV_IMPL int +cvRegisterModule( const CvModuleInfo* module ) +{ + CV_Assert( module != 0 && module->name != 0 && module->version != 0 ); + + size_t name_len = strlen(module->name); + size_t version_len = strlen(module->version); + + CvModuleInfo* module_copy = (CvModuleInfo*)malloc( sizeof(*module_copy) + + name_len + 1 + version_len + 1 ); + + *module_copy = *module; + module_copy->name = (char*)(module_copy + 1); + module_copy->version = (char*)(module_copy + 1) + name_len + 1; + + memcpy( (void*)module_copy->name, module->name, name_len + 1 ); + memcpy( (void*)module_copy->version, module->version, version_len + 1 ); + module_copy->next = 0; + + if( CvModule::first == 0 ) + CvModule::first = module_copy; + else + CvModule::last->next = module_copy; + + CvModule::last = module_copy; + + return 0; +} + +CvModule cxcore_module( &cxcore_info ); + +CV_IMPL void +cvGetModuleInfo( const char* name, const char **version, const char **plugin_list ) +{ + static char joint_verinfo[1024] = ""; + static char plugin_list_buf[1024] = ""; + + if( version ) + *version = 0; + + if( plugin_list ) + *plugin_list = 0; + + CvModuleInfo* module; + + if( version ) + { + if( name ) + { + size_t i, name_len = strlen(name); + + for( module = CvModule::first; module != 0; module = module->next ) + { + if( strlen(module->name) == name_len ) + { + for( i = 0; i < name_len; i++ ) + { + int c0 = toupper(module->name[i]), c1 = toupper(name[i]); + if( c0 != c1 ) + break; + } + if( i == name_len ) + break; + } + } + if( !module ) + CV_Error( CV_StsObjectNotFound, "The module is not found" ); + + *version = module->version; + } + else + { + char* ptr = joint_verinfo; + + for( module = CvModule::first; module != 0; module = module->next ) + { + sprintf( ptr, "%s: %s%s", module->name, module->version, module->next ? 
", " : "" ); + ptr += strlen(ptr); + } + + *version = joint_verinfo; + } + } + + if( plugin_list ) + *plugin_list = plugin_list_buf; +} + +#if defined BUILD_SHARED_LIBS && defined CVAPI_EXPORTS && defined WIN32 && !defined WINCE +BOOL WINAPI DllMain( HINSTANCE, DWORD fdwReason, LPVOID ) +{ + if( fdwReason == DLL_THREAD_DETACH || fdwReason == DLL_PROCESS_DETACH ) + { + cv::deleteThreadAllocData(); + cv::deleteThreadRNGData(); + } + return TRUE; +} +#endif + +/* End of file. */ diff --git a/opencv/core/tables.cpp b/opencv/core/tables.cpp new file mode 100644 index 0000000..bd5e3cf --- /dev/null +++ b/opencv/core/tables.cpp @@ -0,0 +1,3512 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* //////////////////////////////////////////////////////////////////// +// +// CvMat helper tables +// +// */ + +#include "precomp.hpp" + +namespace cv +{ + +const float g_8x32fTab[] = +{ + -128.f, -127.f, -126.f, -125.f, -124.f, -123.f, -122.f, -121.f, + -120.f, -119.f, -118.f, -117.f, -116.f, -115.f, -114.f, -113.f, + -112.f, -111.f, -110.f, -109.f, -108.f, -107.f, -106.f, -105.f, + -104.f, -103.f, -102.f, -101.f, -100.f, -99.f, -98.f, -97.f, + -96.f, -95.f, -94.f, -93.f, -92.f, -91.f, -90.f, -89.f, + -88.f, -87.f, -86.f, -85.f, -84.f, -83.f, -82.f, -81.f, + -80.f, -79.f, -78.f, -77.f, -76.f, -75.f, -74.f, -73.f, + -72.f, -71.f, -70.f, -69.f, -68.f, -67.f, -66.f, -65.f, + -64.f, -63.f, -62.f, -61.f, -60.f, -59.f, -58.f, -57.f, + -56.f, -55.f, -54.f, -53.f, -52.f, -51.f, -50.f, -49.f, + -48.f, -47.f, -46.f, -45.f, -44.f, -43.f, -42.f, -41.f, + -40.f, -39.f, -38.f, -37.f, -36.f, -35.f, -34.f, -33.f, + -32.f, -31.f, -30.f, -29.f, -28.f, -27.f, -26.f, -25.f, + -24.f, -23.f, -22.f, -21.f, -20.f, -19.f, -18.f, -17.f, + -16.f, -15.f, -14.f, -13.f, -12.f, -11.f, -10.f, -9.f, + -8.f, -7.f, -6.f, -5.f, -4.f, -3.f, -2.f, -1.f, + 0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, + 
8.f, 9.f, 10.f, 11.f, 12.f, 13.f, 14.f, 15.f, + 16.f, 17.f, 18.f, 19.f, 20.f, 21.f, 22.f, 23.f, + 24.f, 25.f, 26.f, 27.f, 28.f, 29.f, 30.f, 31.f, + 32.f, 33.f, 34.f, 35.f, 36.f, 37.f, 38.f, 39.f, + 40.f, 41.f, 42.f, 43.f, 44.f, 45.f, 46.f, 47.f, + 48.f, 49.f, 50.f, 51.f, 52.f, 53.f, 54.f, 55.f, + 56.f, 57.f, 58.f, 59.f, 60.f, 61.f, 62.f, 63.f, + 64.f, 65.f, 66.f, 67.f, 68.f, 69.f, 70.f, 71.f, + 72.f, 73.f, 74.f, 75.f, 76.f, 77.f, 78.f, 79.f, + 80.f, 81.f, 82.f, 83.f, 84.f, 85.f, 86.f, 87.f, + 88.f, 89.f, 90.f, 91.f, 92.f, 93.f, 94.f, 95.f, + 96.f, 97.f, 98.f, 99.f, 100.f, 101.f, 102.f, 103.f, + 104.f, 105.f, 106.f, 107.f, 108.f, 109.f, 110.f, 111.f, + 112.f, 113.f, 114.f, 115.f, 116.f, 117.f, 118.f, 119.f, + 120.f, 121.f, 122.f, 123.f, 124.f, 125.f, 126.f, 127.f, + 128.f, 129.f, 130.f, 131.f, 132.f, 133.f, 134.f, 135.f, + 136.f, 137.f, 138.f, 139.f, 140.f, 141.f, 142.f, 143.f, + 144.f, 145.f, 146.f, 147.f, 148.f, 149.f, 150.f, 151.f, + 152.f, 153.f, 154.f, 155.f, 156.f, 157.f, 158.f, 159.f, + 160.f, 161.f, 162.f, 163.f, 164.f, 165.f, 166.f, 167.f, + 168.f, 169.f, 170.f, 171.f, 172.f, 173.f, 174.f, 175.f, + 176.f, 177.f, 178.f, 179.f, 180.f, 181.f, 182.f, 183.f, + 184.f, 185.f, 186.f, 187.f, 188.f, 189.f, 190.f, 191.f, + 192.f, 193.f, 194.f, 195.f, 196.f, 197.f, 198.f, 199.f, + 200.f, 201.f, 202.f, 203.f, 204.f, 205.f, 206.f, 207.f, + 208.f, 209.f, 210.f, 211.f, 212.f, 213.f, 214.f, 215.f, + 216.f, 217.f, 218.f, 219.f, 220.f, 221.f, 222.f, 223.f, + 224.f, 225.f, 226.f, 227.f, 228.f, 229.f, 230.f, 231.f, + 232.f, 233.f, 234.f, 235.f, 236.f, 237.f, 238.f, 239.f, + 240.f, 241.f, 242.f, 243.f, 244.f, 245.f, 246.f, 247.f, + 248.f, 249.f, 250.f, 251.f, 252.f, 253.f, 254.f, 255.f +}; + +/* [-255..255].^2 */ +const ushort g_8x16uSqrTab[] = +{ + 65025, 64516, 64009, 63504, 63001, 62500, 62001, 61504, 61009, 60516, 60025, 59536, + 59049, 58564, 58081, 57600, 57121, 56644, 56169, 55696, 55225, 54756, 54289, 53824, + 53361, 52900, 52441, 51984, 51529, 51076, 50625, 50176, 
49729, 49284, 48841, 48400, + 47961, 47524, 47089, 46656, 46225, 45796, 45369, 44944, 44521, 44100, 43681, 43264, + 42849, 42436, 42025, 41616, 41209, 40804, 40401, 40000, 39601, 39204, 38809, 38416, + 38025, 37636, 37249, 36864, 36481, 36100, 35721, 35344, 34969, 34596, 34225, 33856, + 33489, 33124, 32761, 32400, 32041, 31684, 31329, 30976, 30625, 30276, 29929, 29584, + 29241, 28900, 28561, 28224, 27889, 27556, 27225, 26896, 26569, 26244, 25921, 25600, + 25281, 24964, 24649, 24336, 24025, 23716, 23409, 23104, 22801, 22500, 22201, 21904, + 21609, 21316, 21025, 20736, 20449, 20164, 19881, 19600, 19321, 19044, 18769, 18496, + 18225, 17956, 17689, 17424, 17161, 16900, 16641, 16384, 16129, 15876, 15625, 15376, + 15129, 14884, 14641, 14400, 14161, 13924, 13689, 13456, 13225, 12996, 12769, 12544, + 12321, 12100, 11881, 11664, 11449, 11236, 11025, 10816, 10609, 10404, 10201, 10000, + 9801, 9604, 9409, 9216, 9025, 8836, 8649, 8464, 8281, 8100, 7921, 7744, + 7569, 7396, 7225, 7056, 6889, 6724, 6561, 6400, 6241, 6084, 5929, 5776, + 5625, 5476, 5329, 5184, 5041, 4900, 4761, 4624, 4489, 4356, 4225, 4096, + 3969, 3844, 3721, 3600, 3481, 3364, 3249, 3136, 3025, 2916, 2809, 2704, + 2601, 2500, 2401, 2304, 2209, 2116, 2025, 1936, 1849, 1764, 1681, 1600, + 1521, 1444, 1369, 1296, 1225, 1156, 1089, 1024, 961, 900, 841, 784, + 729, 676, 625, 576, 529, 484, 441, 400, 361, 324, 289, 256, + 225, 196, 169, 144, 121, 100, 81, 64, 49, 36, 25, 16, + 9, 4, 1, 0, 1, 4, 9, 16, 25, 36, 49, 64, + 81, 100, 121, 144, 169, 196, 225, 256, 289, 324, 361, 400, + 441, 484, 529, 576, 625, 676, 729, 784, 841, 900, 961, 1024, + 1089, 1156, 1225, 1296, 1369, 1444, 1521, 1600, 1681, 1764, 1849, 1936, + 2025, 2116, 2209, 2304, 2401, 2500, 2601, 2704, 2809, 2916, 3025, 3136, + 3249, 3364, 3481, 3600, 3721, 3844, 3969, 4096, 4225, 4356, 4489, 4624, + 4761, 4900, 5041, 5184, 5329, 5476, 5625, 5776, 5929, 6084, 6241, 6400, + 6561, 6724, 6889, 7056, 7225, 7396, 7569, 7744, 7921, 8100, 8281, 8464, + 8649, 8836, 
9025, 9216, 9409, 9604, 9801, 10000, 10201, 10404, 10609, 10816, + 11025, 11236, 11449, 11664, 11881, 12100, 12321, 12544, 12769, 12996, 13225, 13456, + 13689, 13924, 14161, 14400, 14641, 14884, 15129, 15376, 15625, 15876, 16129, 16384, + 16641, 16900, 17161, 17424, 17689, 17956, 18225, 18496, 18769, 19044, 19321, 19600, + 19881, 20164, 20449, 20736, 21025, 21316, 21609, 21904, 22201, 22500, 22801, 23104, + 23409, 23716, 24025, 24336, 24649, 24964, 25281, 25600, 25921, 26244, 26569, 26896, + 27225, 27556, 27889, 28224, 28561, 28900, 29241, 29584, 29929, 30276, 30625, 30976, + 31329, 31684, 32041, 32400, 32761, 33124, 33489, 33856, 34225, 34596, 34969, 35344, + 35721, 36100, 36481, 36864, 37249, 37636, 38025, 38416, 38809, 39204, 39601, 40000, + 40401, 40804, 41209, 41616, 42025, 42436, 42849, 43264, 43681, 44100, 44521, 44944, + 45369, 45796, 46225, 46656, 47089, 47524, 47961, 48400, 48841, 49284, 49729, 50176, + 50625, 51076, 51529, 51984, 52441, 52900, 53361, 53824, 54289, 54756, 55225, 55696, + 56169, 56644, 57121, 57600, 58081, 58564, 59049, 59536, 60025, 60516, 61009, 61504, + 62001, 62500, 63001, 63504, 64009, 64516, 65025 +}; + +const uchar g_Saturate8u[] = +{ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 2, 3, 
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, + 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, + 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, + 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, + 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, + 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, + 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255 +}; + +const char* g_HersheyGlyphs[] = { + "", + "MWRMNV RMVV PSTS", + "MWOMOV OMSMUNUPSQ OQSQURUUSVOV", + "MXVNTMRMPNOPOSPURVTVVU", + "MWOMOV OMRMTNUPUSTURVOV", + "MWOMOV OMUM OQSQ OVUV", + "MVOMOV OMUM OQSQ", + "MXVNTMRMPNOPOSPURVTVVUVR SRVR", + "MWOMOV UMUV OQUQ", + "PTRMRV", + "NUSMSTRVPVOTOS", + "MWOMOV UMOS QQUV", + "MVOMOV OVUV", + "LXNMNV NMRV VMRV VMVV", + "MWOMOV OMUV UMUV", + "MXRMPNOPOSPURVSVUUVSVPUNSMRM", + "MWOMOV OMSMUNUQSROR", + "MXRMPNOPOSPURVSVUUVSVPUNSMRM STVW", + "MWOMOV OMSMUNUQSROR RRUV", + "MWUNSMQMONOOPPTRUSUUSVQVOU", + "MWRMRV NMVM", + "MXOMOSPURVSVUUVSVM", + "MWNMRV VMRV", + "LXNMPV RMPV RMTV VMTV", + "MWOMUV UMOV", + "MWNMRQRV VMRQ", + "MWUMOV OMUM OVUV", + "MWRMNV RMVV PSTS", + "MWOMOV OMSMUNUPSQ OQSQURUUSVOV", + "MVOMOV OMUM", + "MWRMNV RMVV NVVV", + "MWOMOV OMUM OQSQ OVUV", + "MWUMOV OMUM OVUV", + "MWOMOV UMUV OQUQ", + "MXRMPNOPOSPURVSVUUVSVPUNSMRM QQTR TQQR", + "PTRMRV", + "MWOMOV UMOS QQUV", + "MWRMNV RMVV", + "LXNMNV NMRV VMRV VMVV", + "MWOMOV OMUV UMUV", + "MWOMUM PQTR TQPR OVUV", + "MXRMPNOPOSPURVSVUUVSVPUNSMRM", + "MWOMOV UMUV OMUM", + "MWOMOV OMSMUNUQSROR", + "MWOMRQOV OMUM OVUV", + "MWRMRV NMVM", + "MWNONNOMPMQNRPRV VOVNUMTMSNRP", + "LXRMRV PONPNSPTTTVSVPTOPO", + "MWOMUV UMOV", + "LXRMRV NOOPOSQTSTUSUPVO", + "MXOVQVOROPPNRMSMUNVPVRTVVV", + "MWSMMV SMUV OSTS", + "MWQMNV QMTMVNVPSQPQ SQURUTTURVNV", + "LXVPUNTMRMPNOONQNSOUPVRVTUUT", + "MXQMNV QMUMVOVQUTTURVNV", + "MVQMNV QMVM PQSQ NVSV", + "MVQMNV QMVM PQSQ", + 
"LXVPUNTMRMPNOONQNSOUPVRVTUUSRS", + "MXQMNV WMTV PQUQ", + "PUTMQV", + "OVUMSSRUQVPVOUOT", + "MVQMNV VMOS RQTV", + "NVRMOV OVTV", + "LYPMMV PMQV XMQV XMUV", + "MXQMNV QMTV WMTV", + "LXRMPNOONQNSOUPVRVTUUTVRVPUNTMRM", + "MWQMNV QMUMVNVPUQSRPR", + "LXRMPNOONQNSOUPVRVTUUTVRVPUNTMRM QVPUPTQSRSSTTVUWVW", + "MWQMNV QMUMVNVPUQSRPR QRRUSVTVUU", + "MWVNTMRMPNPPQQTRUSUUSVPVNU", + "MVSMPV PMVM", + "LXPMNSNUOVRVTUUSWM", + "MWOMQV WMQV", + "KXNMNV SMNV SMSV XMSV", + "NWQMTV WMNV", + "NWQMSQQV WMSQ", + "MWQMWMNVTV", + "", + "", + "", + "", + "", + "", + "LXNMRV VMRV NMVM", + "MWNLVX", + "LXRONU ROVU", + "MWNVVV", + "PVRMUQ", + "MWMMOKQKTMVMWK", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "NWQPTPUQUV URQSPTPUQVSVUU", + "MWOMOV OSPURVTUUSTQRPPQOS", + "MWUQSPRPPQOSPURVSVUU", + "MWUMUV USTQRPPQOSPURVTUUS", + "MWOSUSTQRPPQOSPURVTV", + "NVUNTMSMRNRV PPTP", + "MWUPUVTXRYPY USTQRPPQOSPURVTUUS", + "MWOMOV OSPQRPTQUSUV", + "PTRLQMRNSMRL RPRV", + "PUSLRMSNTMSL SPSXRYQYPX", + "NWPMPV UPPT RSUV", + "PTRMRV", + "KYMPMV MSNQOPPPQQRSRV RSSQTPUPVQWSWV", + "MWOPOV OSPQRPTQUSUV", + "MWRPPQOSPURVTUUSTQRP", + "MWOPOY OSPURVTUUSTQRPPQOS", + "MWUPUY USTQRPPQOSPURVTUUS", + "NVPPPV PSQQSPTP", + "NWUQTPQPPQPRQSTSUTUUTVQVPU", + "NVRMRUSVTVUU PPTP", + "MWUPUV OPOSPURVTUUS", + "NVOPRV UPRV", + "LXNPPV RPPV RPTV VPTV", + "MWOPUV UPOV", + "MWOPRV UPRVQXPYOY", + "MWOPUPOVUV", + "MXVPUSTURVPUOSPQRPTQUUVV", + "MWOTQVSVTUTSSRPQRQTPUOUNTMRMQNPPOTNY", + "MXNQOPQPRQRSQW VPURSTQWPY", + "MWTNSMRMQNQORPTQUSTURVPUOSPQRP", + "NWUQSPQPPQPRQS SSQSPTPUQVSVUU", + "NWTMSNSOTP UPSPQQPSPUQVSWSXRYQY", + "LXNQOPPPQQQSPV QSRQTPUPVQVSUVTY", + "LXNQOPPPQQQURVSVTUUSVPVNUMTMSNSPTRUSWT", + "OVRPQSQURVSVTU", + "MWQPOV UPTPRQPS PSQUSVTV", + "MWOMPMQNRPUV RPOV", + "LYPPMY UPTSSUQVPVOUOS TSTUUVVVWU", + "MWNPOPOV UPTSRUOV", + "NWTMSNSOTP UPSPQQQRRSTS SSQTPUPVQWSXSYRZQZ", + "MWRPPQOSPURVTUUSTQRP", + "MXOQQPVP QPQRPV TPTRUV", + "MWOSPURVTUUSTQRPPQOSNY", + "MXVPRPPQOSPURVTUUSTQRP", + "MXOQQPVP SPRV", + 
"KXMQNPOPPQPUQVSVTUUSVP", + "MXPPOQOSPURVSVUUVSVQUPTPSQRSQY", + "MWOPPPQQSXTYUY UPTRPWOY", + "KYTMRY MQNPOPPQPUQVTVUUVSWP", + "LXOPNRNTOVQVRTRR UPVRVTUVSVRT", + "LWTSSQQPOQNSOUQVSUTS UPTSTUUVVV", + "MWQMOSPURVTUUSTQRPPQOS", + "MWUQSPRPPQOSPURVTV", + "LWTSSQQPOQNSOUQVSUTS VMTSTUUVVV", + "MWOSTSURUQSPRPPQOSPURVTV", + "OVVMUMTNSPQVPXOYNY QPUP", + "MXUSTQRPPQOSPURVTUUS VPTVSXRYPYOX", + "MVQMNV OSPQQPSPTQTRSTSUTVUV", + "PUSMSNTNTMSM QPRPSQSRRTRUSVTV", + "OUSMSNTNTMSM QPRPSQSRRVQXPYOYNX", + "NVRMOV UPTPRQPS PSQUSVTV", + "OTSMQSQURVSV", + "JYKPLPMQMSLV MSNQOPQPRQRSQV RSSQTPVPWQWRVTVUWVXV", + "MWNPOPPQPSOV PSQQRPTPUQURTTTUUVVV", + "MWRPPQOSPURVTUUSTQRP", + "MXNPOPPQPSNY PSQUSVUUVSUQSPQQPS", + "MXUSTQRPPQOSPURVTUUS VPSY", + "MVOPPPQQQSPV UQTPSPRQQS", + "NVTQSPQPPQPRQSRSSTSURVPVOU", + "NUSMQSQURVSV PPTP", + "MWNPOPPQPROTOUPVRVSUTS UPTSTUUVVV", + "MWNPOPPQPROTOUPVRVTUURUP", + "KYLPMPNQNRMTMUNVPVQURSSP RSRUSVUVVUWRWP", + "MWOQPPQPRQRUSVTVUU VQUPTPSQQUPVOVNU", + "MWNPOPPQPROTOUPVRVSUTS UPSVRXQYOYNX", + "NVUPOV PQQPSPTQ PUQVSVTU", + "", + "", + "", + "", + "", + "", + "MWUSTQRPPQOSPURVTUUSUPTNRMQM", + "MWUQSPRPPQOSPURVSVUU OSSS", + "MWRMQNPPOSOVPWRWSVTTUQUNTMRM PRTR", + "MWTMQY RPPQOSPURVSVUUVSUQSPRP", + "MWUQSPQPOQOSPTRUSVSWRXQX", + "", + "", + "KYTPTSUTVTWSWQVOUNSMQMONNOMQMSNUOVQWSWUV TQSPQPPQPSQTSTTS", + "MWUNORUV", + "MWONUROV", + "OUTKQKQYTY", + "OUPKSKSYPY", + "OUTKSLRNROSQQRSSRURVSXTY", + "OUPKQLRNROQQSRQSRURVQXPY", + "LYPMQNQOPPOPNONNOMPMSNUNWMNV USTTTUUVVVWUWTVSUS", + "PT", + "NV", + "MWRMPNOPOSPURVTUUSUPTNRM", + "MWPORMRV", + "MWONQMSMUNUPTROVUV", + "MWONQMSMUNUPSQ RQSQURUUSVQVOU", + "MWSMSV SMNSVS", + "MWPMOQQPRPTQUSTURVQVOU PMTM", + "MWTMRMPNOPOSPURVTUUSTQRPPQOS", + "MWUMQV OMUM", + "MWQMONOPQQSQUPUNSMQM QQOROUQVSVUUURSQ", + "MWUPTRRSPROPPNRMTNUPUSTURVPV", + "PURURVSVSURU", + "PUSVRVRUSUSWRY", + "PURPRQSQSPRP RURVSVSURU", + "PURPRQSQSPRP SVRVRUSUSWRY", + "PURMRR SMSR RURVSVSURU", + "NWPNRMSMUNUPRQRRSRSQUP RURVSVSURU", + "PTRMRQ", + "NVPMPQ TMTQ", + 
"NVQMPNPPQQSQTPTNSMQM", + "MWRKRX UNSMQMONOPQQTRUSUUSVQVOU", + "MWVLNX", + "OUTKRNQQQSRVTY", + "OUPKRNSQSSRVPY", + "PTRKRY", + "LXNRVR", + "LXRNRV NRVR", + "LXNPVP NTVT", + "MWOOUU UOOU", + "MWRORU OPUT UPOT", + "PURQRRSRSQRQ", + "PUSMRORQSQSPRP", + "PUSNRNRMSMSORQ", + "LXSOVRSU NRVR", + "MXQLQY TLTY OQVQ OTVT", + "LXVRURTSSURVOVNUNSORRQSPSNRMPMONOPQSSUUVVV", + "LXNNOQOSNV VNUQUSVV NNQOSOVN NVQUSUVV", + "LYRQQPOPNQNSOTQTRSSQTPVPWQWSVTTTSSRQ", + "", + "H\\NRMQLRMSNR VRWQXRWSVR", + "H\\MPLQLRMSNSOROQNPMP MQMRNRNQMQ WPVQVRWSXSYRYQXPWP WQWRXRXQWQ", + "I[KRYR", + "", + "H\\RUJPRTZPRU", + "", + "", + "", + "", + "", + "F^ISJQLPNPPQTTVUXUZT[Q ISJPLONOPPTSVTXTZS[Q IYJWLVNVPWTZV[X[ZZ[W IYJVLUNUPVTYVZXZZY[W", + "", + "F^ISJQLPNPPQTTVUXUZT[Q ISJPLONOPPTSVTXTZS[Q IW[W I[[[", + "", + "CaGO]OXI L[GU]U", + "", + "D`F^^^^FFFF^", + "", + "KYQVOUNSNQOOQNSNUOVQVSUUSVQV SVVS QVVQ OUUO NSSN NQQN", + "", + "H\\IR[R", + "H\\IR[R IQ[Q", + "", + "LYPFSCSP RDRP OPVP MRXR OVOWNWNVOUQTTTVUWWVYTZQ[O\\N^Na TTUUVWUYTZ N`O_P_S`V`W_ P_SaVaW_W^", + "LYPFSCSP RDRP OPVP MRXR OVOWNWNVOUQTTTVUWWVYTZ TTUUVWUYTZ RZTZV[W]W^V`TaQaO`N_N^O^O_ TZU[V]V^U`Ta", + "LYPFSCSP RDRP OPVP MRXR VVVWWWWVVUTTRTPUOVNYN^O`QaTaV`W^W\\VZTYQYN[ RTPVOYO^P`Qa TaU`V^V\\UZTY", + "LYPFSCSP RDRP OPVP MRXR QTOUNWOYQZTZVYWWVUTTQT QTPUOWPYQZ TZUYVWUUTT QZO[N]N^O`QaTaV`W^W]V[TZ QZP[O]O^P`Qa TaU`V^V]U[TZ", + "LYOEOFNFNEODQCTCVDWFVHTIQJOKNMNP TCUDVFUHTI NOONPNSOVOWN PNSPVPWNWM MRXR OVOWNWNVOUQTTTVUWWVYTZ TTUUVWUYTZ RZTZV[W]W^V`TaQaO`N_N^O^O_ TZU[V]V^U`Ta", + "LYOEOFNFNEODQCTCVDWFVHTI TCUDVFUHTI RITIVJWLWMVOTPQPOONNNMOMON TIUJVLVMUOTP MRXR QTOUNWOYQZTZVYWWVUTTQT QTPUOWPYQZ TZUYVWUUTT QZO[N]N^O`QaTaV`W^W]V[TZ QZP[O]O^P`Qa TaU`V^V]U[TZ", + "LYOCNI OCVC ODSDVC NIOHQGTGVHWJWMVOTPQPOONNNMOMON TGUHVJVMUOTP MRXR QTOUNWOYQZTZVYWWVUTTQT QTPUOWPYQZ TZUYVWUUTT QZO[N]N^O`QaTaV`W^W]V[TZ QZP[O]O^P`Qa TaU`V^V]U[TZ", + "LYNCNG VERLPP WCTIQP NEPCRCUE NEPDRDUEVE MRXR QTOUNWOYQZTZVYWWVUTTQT QTPUOWPYQZ TZUYVWUUTT QZO[N]N^O`QaTaV`W^W]V[TZ QZP[O]O^P`Qa 
TaU`V^V]U[TZ", + "LYOCNI OCVC ODSDVC NIOHQGTGVHWJWMVOTPQPOONNNMOMON TGUHVJVMUOTP MRXR VVVWWWWVVUTTRTPUOVNYN^O`QaTaV`W^W\\VZTYQYN[ RTPVOYO^P`Qa TaU`V^V\\UZTY", + "LYPFSCSP RDRP OPVP MRXR SVSa TTTa TTM]X] QaVa", + "LYOEOFNFNEODQCTCVDWFVHTI TCUDVFUHTI RITIVJWLWMVOTPQPOONNNMOMON TIUJVLVMUOTP MRXR SVSa TTTa TTM]X] QaVa", + "F^YXWZU[R[PZMXKWIWHXHZI[K[MZOWPURQTKWGYFZF[G\\H[IZH[G[FZFYFWGVHTLRPPVNZMZ OPUP", + "E^P[MZJXHUGRGOHLJIMGPFTFWGYI[L\\O\\R[UYXVZS[P[ NJNW OJOW LJSJVKWMWNVPSQOQ SJUKVMVNUPSQ LWQW SQTRUVVWWWXV SQURVVWW", + "E^P[MZJXHUGRGOHLJIMGPFTFWGYI[L\\O\\R[UYXVZS[P[ UKVJVNUKSJPJNKMLLOLRMUNVPWSWUVVT PJNLMOMRNUPW", + "E_IM[M IR[R IW[W K[YI", + "CaHQGRHSIRHQ RQQRRSSRRQ \\Q[R\\S]R\\Q", + "", + "E_NWLTIRLPNM LPJRLT JRZR VWXT[RXPVM XPZRXT", + "JZWNTLRIPLMN PLRJTL RJRZ WVTXR[PXMV PXRZTX", + "F^ZJSJOKMLKNJQJSKVMXOYSZZZ SFS^", + "F^JJQJUKWLYNZQZSYVWXUYQZJZ QFQ^", + "F^JJQJUKWLYNZQZSYVWXUYQZJZ ORZR", + "", + "H\\LBL[ RBR[ XBX[", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", 
+ "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "I[RFJ[ RFZ[ MTWT", + "G\\KFK[ KFTFWGXHYJYLXNWOTP KPTPWQXRYTYWXYWZT[K[", + "H]ZKYIWGUFQFOGMILKKNKSLVMXOZQ[U[WZYXZV", + "G\\KFK[ KFRFUGWIXKYNYSXVWXUZR[K[", + "H[LFL[ LFYF LPTP L[Y[", + "HZLFL[ LFYF LPTP", + "H]ZKYIWGUFQFOGMILKKNKSLVMXOZQ[U[WZYXZVZS USZS", + "G]KFK[ YFY[ KPYP", + "NVRFR[", + "JZVFVVUYTZR[P[NZMYLVLT", + "G\\KFK[ YFKT POY[", + "HYLFL[ L[X[", + "F^JFJ[ JFR[ ZFR[ ZFZ[", + "G]KFK[ KFY[ YFY[", + "G]PFNGLIKKJNJSKVLXNZP[T[VZXXYVZSZNYKXIVGTFPF", + "G\\KFK[ KFTFWGXHYJYMXOWPTQKQ", + "G]PFNGLIKKJNJSKVLXNZP[T[VZXXYVZSZNYKXIVGTFPF SWY]", + "G\\KFK[ KFTFWGXHYJYLXNWOTPKP RPY[", + "H\\YIWGTFPFMGKIKKLMMNOOUQWRXSYUYXWZT[P[MZKX", + "JZRFR[ KFYF", + "G]KFKULXNZQ[S[VZXXYUYF", + "I[JFR[ ZFR[", + "F^HFM[ RFM[ RFW[ \\FW[", + "H\\KFY[ YFK[", + "I[JFRPR[ ZFRP", + "H\\YFK[ KFYF K[Y[", + "I[RFJ[ RFZ[ MTWT", + "G\\KFK[ KFTFWGXHYJYLXNWOTP KPTPWQXRYTYWXYWZT[K[", + "HYLFL[ LFXF", + "I[RFJ[ RFZ[ J[Z[", + "H[LFL[ LFYF LPTP L[Y[", + "H\\YFK[ KFYF K[Y[", + "G]KFK[ YFY[ KPYP", + "G]PFNGLIKKJNJSKVLXNZP[T[VZXXYVZSZNYKXIVGTFPF OPUP", + "NVRFR[", + "G\\KFK[ YFKT POY[", + "I[RFJ[ RFZ[", + "F^JFJ[ JFR[ ZFR[ ZFZ[", + "G]KFK[ KFY[ YFY[", + "I[KFYF OPUP K[Y[", + "G]PFNGLIKKJNJSKVLXNZP[T[VZXXYVZSZNYKXIVGTFPF", + "G]KFK[ YFY[ KFYF", + "G\\KFK[ KFTFWGXHYJYMXOWPTQKQ", + "I[KFRPK[ KFYF K[Y[", + "JZRFR[ KFYF", + "I[KKKILGMFOFPGQIRMR[ YKYIXGWFUFTGSIRM", + "H\\RFR[ PKMLLMKOKRLTMUPVTVWUXTYRYOXMWLTKPK", + "H\\KFY[ K[YF", + "G]RFR[ ILJLKMLQMSNTQUSUVTWSXQYMZL[L", + "H\\K[O[LTKPKLLINGQFSFVGXIYLYPXTU[Y[", + "G[G[IZLWOSSLVFV[UXSUQSNQLQKRKTLVNXQZT[Y[", + "F]SHTITLSPRSQUOXMZK[J[IZIWJRKOLMNJPHRGUFXFZG[I[KZMYNWOTP SPTPWQXRYTYWXYWZU[R[PZOX", + "H\\TLTMUNWNYMZKZIYGWFTFQGOIMLLNKRKVLYMZO[Q[TZVXWV", + "G^TFRGQIPMOSNVMXKZI[G[FZFXGWIWKXMZP[S[VZXXZT[O[KZHYGWFTFRHRJSMUPWRZT\\U", + 
"H\\VJVKWLYLZKZIYGVFRFOGNINLONPOSPPPMQLRKTKWLYMZP[S[VZXXYV", + "H\\RLPLNKMINGQFTFXG[G]F XGVNTTRXPZN[L[JZIXIVJULUNV QPZP", + "G^G[IZMVPQQNRJRGQFPFOGNINLONQOUOXNYMZKZQYVXXVZS[O[LZJXIVIT", + "F^MMKLJJJIKGMFNFPGQIQKPONULYJ[H[GZGX MRVOXN[L]J^H^G]F\\FZHXLVRUWUZV[W[YZZY\\V", + "IZWVUTSQROQLQIRGSFUFVGWIWLVQTVSXQZO[M[KZJXJVKUMUOV", + "JYT^R[PVOPOJPGRFTFUGVJVMURR[PaOdNfLgKfKdLaN^P\\SZWX", + "F^MMKLJJJIKGMFNFPGQIQKPONULYJ[H[GZGX ^I^G]F\\FZGXIVLTNROPO ROSQSXTZU[V[XZYY[V", + "I\\MRORSQVOXMYKYHXFVFUGTISNRSQVPXNZL[J[IZIXJWLWNXQZT[V[YZ[X", + "@aEMCLBJBICGEFFFHGIIIKHPGTE[ GTJLLHMGOFPFRGSISKRPQTO[ QTTLVHWGYFZF\\G]I]K\\PZWZZ[[\\[^Z_YaV", + "E]JMHLGJGIHGJFKFMGNINKMPLTJ[ LTOLQHRGTFVFXGYIYKXPVWVZW[X[ZZ[Y]V", + "H]TFQGOIMLLNKRKVLYMZO[Q[TZVXXUYSZOZKYHXGVFTFRHRKSNUQWSZU\\V", + "F_SHTITLSPRSQUOXMZK[J[IZIWJRKOLMNJPHRGUFZF\\G]H^J^M]O\\PZQWQUPTO", + "H^ULTNSOQPOPNNNLOIQGTFWFYGZIZMYPWSSWPYNZK[I[HZHXIWKWMXPZS[V[YZ[X", + "F_SHTITLSPRSQUOXMZK[J[IZIWJRKOLMNJPHRGUFYF[G\\H]J]M\\O[PYQVQSPTQUSUXVZX[ZZ[Y]V", + "H\\H[JZLXOTQQSMTJTGSFRFQGPIPKQMSOVQXSYUYWXYWZT[P[MZKXJVJT", + "H[RLPLNKMINGQFTFXG[G]F XGVNTTRXPZN[L[JZIXIVJULUNV", + "E]JMHLGJGIHGJFKFMGNINKMOLRKVKXLZN[P[RZSYUUXMZF XMWQVWVZW[X[ZZ[Y]V", + "F]KMILHJHIIGKFLFNGOIOKNOMRLVLYM[O[QZTWVTXPYMZIZGYFXFWGVIVKWNYP[Q", + "C_HMFLEJEIFGHFIFKGLILLK[ UFK[ UFS[ aF_G\\JYNVTS[", + "F^NLLLKKKILGNFPFRGSISLQUQXRZT[V[XZYXYVXUVU ]I]G\\FZFXGVITLPUNXLZJ[H[GZGX", + "F]KMILHJHIIGKFLFNGOIOKNOMRLVLXMZN[P[RZTXVUWSYM [FYMVWT]RbPfNgMfMdNaP^S[VY[V", + "H]ULTNSOQPOPNNNLOIQGTFWFYGZIZMYPWTTWPZN[K[JZJXKWNWPXQYR[R^QaPcNfLgKfKdLaN^Q[TYZV", + "", + "", + "", + "", + "", + "", + "I[JFR[ ZFR[ JFZF", + "G]IL[b", + "E_RJIZ RJ[Z", + "I[J[Z[", + "I[J[Z[ZZJZJ[", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "I\\XMX[ XPVNTMQMONMPLSLUMXOZQ[T[VZXX", + "H[LFL[ LPNNPMSMUNWPXSXUWXUZS[P[NZLX", + "I[XPVNTMQMONMPLSLUMXOZQ[T[VZXX", + "I\\XFX[ XPVNTMQMONMPLSLUMXOZQ[T[VZXX", + "I[LSXSXQWOVNTMQMONMPLSLUMXOZQ[T[VZXX", + "MYWFUFSGRJR[ OMVM", + "I\\XMX]W`VaTbQbOa 
XPVNTMQMONMPLSLUMXOZQ[T[VZXX", + "I\\MFM[ MQPNRMUMWNXQX[", + "NVQFRGSFREQF RMR[", + "MWRFSGTFSERF SMS^RaPbNb", + "IZMFM[ WMMW QSX[", + "NVRFR[", + "CaGMG[ GQJNLMOMQNRQR[ RQUNWMZM\\N]Q][", + "I\\MMM[ MQPNRMUMWNXQX[", + "I\\QMONMPLSLUMXOZQ[T[VZXXYUYSXPVNTMQM", + "H[LMLb LPNNPMSMUNWPXSXUWXUZS[P[NZLX", + "I\\XMXb XPVNTMQMONMPLSLUMXOZQ[T[VZXX", + "KXOMO[ OSPPRNTMWM", + "J[XPWNTMQMNNMPNRPSUTWUXWXXWZT[Q[NZMX", + "MYRFRWSZU[W[ OMVM", + "I\\MMMWNZP[S[UZXW XMX[", + "JZLMR[ XMR[", + "G]JMN[ RMN[ RMV[ ZMV[", + "J[MMX[ XMM[", + "JZLMR[ XMR[P_NaLbKb", + "J[XMM[ MMXM M[X[", + "H]QMONMPLRKUKXLZN[P[RZUWWTYPZM QMSMTNUPWXXZY[Z[", + "I\\UFSGQIOMNPMTLZKb UFWFYHYKXMWNUORO ROTPVRWTWWVYUZS[Q[OZNYMV", + "I\\JPLNNMOMQNROSRSVR[ ZMYPXRR[P_Ob", + "I[TMQMONMPLSLVMYNZP[R[TZVXWUWRVOTMRKQIQGRFTFVGXI", + "JZWOVNTMQMONOPPRSS SSOTMVMXNZP[S[UZWX", + "JYTFRGQHQIRJUKXK XKTMQONRMUMWNYP[S]T_TaSbQbP`", + "H\\IQJOLMNMONOPNTL[ NTPPRNTMVMXOXRWWTb", + "G\\HQIOKMMMNNNPMUMXNZO[Q[SZUWVUWRXMXJWGUFSFRHRJSMUPWRZT", + "LWRMPTOXOZP[R[TYUW", + "I[OMK[ YNXMWMUNQROSNS NSPTQUSZT[U[VZ", + "JZKFMFOGPHX[ RML[", + "H]OMIb NQMVMYO[Q[SZUXWT YMWTVXVZW[Y[[Y\\W", + "I[LMOMNSMXL[ YMXPWRUURXOZL[", + "JZTFRGQHQIRJUKXK UKRLPMOOOQQSTTVT TTPUNVMXMZO\\S^T_TaRbPb", + "J[RMPNNPMSMVNYOZQ[S[UZWXXUXRWOVNTMRM", + "G]PML[ UMVSWXX[ IPKNNM[M", + "I[MSMVNYOZQ[S[UZWXXUXRWOVNTMRMPNNPMSIb", + "I][MQMONMPLSLVMYNZP[R[TZVXWUWRVOUNSM", + "H\\SMP[ JPLNOMZM", + "H\\IQJOLMNMONOPMVMYO[Q[TZVXXTYPYM", + "G]ONMOKQJTJWKYLZN[Q[TZWXYUZRZOXMVMTORSPXMb", + "I[KMMMOOU`WbYb ZMYOWRM]K`Jb", + "F]VFNb GQHOJMLMMNMPLULXMZO[Q[TZVXXUZP[M", + "F]NMLNJQITIWJZK[M[OZQW RSQWRZS[U[WZYWZTZQYNXM", + "L\\UUTSRRPRNSMTLVLXMZO[Q[SZTXVRUWUZV[W[YZZY\\V", + "M[MVOSRNSLTITGSFQGPIOMNTNZO[P[RZTXUUURVVWWYW[V", + "MXTTTSSRQROSNTMVMXNZP[S[VYXV", + "L\\UUTSRRPRNSMTLVLXMZO[Q[SZTXZF VRUWUZV[W[YZZY\\V", + "NXOYQXRWSUSSRRQROSNUNXOZQ[S[UZVYXV", + "OWOVSQUNVLWIWGVFTGSIQQNZKaJdJfKgMfNcOZP[R[TZUYWV", + "L[UUTSRRPRNSMTLVLXMZO[Q[SZTY VRTYPdOfMgLfLdMaP^S\\U[XY[V", + "M\\MVOSRNSLTITGSFQGPIOMNSM[ 
M[NXOVQSSRURVSVUUXUZV[W[YZZY\\V", + "PWSMSNTNTMSM PVRRPXPZQ[R[TZUYWV", + "PWSMSNTNTMSM PVRRLdKfIgHfHdIaL^O\\Q[TYWV", + "M[MVOSRNSLTITGSFQGPIOMNSM[ M[NXOVQSSRURVSVUTVQV QVSWTZU[V[XZYY[V", + "OWOVQSTNULVIVGUFSGRIQMPTPZQ[R[TZUYWV", + "E^EVGSIRJSJTIXH[ IXJVLSNRPRQSQTPXO[ PXQVSSURWRXSXUWXWZX[Y[[Z\\Y^V", + "J\\JVLSNROSOTNXM[ NXOVQSSRURVSVUUXUZV[W[YZZY\\V", + "LZRRPRNSMTLVLXMZO[Q[SZTYUWUUTSRRQSQURWTXWXYWZV", + "KZKVMSNQMUGg MUNSPRRRTSUUUWTYSZQ[ MZO[R[UZWYZV", + "L[UUTSRRPRNSMTLVLXMZO[Q[SZ VRUUSZPaOdOfPgRfScS\\U[XY[V", + "MZMVOSPQPSSSTTTVSYSZT[U[WZXYZV", + "NYNVPSQQQSSVTXTZR[ NZP[T[VZWYYV", + "OXOVQSSO VFPXPZQ[S[UZVYXV PNWN", + "L[LVNRLXLZM[O[QZSXUU VRTXTZU[V[XZYY[V", + "L[LVNRMWMZN[O[RZTXUUUR URVVWWYW[V", + "I^LRJTIWIYJ[L[NZPX RRPXPZQ[S[UZWXXUXR XRYVZW\\W^V", + "JZJVLSNRPRQSQZR[U[XYZV WSVRTRSSOZN[L[KZ", + "L[LVNRLXLZM[O[QZSXUU VRPdOfMgLfLdMaP^S\\U[XY[V", + "LZLVNSPRRRTTTVSXQZN[P\\Q^QaPdOfMgLfLdMaP^S\\WYZV", + "J\\K[NZQXSVUSWOXKXIWGUFSGRHQJPOPTQXRZT[V[XZYY", + "", + "", + "", + "", + "", + "I[WUWRVOUNSMQMONMPLSLVMYNZP[R[TZVXWUXPXKWHVGTFRFPGNI", + "JZWNUMRMPNNPMSMVNYOZQ[T[VZ MTUT", + "J[TFRGPJOLNOMTMXNZO[Q[SZUWVUWRXMXIWGVFTF NPWP", + "H\\VFNb QMNNLPKSKVLXNZQ[S[VZXXYUYRXPVNSMQM", + "I[XOWNTMQMNNMOLQLSMUOWSZT\\T^S_Q_", + "", + "", + "DaWNVLTKQKOLNMMOMRNTOUQVTVVUWS WKWSXUYV[V\\U]S]O\\L[JYHWGTFQFNGLHJJILHOHRIUJWLYNZQ[T[WZYY", + "F^ZIJRZ[", + "F^JIZRJ[", + "KYOBOb OBVB ObVb", + "KYUBUb NBUB NbUb", + "KYTBQEPHPJQMSOSPORSTSUQWPZP\\Q_Tb", + "KYPBSETHTJSMQOQPURQTQUSWTZT\\S_Pb", + "F^[FYGVHSHPGNFLFJGIIIKKMMMOLPJPHNF [FI[ YTWTUUTWTYV[X[ZZ[X[VYT", + "NV", + "JZ", + "H\\QFNGLJKOKRLWNZQ[S[VZXWYRYOXJVGSFQF", + "H\\NJPISFS[", + "H\\LKLJMHNGPFTFVGWHXJXLWNUQK[Y[", + "H\\MFXFRNUNWOXPYSYUXXVZS[P[MZLYKW", + "H\\UFKTZT UFU[", + "H\\WFMFLOMNPMSMVNXPYSYUXXVZS[P[MZLYKW", + "H\\XIWGTFRFOGMJLOLTMXOZR[S[VZXXYUYTXQVOSNRNOOMQLT", + "H\\YFO[ KFYF", + "H\\PFMGLILKMMONSOVPXRYTYWXYWZT[P[MZLYKWKTLRNPQOUNWMXKXIWGTFPF", + "H\\XMWPURRSQSNRLPKMKLLINGQFRFUGWIXMXRWWUZR[P[MZLX", + "MWRYQZR[SZRY", + 
"MWSZR[QZRYSZS\\R^Q_", + "MWRMQNROSNRM RYQZR[SZRY", + "MWRMQNROSNRM SZR[QZRYSZS\\R^Q_", + "MWRFRT RYQZR[SZRY", + "I[LKLJMHNGPFTFVGWHXJXLWNVORQRT RYQZR[SZRY", + "NVRFRM", + "JZNFNM VFVM", + "KYQFOGNINKOMQNSNUMVKVIUGSFQF", + "H\\PBP_ TBT_ YIWGTFPFMGKIKKLMMNOOUQWRXSYUYXWZT[P[MZKX", + "G][BIb", + "KYVBTDRGPKOPOTPYR]T`Vb", + "KYNBPDRGTKUPUTTYR]P`Nb", + "NVRBRb", + "E_IR[R", + "E_RIR[ IR[R", + "E_IO[O IU[U", + "G]KKYY YKKY", + "JZRLRX MOWU WOMU", + "MWRQQRRSSRRQ", + "MWSFRGQIQKRLSKRJ", + "MWRHQGRFSGSIRKQL", + "E_UMXP[RXTUW IR[R", + "G]OFOb UFUb JQZQ JWZW", + "E_\\O\\N[MZMYNXPVUTXRZP[L[JZIYHWHUISJRQNRMSKSIRGPFNGMIMKNNPQUXWZY[[[\\Z\\Y", + "G]IIJKKOKUJYI[ [IZKYOYUZY[[ IIKJOKUKYJ[I I[KZOYUYYZ[[", + "F_\\Q[OYNWNUOTPQTPUNVLVJUISIQJOLNNNPOQPTTUUWVYV[U\\S\\Q", + "KYOBO[ UBU[", + "F^RBR[ I[[[", + "F^[BI[[[", + "E_RIQJRKSJRI IYHZI[JZIY [YZZ[[\\Z[Y", + "F^RHNLKPJSJUKWMXOXQWRU RHVLYPZSZUYWWXUXSWRU RUQYP\\ RUSYT\\ P\\T\\", + "F^RNQKPINHMHKIJKJOKRLTNWR\\ RNSKTIVHWHYIZKZOYRXTVWR\\", + "F^RGPJLOIR RGTJXO[R IRLUPZR] [RXUTZR]", + "F^RTTWVXXXZW[U[SZQXPVPSQ SQUOVMVKUISHQHOINKNMOOQQ QQNPLPJQISIUJWLXNXPWRT RTQYP\\ RTSYT\\ P\\T\\", + "F^RRR[Q\\ RVQ\\ RIQHOHNINKONRR RISHUHVIVKUNRR RRNOLNJNIOIQJR RRVOXNZN[O[QZR RRNULVJVIUISJR RRVUXVZV[U[SZR", + "F^ISJSLTMVMXLZ ISIRJQLQMRNTNWMYLZ RGPIOLOOQUQXPZR\\ RGTIULUOSUSXTZR\\ [S[RZQXQWRVTVWWYXZ [SZSXTWVWXXZ KVYV", + "", + "", + "", + "PSSRRSQSPRPQQPRPSQSSRUQV QQQRRRRQQQ", + "PTQPPQPSQTSTTSTQSPQP RQQRRSSRRQ", + "NVPOTU TOPU NRVR", + "MWRKQMOPMR RKSMUPWR RMOQ RMUQ ROPQ ROTQ QQSQ MRWR", + "MWMRMQNOONQMSMUNVOWQWR PNTN OOUO NPVP NQVQ MRWR", + "LRLFLRRRLF LIPQ LLOR LOMQ", + "MWRKQMOPMR RKSMUPWR", + "MWWRWQVOUNSMQMONNOMQMR", + "G]]R]P\\MZJWHTGPGMHJJHMGPGR", + "MWMRMSNUOVQWSWUVVUWSWR", + "LXLPNRQSSSVRXP", + "RURUTTURTPRO", + "RVRRUPVNVLUKTK", + "NRRROPNNNLOKPK", + "MWWHVGTFQFOGNHMJMLNNOOUSVTWVWXVZU[S\\P\\N[MZ", + "G]IWHVGTGQHOINKMMMONPOTUUVWWYW[V\\U]S]P\\N[M", + "G]RRTUUVWWYW[V\\U]S]Q\\O[NYMWMUNTOPUOVMWKWIVHUGSGQHOINKMMMONPORR", + "H\\KFK[ HF[FQP[Z ZV[Y\\[ ZVZY 
WYZY WYZZ\\[", + "KYUARBPCNELHKLKRLUNWQXSXVWXUYR KPLMNKQJSJVKXMYPYVXZV]T_R`Oa", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + ">f>RfR", + "D`D``D", + "RRR>Rf", + "D`DD``", + "D`DR`R", + "F^FY^K", + "KYK^YF", + "", + "KYKFY^", + "F^FK^Y", + "KYKRYR", + "MWMWWM", + "", + "MWMMWW", + "", + "", + "", + "", + "D`DOGQKSPTTTYS]Q`O", + "PUUDSGQKPPPTQYS]U`", + "OTODQGSKTPTTSYQ]O`", + "D`DUGSKQPPTPYQ]S`U", + "KYRJYNKVRZ", + "JZJRNKVYZR", + "KYKVKNYVYN", + "JZLXJPZTXL", + "JZJ]L]O\\Q[TXUVVSVOULTJSIQIPJOLNONSOVPXS[U\\X]Z]", + "I]]Z]X\\U[SXPVOSNONLOJPIQISJTLUOVSVVUXT[Q\\O]L]J", + "JZZGXGUHSIPLONNQNUOXPZQ[S[TZUXVUVQUNTLQIOHLGJG", + "G[GJGLHOIQLTNUQVUVXUZT[S[QZPXOUNQNNOLPISHUGXGZ", + "E[EPFRHTJUMVQVUUXSZP[NZLWLSMQNNPLSKVKYL\\M^", + "EYETHVKWPWSVVTXQYNYLXKVKSLPNNQMTMYN\\P_", + "OUQOOQOSQUSUUSUQSOQO QPPQPSQTSTTSTQSPQP RQQRRSSRRQ", + "", + "D`DRJR ORUR ZR`R", + "D`DUDO`O`U", + "JZRDJR RDZR", + "D`DR`R JYZY P`T`", + "D`DR`R DRRb `RRb", + "", + "", + "", + "", + "", + "KYQKNLLNKQKSLVNXQYSYVXXVYSYQXNVLSKQK", + "LXLLLXXXXLLL", + "KYRJKVYVRJ", + "LXRHLRR\\XRRH", + "JZRIPOJOOSMYRUWYUSZOTORI", + "KYRKRY KRYR", + "MWMMWW WMMW", + "MWRLRX MOWU WOMU", + "", + "", + "NVQNOONQNSOUQVSVUUVSVQUOSNQN OQOS PPPT QOQU RORU SOSU TPTT UQUS", + "NVNNNVVVVNNN OOOU POPU QOQU RORU SOSU TOTU UOUU", + "MWRLMUWURL ROOT ROUT RRQT RRST", + "LULRUWUMLR ORTU ORTO RRTS RRTQ", + "MWRXWOMORX RUUP RUOP RRSP RRQP", + "OXXROMOWXR URPO URPU RRPQ RRPS", + "LXRLNWXPLPVWRL RRRL RRLP RRNW RRVW RRXP", + "", + "", + "", + "MWRLRX OOUO MUOWQXSXUWWU", + "LXRLRX LQMOWOXQ PWTW", + "KYMNWX WNMX OLLOKQ ULXOYQ", + "I[NII[ VI[[ MM[[ WMI[ NIVI MMWM", + "I[RGRV MJWP WJMP IVL\\ [VX\\ IV[V L\\X\\", + "G[MJSV KPSL G\\[\\[RG\\", + "LXPLPPLPLTPTPXTXTTXTXPTPTLPL", + "KYYPXNVLSKQKNLLNKQKSLVNXQYSYVXXVYT YPWNUMSMQNPOOQOSPUQVSWUWWVYT", + "KYRJKVYVRJ RZYNKNRZ", + "G]PIPGQFSFTGTI GZHXJVKTLPLKMJOIUIWJXKXPYTZV\\X]Z GZ]Z 
QZP[Q\\S\\T[SZ", + "JZRMRS RSQ\\ RSS\\ Q\\S\\ RMQJPHNG QJNG RMSJTHVG SJVG RMNKLKJM PLLLJM RMVKXKZM TLXLZM RMPNOOOR RMPOOR RMTNUOUR RMTOUR", + "JZRIRK RNRP RSRU RYQ\\ RYS\\ Q\\S\\ RGQIPJ RGSITJ PJRITJ RKPNNOMN RKTNVOWN NOPORNTOVO RPPSNTLTKRKSLT RPTSVTXTYRYSXT NTPTRSTTVT RUPXOYMZLZKYJWJYLZ RUTXUYWZXZYYZWZYXZ MZOZRYUZWZ", + "JZRYQ\\ RYS\\ Q\\S\\ RYUZXZZXZUYTWTYRZOYMWLUMVJUHSGQGOHNJOMMLKMJOKRMTKTJUJXLZOZRY", + "JZRYQ\\ RYS\\ Q\\S\\ RYVXVVXUXRZQZLYIXHVHTGPGNHLHKIJLJQLRLUNVNXRY", + "I[IPKR LKNP RGRO XKVP [PYR", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "QSRQQRRSSRRQ", + "PTQPPQPSQTSTTSTQSPQP", + "NVQNOONQNSOUQVSVUUVSVQUOSNQN", + "MWQMONNOMQMSNUOVQWSWUVVUWSWQVOUNSMQM", + "KYQKNLLNKQKSLVNXQYSYVXXVYSYQXNVLSKQK", + "G]PGMHJJHMGPGTHWJZM\\P]T]W\\ZZ\\W]T]P\\MZJWHTGPG", + "AcPALBJCGEEGCJBLAPATBXCZE]G_JaLbPcTcXbZa]__]aZbXcTcPbLaJ_G]EZCXBTAPA", + "fRAPCMDJDGCEA>H@JAMAZB]D_G`M`PaRc RATCWDZD]C_AfHdJcMcZb]`_]`W`TaRc", + "AcRAPCMDJDGCEABGAKAPBTDXG\\L`Rc RATCWDZD]C_AbGcKcPbT`X]\\X`Rc BHbH", + "H[WPVQWRXQXPVNTMQMNNLPKSKULXNZQ[S[VZXX QMONMPLSLUMXOZQ[ LbXF", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "KYRKMX RNVX RKWX OTTT KXPX TXYX", + "JZNKNX OKOX LKSKVLWNVPSQ SKULVNUPSQ OQSQVRWTWUVWSXLX SQURVTVUUWSX", + "KYVLWKWOVLTKQKOLNMMPMSNVOWQXTXVWWU QKOMNPNSOVQX", + "JZNKNX OKOX LKSKVLWMXPXSWVVWSXLX SKULVMWPWSVVUWSX", + "JYNKNX OKOX SOSS LKVKVOUK OQSQ LXVXVTUX", + "JXNKNX OKOX SOSS LKVKVOUK OQSQ LXQX", + 
"K[VLWKWOVLTKQKOLNMMPMSNVOWQXTXVW QKOMNPNSOVQX TXUWVU VSVX WSWX TSYS", + "J[NKNX OKOX VKVX WKWX LKQK TKYK OQVQ LXQX TXYX", + "NWRKRX SKSX PKUK PXUX", + "LXSKSURWQX TKTUSWQXPXNWMUNTOUNV QKVK", + "JZNKNX OKOX WKOS QQVX RQWX LKQK TKYK LXQX TXYX", + "KXOKOX PKPX MKRK MXWXWTVX", + "I\\MKMX NNRX NKRU WKRX WKWX XKXX KKNK WKZK KXOX UXZX", + "JZNKNX OMVX OKVV VKVX LKOK TKXK LXPX", + "KZQKOLNMMPMSNVOWQXTXVWWVXSXPWMVLTKQK QKOMNPNSOVQX TXVVWSWPVMTK", + "JYNKNX OKOX LKSKVLWNWOVQSROR SKULVNVOUQSR LXQX", + "KZQKOLNMMPMSNVOWQXTXVWWVXSXPWMVLTKQK QKOMNPNSOVQX TXVVWSWPVMTK PWPUQTSTTUUZV[W[XZ TUUXVZW[", + "JZNKNX OKOX LKSKVLWNWOVQSROR SKULVNVOUQSR LXQX SRTSUWVXWXXW SRUSVWWX", + "KZVMWKWOVMULSKQKOLNMNOOPQQTRVSWT NNOOQPTQVRWSWVVWTXRXPWOVNTNXOV", + "KZRKRX SKSX NKMOMKXKXOWK PXUX", + "J[NKNUOWQXTXVWWUWK OKOUPWQX LKQK UKYK", + "KYMKRX NKRU WKRX KKPK TKYK", + "I[LKOX MKOT RKOX RKUX SKUT XKUX JKOK VKZK", + "KZNKVX OKWX WKNX LKQK TKYK LXQX TXYX", + "LYNKRRRX OKSR WKSRSX LKQK TKYK PXUX", + "LYVKNX WKOX OKNONKWK NXWXWTVX", + "KYRKMX RNVX RKWX OTTT KXPX TXYX", + "JZNKNX OKOX LKSKVLWNVPSQ SKULVNUPSQ OQSQVRWTWUVWSXLX SQURVTVUUWSX", + "KXOKOX PKPX MKWKWOVK MXRX", + "KYRKLX RMWX RKXX MWVW LXXX", + "JYNKNX OKOX SOSS LKVKVOUK OQSQ LXVXVTUX", + "LYVKNX WKOX OKNONKWK NXWXWTVX", + "J[NKNX OKOX VKVX WKWX LKQK TKYK OQVQ LXQX TXYX", + "KZQKOLNMMPMSNVOWQXTXVWWVXSXPWMVLTKQK QKOMNPNSOVQX TXVVWSWPVMTK QOQT TOTT QQTQ QRTR", + "NWRKRX SKSX PKUK PXUX", + "JZNKNX OKOX WKOS QQVX RQWX LKQK TKYK LXQX TXYX", + "KYRKMX RNVX RKWX KXPX TXYX", + "I\\MKMX NNRX NKRU WKRX WKWX XKXX KKNK WKZK KXOX UXZX", + "JZNKNX OMVX OKVV VKVX LKOK TKXK LXPX", + "JZMJLM XJWM PPOS UPTS MVLY XVWY MKWK MLWL PQTQ PRTR MWWW MXWX", + "KZQKOLNMMPMSNVOWQXTXVWWVXSXPWMVLTKQK QKOMNPNSOVQX TXVVWSWPVMTK", + "J[NKNX OKOX VKVX WKWX LKYK LXQX TXYX", + "JYNKNX OKOX LKSKVLWNWOVQSROR SKULVNVOUQSR LXQX", + "K[MKRQ NKSQMX MKWKXOVK NWWW MXWXXTVX", + "KZRKRX SKSX NKMOMKXKXOWK PXUX", + "KZMONLOKPKQLRORX XOWLVKUKTLSOSX MONMOLPLQMRO XOWMVLULTMSO PXUX", + "KZRKRX SKSX 
QNNOMQMRNTQUTUWTXRXQWOTNQN QNOONQNROTQU TUVTWRWQVOTN PKUK PXUX", + "KZNKVX OKWX WKNX LKQK TKYK LXQX TXYX", + "J[RKRX SKSX LPMONOOSQU TUVSWOXOYP MONROTQUTUVTWRXO PKUK PXUX", + "KZMVNXQXMRMONMOLQKTKVLWMXOXRTXWXXV OUNRNOOMQK TKVMWOWRVU NWPW UWWW", + "KYTKKX SMTX TKUX NTTT IXNX RXWX", + "JYPKLX QKMX NKUKWLWNVPSQ UKVLVNUPSQ OQRQTRUSUUTWQXJX RQTSTUSWQX", + "KXVLWLXKWNVLTKRKPLOMNOMRMUNWPXRXTWUU RKPMOONRNVPX", + "JYPKLX QKMX NKTKVLWNWQVTUVTWQXJX TKULVNVQUTTVSWQX", + "JYPKLX QKMX SORS NKXKWNWK OQRQ JXTXUUSX", + "JXPKLX QKMX SORS NKXKWNWK OQRQ JXOX", + "KYVLWLXKWNVLTKRKPLOMNOMRMUNWPXRXTWUVVS RKPMOONRNVPX RXTVUS SSXS", + "J[PKLX QKMX XKTX YKUX NKSK VK[K OQVQ JXOX RXWX", + "NWTKPX UKQX RKWK NXSX", + "LXUKRUQWPX VKSURWPXOXMWLUMTNUMV SKXK", + "JZPKLX QKMX YKOR RPTX SPUX NKSK VK[K JXOX RXWX", + "KXQKMX RKNX OKTK KXUXVUTX", + "I\\OKKX OMPX PKQV YKPX YKUX ZKVX MKPK YK\\K IXMX SXXX", + "JZPKLX PKTX QKTU XKTX NKQK VKZK JXNX", + "KYRKPLOMNOMRMUNWPXRXTWUVVTWQWNVLTKRK RKPMOONRNVPX RXTVUTVQVMTK", + "JYPKLX QKMX NKUKWLXMXOWQTROR UKWMWOVQTR JXOX", + "KYRKPLOMNOMRMUNWPXRXTWUVVTWQWNVLTKRK RKPMOONRNVPX RXTVUTVQVMTK OWOVPUQURVRZS[T[UZ RVSZT[", + "JZPKLX QKMX NKUKWLXMXOWQTROR UKWMWOVQTR SRTWUXVXWW SRTSUWVX JXOX", + "KZWLXLYKXNWLUKRKPLOMOOPPUSVT ONPOURVSVVUWSXPXNWMULXMWNW", + "KZTKPX UKQX PKNNOKZKYNYK NXSX", + "J[PKMUMWOXSXUWVUYK QKNUNWOX NKSK WK[K", + "KYOKPX PKQV YKPX MKRK VK[K", + "I[NKMX OKNV TKMX TKSX UKTV ZKSX LKQK XK\\K", + "KZPKTX QKUX YKLX NKSK VK[K JXOX RXWX", + "LYPKRQPX QKSQ YKSQQX NKSK VK[K NXSX", + "LYXKLX YKMX QKONPKYK LXUXVUTX", + "", + "", + "", + "", + "", + "", + "", + "KZMHX\\", + "JZRMLW RMXW", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "LZQOPPPQOQOPQOTOVQVWWXXX TOUQUWWX URRSPTOUOWPXSXTWUU RSPUPWQX", + "JYNKNX OKOX ORPPROTOVPWRWUVWTXRXPWOU TOUPVRVUUWTX LKOK", + "LXVQUQURVRVQUPSOQOOPNRNUOWQXSXUWVV QOPPOROUPWQX", + "L[VKVX WKWX VRUPSOQOOPNRNUOWQXSXUWVU QOPPOROUPWQX TKWK VXYX", + "LXOSVSVRUPSOQOOPNRNUOWQXSXUWVV USUQSO 
QOPPOROUPWQX", + "LWTKULUMVMVLTKRKPMPX RKQMQX NOSO NXSX", + "LYQOOQOSQUSUUSUQSOQO QOPQPSQU SUTSTQSO TPUOVO PTOUOXPYTYVZ OWPXTXVYV[T\\P\\N[NYPX", + "J[NKNX OKOX ORPPROTOVPWRWX TOUPVRVX LKOK LXQX TXYX", + "NWRKRLSLSKRK RORX SOSX POSO PXUX", + "NWSKSLTLTKSK SOSZR\\ TOTZR\\P\\O[OZPZP[O[ QOTO", + "JZNKNX OKOX WOOU RSVX SSWX LKOK TOYO LXQX TXYX", + "NWRKRX SKSX PKSK PXUX", + "F_JOJX KOKX KRLPNOPORPSRSX POQPRRRX SRTPVOXOZP[R[X XOYPZRZX HOKO HXMX PXUX XX]X", + "J[NONX OOOX ORPPROTOVPWRWX TOUPVRVX LOOO LXQX TXYX", + "LYQOOPNRNUOWQXTXVWWUWRVPTOQO QOPPOROUPWQX TXUWVUVRUPTO", + "JYNON\\ OOO\\ ORPPROTOVPWRWUVWTXRXPWOU TOUPVRVUUWTX LOOO L\\Q\\", + "KYUOU\\ VOV\\ URTPROPONPMRMUNWPXRXTWUU POOPNRNUOWPX S\\X\\", + "KXOOOX POPX PRQPSOUOVPVQUQUPVP MOPO MXRX", + "LYTOUPUQVQVPTOQOOPORQSTTVU OQQRTSVTVWTXQXOWOVPVPWQX", + "LWPKPVRXTXUWUV QKQVRX NOTO", + "J[NONUOWQXSXUWVU OOOUPWQX VOVX WOWX LOOO TOWO VXYX", + "KYNORX OORV VORX LOQO TOXO", + "I[LOOX MOOU ROOX ROUX SOUU XOUX JOOO VOZO", + "KYNOUX OOVX VONX LOQO TOXO LXPX SXXX", + "KYNORX OORV VORXP[N\\M\\L[LZMZM[L[ LOQO TOXO", + "LXUONX VOOX OONQNOVO NXVXVVUX", + "K[QOOPNQMSMUNWPXQXSWUUWRXO QOOQNSNUOWPX QOSOUPWWXX SOTPVWXXYX", + "KXRKPMOOMUK\\ QLPNNTL\\ RKTKVLVNUPRQ TKULUNTPRQ RQTRUTUVTWRXQXOWNT RQSRTTTVRX", + "KYLQNOPORPSSSXR\\ LQNPPPRQSS WOVRSXQ\\", + "KYSOQOOPNQMSMUNWPXRXTWUVVTVRUPRNQLQKRJTJUKVM QOOQNSNVPX RXTVUTUQSO QLRKTKVM", + "LXVPTOQOOPOQPRRS QOPPPQRS RSOTNUNWPXSXUW RSPTOUOWPX", + "LWRKQLQMSNVNVMSNPOOPNRNTOVPWRXSYS[R\\P\\O[ SNQOPPOROTPVRX", + "IYJRKPLONOOPOQMX MONPNQLX OQPPROTOVPVRS\\ TOUPURR\\", + "IYJSKQLPNPOQOVPX MPNQNUOWPXQXSWTVUTVQVNULTKRKQLQNRPURWS QXSVTTUQUNTK", + "NWROPVPWQXSXUWVU SOQVQWRX", + "KYOOLX POMX UOVPWPVOTORQOR ORPSRWTXVWWU ORQSSWTX", + "LXLKNKPLWX NKOLVX RPMX RPNX", + "KZOOK\\ POL\\ NUNWOXQXSWTV VOTVTWUXWXXWYU WOUVUWVX", + "JYNOMX OONUMX VRVOWOVRTUQWNXMX LOOO", + "MXRKQLQMSNVN TNQOPPPRRSUS TNROQPQRRS SSPTOUOWQXSYTZT[S\\Q\\ SSQTPUPWQX", + "KXQOOPNQMSMUNWPXRXTWUVVTVRUPSOQO QOOQNSNVPX RXTVUTUQSO", + "IZPPMX PPNX TPSX 
TPTX KQMOXO KQMPXP", + "JXSOQOOPNQMSJ\\ QOOQNSK\\ SOUPVRVTUVTWRXPXNWMU SOUQUTTVRX", + "K[YOQOOPNQMSMUNWPXRXTWUVVTVRUPYP QOOQNSNVPX RXTVUTUQSO", + "KZSPQX SPRX MQOOXO MQOPXP", + "JXKRLPMOOOPPPROUOWPX NOOPORNUNWPXQXSWUUVRVOUOVP", + "KZOPNQMSMUNWPXRXUWWUXRXPWOUOTPSRRUO\\ MUNVPWRWUVWTXR XQWPUPSR RUQXP\\", + "KXMONOPPS[T\\ NOOPR[T\\U\\ VOTRNYL\\", + "I[TKQ\\ UKP\\ JRKPLONOOPOVPWSWUVWT MONPNTOWPXSXUWWTXRYO", + "JZNPPPPONPMQLSLUMWNXPXQWRUSR LUNWPWRU RRRWSXUXWVXTXRWPVOVPWP RUSWUWWV", + "KZVOTVTWUXWXXWYU WOUVUWVX USUQSOQOOPNQMSMUNWPXRXTV QOOQNSNVPX", + "JXOKMR PKNRNVPX NROPQOSOUPVRVTUVTWRXPXNWMUMR SOUQUTTVRX MKPK", + "KXUPUQVQUPSOQOOPNQMSMUNWPXRXTWUV QOOQNSNVPX", + "KZWKTVTWUXWXXWYU XKUVUWVX USUQSOQOOPNQMSMUNWPXRXTV QOOQNSNVPX UKXK", + "KWNURTTSURUPSOQOOPNQMSMUNWPXRXTWUV QOOQNSNVPX", + "MXWKXLXKVKTLSNPYO[N\\ VKULTNQYP[N\\L\\L[M\\ POVO", + "KYVOTVSYR[ WOUVTYR[P\\M\\L[M[N\\ USUQSOQOOPNQMSMUNWPXRXTV QOOQNSNVPX", + "KZPKLX QKMX OQPPROTOVPVRUUUWVX TOUPURTUTWUXWXXWYU NKQK", + "MWSKSLTLTKSK NROPPOROSPSRRURWSX QORPRRQUQWRXTXUWVU", + "MWTKTLULUKTK ORPPQOSOTPTRRYQ[O\\M\\M[N\\ ROSPSRQYP[O\\", + "KXPKLX QKMX VPUQVQVPUOTORQPROR ORPSQWRXTXUWVU ORQSRWSX NKQK", + "NVSKPVPWQXSXTWUU TKQVQWRX QKTK", + "F^GRHPIOKOLPLQJX JOKPKQIX LQMPOOQOSPSQQX QORPRQPX SQTPVOXOZPZRYUYWZX XOYPYRXUXWYX[X\\W]U", + "J[KRLPMOOOPPPQNX NOOPOQMX PQQPSOUOWPWRVUVWWX UOVPVRUUUWVXXXYWZU", + "KXQOOPNQMSMUNWPXRXTWUVVTVRUPSOQO QOOQNSNVPX RXTVUTUQSO", + "JYKRLPMOOOPPPQM\\ NOOPOQL\\ PQROTOVPWRWTVVUWSXQXOVOT TOVQVTUVSX J\\O\\", + "KYVOR\\ WOS\\ USUQSOQOOPNQMSMUNWPXRXTV QOOQNSNVPX P\\U\\", + "LXMRNPOOQORPRQPX POQPQQOX RQSPUOVOWPWQVQWP", + "LYVPVQWQVPTOQOOPORQSTTVU OQQRTSVTVWTXQXOWNVOVOW", + "NWSKPVPWQXSXTWUU TKQVQWRX POUO", + "IZJRKPLONOOPORNUNWOX MONPNRMUMWOXQXSWTV VOTVTWUXWXXWYU WOUVUWVX", + "JXKRLPMOOOPPPROUOWPX NOOPORNUNWPXQXSWUUVRVOUOVP", + "H\\IRJPKOMONPNRMUMWNX LOMPMRLULWNXOXQWRV TORVRWTX UOSVSWTXUXWWYUZRZOYOZP", + "JZMRNPPOROSPSR QORPRRQUPWNXMXLWLVMVLW XPWQXQXPWOVOTPSRRURWSX QUQWRXTXVWWU", + "IYJRKPLONOOPORNUNWOX 
MONPNRMUMWOXQXSWTV VOTVSYR[ WOUVTYR[P\\M\\L[M[N\\", + "KYWOWPVQNVMWMX NQOOROUQ OPRPUQVQ NVOVRWUW OVRXUXVV", + "H[RKSLSMTMTLRKOKMLLNLX OKNLMNMX XKYLYMZMZLXKVKTMTX VKUMUX JOWO JXOX RXWX", + "J[UKVLWLWKQKOLNNNX QKPLONOX VOVX WOWX LOWO LXQX TXYX", + "J[WKQKOLNNNX QKPLONOX UKVLVX WKWX LOVO LXQX TXYX", + "F_PKQLQMRMRLPKMKKLJNJX MKLLKNKX YKZL[L[KUKSLRNRX UKTLSNSX ZOZX [O[X HO[O HXMX PXUX XX]X", + "F_PKQLQMRMRLPKMKKLJNJX MKLLKNKX [KUKSLRNRX UKTLSNSX YKZLZX [K[X HOZO HXMX PXUX XX]X", + "NWRORX SOSX POSO PXUX", + "", + "LXVPTOROPPOQNSNUOWQXSXUW ROPQOSOVQX OSSS", + "LYSKQLPMOONRNUOWPXRXTWUVVTWQWNVLUKSK SKQMPOOSOVPX RXTVUTVPVMUK OQVQ", + "KZTKQ\\ UKP\\ QONPMRMUNWQXTXWWXUXRWPTOQO QOOPNRNUOWQX TXVWWUWRVPTO", + "LXUPVRVQUPSOQOOPNRNTOVRX QOOQOTPVRXSYS[R\\P\\", + "", + "", + "", + "I[VKWLXLVKSKQLPMOOLYK[J\\ SKQMPOMYL[J\\H\\H[I\\ ZK[L[KYKWLVNSYR[Q\\ YKXLWNTYS[Q\\O\\O[P\\ LOYO", + "IZVKWLXLXKSKQLPMOOLYK[J\\ SKQMPOMYL[J\\H\\H[I\\ VOTVTWUXWXXWYU WOUVUWVX LOWO", + "IZVKWL XKSKQLPMOOLYK[J\\ SKQMPOMYL[J\\H\\H[I\\ WKTVTWUXWXXWYU XKUVUWVX LOVO", + "F^SKTLTM ULSKPKNLMMLOIYH[G\\ PKNMMOJYI[G\\E\\E[F\\ ZK[L\\L\\KWKUL TMSOPYO[N\\ WKUMTOQYP[N\\L\\L[M\\ ZOXVXWYX[X\\W]U [OYVYWZX IO[O", + "F^SKTLTM ULSKPKNLMMLOIYH[G\\ PKNMMOJYI[G\\E\\E[F\\ ZK[L \\KWKUL TMSOPYO[N\\ WKUMTOQYP[N\\L\\L[M\\ [KXVXWYX[X\\W]U \\KYVYWZX IOZO", + "MWNROPPOROSPSRRURWSX QORPRRQUQWRXTXUWVU", + "", + "OU", + "LX", + "LYQKOLNONTOWQXTXVWWTWOVLTKQK QKPLOOOTPWQX TXUWVTVOULTK", + "LYPNSKSX RLRX OXVX", + "LYOMONNNNMOLQKTKVLWNVPTQQROSNUNX TKULVNUPTQ NWOVPVSWVWWV PVSXVXWVWU", + "LYOMONNNNMOLQKTKVLWNVPTQ TKULVNUPTQ RQTQVRWTWUVWTXQXOWNVNUOUOV TQURVTVUUWTX", + "LYSMSX TKTX TKMTXT QXVX", + "LYOKNQ OKVK OLSLVK NQOPQOTOVPWRWUVWTXQXOWNVNUOUOV TOUPVRVUUWTX", + "LYVMVNWNWMVLTKRKPLOMNPNUOWQXTXVWWUWSVQTPQPNR RKPMOPOUPWQX TXUWVUVSUQTP", + "LYNKNO VMRTPX WKTQQX NMPKRKUM NMPLRLUMVM", + "LYQKOLNNOPQQTQVPWNVLTKQK QKPLONPPQQ TQUPVNULTK QQORNTNUOWQXTXVWWUWTVRTQ QQPROTOUPWQX TXUWVUVTURTQ", + "LYOVOUNUNVOWQXSXUWVVWSWNVLTKQKOLNNNPORQSTSWQ SXUVVSVNULTK 
QKPLONOPPRQS", + "NVRVQWRXSWRV", + "NVSWRXQWRVSWSYQ[", + "NVROQPRQSPRO RVQWRXSWRV", + "NVROQPRQSPRO SWRXQWRVSWSYQ[", + "NVRKQLRSSLRK RLRO RVQWRXSWRV", + "LYNNONOONONNOLQKTKVLWNWOVQSRRSRTST TKVMVPUQSR RWRXSXSWRW", + "OVRKRP SKRP", + "LXOKOP PKOP UKUP VKUP", + "MWQKPLPNQOSOTNTLSKQK", + "MWRJRP OKUO UKOO", + "KZXHM\\", + "MWUHSJQMPPPTQWSZU\\ SJRLQPQTRXSZ", + "MWOHQJSMTPTTSWQZO\\ QJRLSPSTRXQZ", + "MWPHP\\ QHQ\\ PHUH P\\U\\", + "MWSHS\\ THT\\ OHTH O\\T\\", + "LWSHRIQKQMRORPPRRTRUQWQYR[S\\ RIQM QKRO RUQY QWR[", + "MXQHRISKSMRORPTRRTRUSWSYR[Q\\ RISM SKRO RUSY SWR[", + "MWTHPRT\\", + "MWPHTRP\\", + "OURHR\\", + "MWPHP\\ THT\\", + "I[LRXR", + "I[RLRX LRXR", + "JZRMRX MRWR MXWX", + "JZRMRX MMWM MRWR", + "JZMMWW WMMW", + "NVRQQRRSSRRQ", + "I[RLQMRNSMRL LRXR RVQWRXSWRV", + "I[LPXP LTXT", + "I[WLMX LPXP LTXT", + "I[LNXN LRXR LVXV", + "JZWLMRWX", + "JZMLWRMX", + "JZWKMOWS MTWT MXWX", + "JZMKWOMS MTWT MXWX", + "H[YUWUUTTSRPQOONNNLOKQKRLTNUOUQTRSTPUOWNYN", + "JZLTLRMPOPUSWSXR LRMQOQUTWTXRXP", + "JZMSRPWS MSRQWS", + "NVSKPO SKTLPO", + "NVQKTO QKPLTO", + "LXNKOMQNSNUMVK NKONQOSOUNVK", + "NVSLRMQLRKSLSNQP", + "NVSKQMQORPSORNQO", + "NVQLRMSLRKQLQNSP", + "NVQKSMSORPQORNSO", + "", + "JZWMQMONNOMQMSNUOVQWWW", + "JZMMMSNUOVQWSWUVVUWSWM", + "JZMMSMUNVOWQWSVUUVSWMW", + "JZMWMQNOONQMSMUNVOWQWW", + "JZWMQMONNOMQMSNUOVQWWW MRUR", + "I[TOUPXRUTTU UPWRUT LRWR", + "MWRMRX OPPORLTOUP PORMTO", + "I[POOPLROTPU OPMROT MRXR", + "MWRLRW OTPURXTUUT PURWTU", + "KYVSUPSOQOOPNQMSMUNWPXRXTWUVVTWQWNVLTKQKPLQLRK QOOQNSNVPX RXTVUTVQVNULTK", + "JZLKRX MKRV XKRX LKXK NLWL", + "G[IOLORW KORX [FRX", + "I[XIXJYJYIXHVHTJSLROQUPYO[ UITKSORUQXPZN\\L\\K[KZLZL[", + "I[XIXJYJYIXHVHTJSLROQUPYO[ UITKSORUQXPZN\\L\\K[KZLZL[ QNOONQNSOUQVSVUUVSVQUOSNQN", + "H\\ZRYTWUVUTTSSQPPONNMNKOJQJRKTMUNUPTQSSPTOVNWNYOZQZR", + "JZXKLX OKPLPNOOMOLNLLMKOKSLVLXK UTTUTWUXWXXWXUWTUT", + "J[YPXPXQYQYPXOWOVPUTTVSWQXOXMWLVLTMSORRPSNSLRKPKOLONPQUWWXXXYW OXMVMTOR ONPPVWWX", + "J[UPSOQOPQPRQTSTUS UOUSVTXTYRYQXNVLSKRKOLMNLQLRMUOWRXSXVW", + "KZQHQ\\ 
THT\\ WLVLVMWMWLUKPKNLNNOPVSWT NNOOVRWTWVVWTXQXOWNVNUOUOVNV", + "KYPKP[ TKT[ MQWQ MUWU", + "LXTLSLSMTMTLSKQKPLPNQPTRUS PNQOTQUSUUSW QPOROTPVSXTY OTPUSWTYT[S\\Q\\P[PZQZQ[P[", + "LXRKQLRMSLRK RMRQ RQQSRVSSRQ RVR\\ POONNOOPPOTOUNVOUPTO", + "LXRMSLRKQLRMRQQRSURV RQSRQURVRZQ[R\\S[RZ POONNOOPPOTOUNVOUPTO PXOWNXOYPXTXUWVXUYTX", + "LYVKVX NKVK QQVQ NXVX", + "", + "H\\QKNLLNKQKSLVNXQYSYVXXVYSYQXNVLSKQK RQQRRSSRRQ", + "LYQKPLPMQN TKULUMTN RNPOOQORPTRUSUUTVRVQUOSNRN RURY SUSY OWVW", + "LYRKPLONOOPQRRSRUQVOVNULSKRK RRRX SRSX OUVU", + "H\\QKNLLNKQKSLVNXQYSYVXXVYSYQXNVLSKQK RKRY KRYR", + "JYRRPQOQMRLTLUMWOXPXRWSUSTRR WMRR RMWMWR RMVNWR", + "JZLLMKOKQLRNRPQRPSNT OKPLQNQQPS VKUX WKTX NTXT", + "JYNKNU OKNR NROPQOSOUPVQVTTVTXUYVYWX SOUQUTTV LKOK", + "LYONRKRQ VNSKSQ RQPROTOUPWRXSXUWVUVTURSQ RTRUSUSTRT", + "JZRKRY MKMPNRPSTSVRWPWK LMMKNM QMRKSM VMWKXM OVUV", + "JYNKNX OKOX LKSKVLWNWOVQSROR SKULVNVOUQSR LXVXVUUX", + "LYWKTKQLONNQNSOVQXTYWY WKTLRNQQQSRVTXWY", + "JZRRPQOQMRLTLUMWOXPXRWSUSTRR SLQQ WMRR XQSS", + "KYPMTW TMPW MPWT WPMT", + "J[OUMULVLXMYOYPXPVNTMRMONMOLQKTKVLWMXOXRWTUVUXVYXYYXYVXUVU NMPLULWM", + "J[OOMOLNLLMKOKPLPNNPMRMUNWOXQYTYVXWWXUXRWPUNULVKXKYLYNXOVO NWPXUXWW", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "F^KHK\\ LHL\\ XHX\\ YHY\\ HH\\H H\\O\\ U\\\\\\", + "H]KHRQJ\\ JHQQ JHYHZMXH K[X[ J\\Y\\ZWX\\", + "KYVBTDRGPKOPOTPYR]T`Vb TDRHQKPPPTQYR\\T`", + "KYNBPDRGTKUPUTTYR]P`Nb PDRHSKTPTTSYR\\P`", + 
"KYOBOb PBPb OBVB ObVb", + "KYTBTb UBUb NBUB NbUb", + "JYTBQEPHPJQMSOSPORSTSUQWPZP\\Q_Tb RDQGQKRN RVQYQ]R`", + "KZPBSETHTJSMQOQPURQTQUSWTZT\\S_Pb RDSGSKRN RVSYS]R`", + "KYU@RCPFOIOLPOSVTYT\\S_Ra RCQEPHPKQNTUUXU[T^RaOd", + "KYO@RCTFUIULTOQVPYP\\Q_Ra RCSETHTKSNPUOXO[P^RaUd", + "AXCRGRR` GSRa FSRb X:Rb", + "F^[CZD[E\\D\\C[BYBWCUETGSJRNPZO^N` VDUFTJRVQZP]O_MaKbIbHaH`I_J`Ia", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", 
+ "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "H\\RFK[ RFY[ RIX[ MUVU I[O[ U[[[", + "G]LFL[ MFM[ IFUFXGYHZJZLYNXOUP UFWGXHYJYLXNWOUP 
MPUPXQYRZTZWYYXZU[I[ UPWQXRYTYWXYWZU[", + "G\\XIYLYFXIVGSFQFNGLIKKJNJSKVLXNZQ[S[VZXXYV QFOGMILKKNKSLVMXOZQ[", + "G]LFL[ MFM[ IFSFVGXIYKZNZSYVXXVZS[I[ SFUGWIXKYNYSXVWXUZS[", + "G\\LFL[ MFM[ SLST IFYFYLXF MPSP I[Y[YUX[", + "G[LFL[ MFM[ SLST IFYFYLXF MPSP I[P[", + "G^XIYLYFXIVGSFQFNGLIKKJNJSKVLXNZQ[S[VZXX QFOGMILKKNKSLVMXOZQ[ XSX[ YSY[ US\\S", + "F^KFK[ LFL[ XFX[ YFY[ HFOF UF\\F LPXP H[O[ U[\\[", + "MXRFR[ SFS[ OFVF O[V[", + "KZUFUWTZR[P[NZMXMVNUOVNW TFTWSZR[ QFXF", + "F\\KFK[ LFL[ YFLS QOY[ POX[ HFOF UF[F H[O[ U[[[", + "I[NFN[ OFO[ KFRF K[Z[ZUY[", + "F_KFK[ LFRX KFR[ YFR[ YFY[ ZFZ[ HFLF YF]F H[N[ V[][", + "G^LFL[ MFYY MHY[ YFY[ IFMF VF\\F I[O[", + "G]QFNGLIKKJOJRKVLXNZQ[S[VZXXYVZRZOYKXIVGSFQF QFOGMILKKOKRLVMXOZQ[ S[UZWXXVYRYOXKWIUGSF", + "G]LFL[ MFM[ IFUFXGYHZJZMYOXPUQMQ UFWGXHYJYMXOWPUQ I[P[", + "G]QFNGLIKKJOJRKVLXNZQ[S[VZXXYVZRZOYKXIVGSFQF QFOGMILKKOKRLVMXOZQ[ S[UZWXXVYRYOXKWIUGSF NYNXOVQURUTVUXV_W`Y`Z^Z] UXV\\W^X_Y_Z^", + "G]LFL[ MFM[ IFUFXGYHZJZLYNXOUPMP UFWGXHYJYLXNWOUP I[P[ RPTQURXYYZZZ[Y TQUSWZX[Z[[Y[X", + "H\\XIYFYLXIVGSFPFMGKIKKLMMNOOUQWRYT KKMMONUPWQXRYTYXWZT[Q[NZLXKUK[LX", + "I\\RFR[ SFS[ LFKLKFZFZLYF O[V[", + "F^KFKULXNZQ[S[VZXXYUYF LFLUMXOZQ[ HFOF VF\\F", + "H\\KFR[ LFRX YFR[ IFOF UF[F", + "F^JFN[ KFNV RFN[ RFV[ SFVV ZFV[ GFNF WF]F", + "H\\KFX[ LFY[ YFK[ IFOF UF[F I[O[ U[[[", + "H]KFRQR[ LFSQS[ ZFSQ IFOF VF\\F O[V[", + "H\\XFK[ YFL[ LFKLKFYF K[Y[YUX[", + "H\\RFK[ RFY[ RIX[ MUVU I[O[ U[[[", + "G]LFL[ MFM[ IFUFXGYHZJZLYNXOUP UFWGXHYJYLXNWOUP MPUPXQYRZTZWYYXZU[I[ UPWQXRYTYWXYWZU[", + "I[NFN[ OFO[ KFZFZLYF K[R[", + "H\\RFJ[ RFZ[ RIY[ KZYZ J[Z[", + "G\\LFL[ MFM[ SLST IFYFYLXF MPSP I[Y[YUX[", + "H\\XFK[ YFL[ LFKLKFYF K[Y[YUX[", + "F^KFK[ LFL[ XFX[ YFY[ HFOF UF\\F LPXP H[O[ U[\\[", + "G]QFNGLIKKJOJRKVLXNZQ[S[VZXXYVZRZOYKXIVGSFQF QFOGMILKKOKRLVMXOZQ[ S[UZWXXVYRYOXKWIUGSF OMOT UMUT OPUP OQUQ", + "MXRFR[ SFS[ OFVF O[V[", + "F\\KFK[ LFL[ YFLS QOY[ POX[ HFOF UF[F H[O[ U[[[", + "H\\RFK[ RFY[ RIX[ I[O[ U[[[", + "F_KFK[ LFRX KFR[ YFR[ YFY[ ZFZ[ HFLF YF]F H[N[ V[][", + 
"G^LFL[ MFYY MHY[ YFY[ IFMF VF\\F I[O[", + "G]KEJJ ZEYJ ONNS VNUS KWJ\\ ZWY\\ KGYG KHYH OPUP OQUQ KYYY KZYZ", + "G]QFNGLIKKJOJRKVLXNZQ[S[VZXXYVZRZOYKXIVGSFQF QFOGMILKKOKRLVMXOZQ[ S[UZWXXVYRYOXKWIUGSF", + "F^KFK[ LFL[ XFX[ YFY[ HF\\F H[O[ U[\\[", + "G]LFL[ MFM[ IFUFXGYHZJZMYOXPUQMQ UFWGXHYJYMXOWPUQ I[P[", + "H]KFRPJ[ JFQP JFYFZLXF KZXZ J[Y[ZUX[", + "I\\RFR[ SFS[ LFKLKFZFZLYF O[V[", + "I\\KKKILGMFOFPGQIRMR[ KIMGOGQI ZKZIYGXFVFUGTISMS[ ZIXGVGTI O[V[", + "H]RFR[ SFS[ PKMLLMKOKRLTMUPVUVXUYTZRZOYMXLUKPK PKNLMMLOLRMTNUPV UVWUXTYRYOXMWLUK OFVF O[V[", + "H\\KFX[ LFY[ YFK[ IFOF UF[F I[O[ U[[[", + "G^RFR[ SFS[ IMJLLMMQNSOTQU JLKMLQMSNTQUTUWTXSYQZM[L TUVTWSXQYM[L\\M OFVF O[V[", + "G]JXK[O[MWKSJPJLKIMGPFTFWGYIZLZPYSWWU[Y[ZX MWLTKPKLLINGPF TFVGXIYLYPXTWW KZNZ VZYZ", + "H\\UFH[ UFV[ THU[ LUUU F[L[ R[X[", + "F^OFI[ PFJ[ LFWFZG[I[KZNYOVP WFYGZIZKYNXOVP MPVPXQYSYUXXVZR[F[ VPWQXSXUWXUZR[", + "H]ZH[H\\F[L[JZHYGWFTFQGOIMLLOKSKVLYMZP[S[UZWXXV TFRGPINLMOLSLVMYNZP[", + "F]OFI[ PFJ[ LFUFXGYHZKZOYSWWUYSZO[F[ UFWGXHYKYOXSVWTYRZO[", + "F]OFI[ PFJ[ TLRT LF[FZLZF MPSP F[U[WVT[", + "F\\OFI[ PFJ[ TLRT LF[FZLZF MPSP F[M[", + "H^ZH[H\\F[L[JZHYGWFTFQGOIMLLOKSKVLYMZP[R[UZWXYT TFRGPINLMOLSLVMYNZP[ R[TZVXXT UT\\T", + "E_NFH[ OFI[ [FU[ \\FV[ KFRF XF_F LPXP E[L[ R[Y[", + "LYUFO[ VFP[ RFYF L[S[", + "I[XFSWRYQZO[M[KZJXJVKULVKW WFRWQYO[ TF[F", + "F]OFI[ PFJ[ ]FLS SOW[ ROV[ LFSF YF_F F[M[ S[Y[", + "H\\QFK[ RFL[ NFUF H[W[YUV[", + "E`NFH[ NFO[ OFPY \\FO[ \\FV[ ]FW[ KFOF \\F`F E[K[ S[Z[", + "F_OFI[ OFVX OIV[ \\FV[ LFOF YF_F F[L[", + "G]SFPGNILLKOJSJVKYLZN[Q[TZVXXUYRZNZKYHXGVFSF SFQGOIMLLOKSKVLYN[ Q[SZUXWUXRYNYKXHVF", + "F]OFI[ PFJ[ LFXF[G\\I\\K[NYPUQMQ XFZG[I[KZNXPUQ F[M[", + "G]SFPGNILLKOJSJVKYLZN[Q[TZVXXUYRZNZKYHXGVFSF SFQGOIMLLOKSKVLYN[ Q[SZUXWUXRYNYKXHVF LYLXMVOUPURVSXS_T`V`W^W] SXT^U_V_W^", + "F^OFI[ PFJ[ LFWFZG[I[KZNYOVPMP WFYGZIZKYNXOVP RPTQURVZW[Y[ZYZX URWYXZYZZY F[M[", + "G^ZH[H\\F[L[JZHYGVFRFOGMIMKNMONVRXT MKOMVQWRXTXWWYVZS[O[LZKYJWJUI[JYKY", + "H]UFO[ VFP[ OFLLNF]F\\L\\F L[S[", + 
"F_NFKQJUJXKZN[R[UZWXXU\\F OFLQKUKXLZN[ KFRF YF_F", + "H\\NFO[ OFPY \\FO[ LFRF XF^F", + "E_MFK[ NFLY UFK[ UFS[ VFTY ]FS[ JFQF ZF`F", + "G]NFU[ OFV[ \\FH[ LFRF XF^F F[L[ R[X[", + "H]NFRPO[ OFSPP[ ]FSP LFRF YF_F L[S[", + "G][FH[ \\FI[ OFLLNF\\F H[V[XUU[", + "H\\KILKXWYYY[ LLXX KIKKLMXYY[ PPLTKVKXLZK[ KVMZ LTLVMXMZK[ SSXN VIVLWNYNYLWKVI VIWLYN", + "H\\QIK[ SIY[ RIX[ MUVU I[O[ U[[[ QBOCNENGOIQJSJUIVGVEUCSBQB", + "", + "", + "", + "", + "", + "G]IB[b", + "F^RJIZ RJ[Z", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "I]NONPMPMONNPMTMVNWOXQXXYZZ[ WOWXXZZ[[[ WQVRPSMTLVLXMZP[S[UZWX PSNTMVMXNZP[", + "G\\LFL[ MFM[ MPONQMSMVNXPYSYUXXVZS[Q[OZMX SMUNWPXSXUWXUZS[ IFMF", + "H[WPVQWRXQXPVNTMQMNNLPKSKULXNZQ[S[VZXX QMONMPLSLUMXOZQ[", + "H]WFW[ XFX[ WPUNSMQMNNLPKSKULXNZQ[S[UZWX QMONMPLSLUMXOZQ[ TFXF W[[[", + "H[LSXSXQWOVNTMQMNNLPKSKULXNZQ[S[VZXX WSWPVN QMONMPLSLUMXOZQ[", + "KXUGTHUIVHVGUFSFQGPIP[ SFRGQIQ[ MMUM M[T[", + "I\\QMONNOMQMSNUOVQWSWUVVUWSWQVOUNSMQM ONNPNTOV UVVTVPUN VOWNYMYNWN NUMVLXLYM[P\\U\\X]Y^ LYMZP[U[X\\Y^Y_XaUbObLaK_K^L\\O[", + "G]LFL[ MFM[ MPONRMTMWNXPX[ TMVNWPW[ IFMF I[P[ T[[[", + "MXRFQGRHSGRF RMR[ SMS[ OMSM O[V[", + "MXSFRGSHTGSF TMT_SaQbObNaN`O_P`Oa SMS_RaQb PMTM", + "G\\LFL[ MFM[ WMMW RSX[ QSW[ IFMF TMZM I[P[ T[Z[", + "MXRFR[ SFS[ OFSF O[V[", + "BcGMG[ HMH[ HPJNMMOMRNSPS[ OMQNRPR[ SPUNXMZM]N^P^[ ZM\\N]P][ DMHM D[K[ O[V[ Z[a[", + "G]LML[ MMM[ MPONRMTMWNXPX[ TMVNWPW[ IMMM I[P[ T[[[", + "H\\QMNNLPKSKULXNZQ[S[VZXXYUYSXPVNSMQM QMONMPLSLUMXOZQ[ S[UZWXXUXSWPUNSM", + "G\\LMLb MMMb MPONQMSMVNXPYSYUXXVZS[Q[OZMX SMUNWPXSXUWXUZS[ IMMM IbPb", + "H\\WMWb XMXb WPUNSMQMNNLPKSKULXNZQ[S[UZWX QMONMPLSLUMXOZQ[ Tb[b", + "IZNMN[ OMO[ OSPPRNTMWMXNXOWPVOWN KMOM K[R[", + "J[WOXMXQWOVNTMPMNNMOMQNRPSUUWVXW MPNQPRUTWUXVXYWZU[Q[OZNYMWM[NY", + "KZPFPWQZS[U[WZXX QFQWRZS[ MMUM", + "G]LMLXMZP[R[UZWX MMMXNZP[ WMW[ XMX[ IMMM TMXM W[[[", + "I[LMR[ MMRY XMR[ JMPM TMZM", + "F^JMN[ KMNX RMN[ RMV[ SMVX ZMV[ GMNM WM]M", + "H\\LMW[ MMX[ XML[ JMPM TMZM J[P[ 
T[Z[", + "H[LMR[ MMRY XMR[P_NaLbKbJaK`La JMPM TMZM", + "I[WML[ XMM[ MMLQLMXM L[X[XWW[", + "G^QMNNLPKRJUJXKZN[P[RZUWWTYPZM QMONMPLRKUKXLZN[ QMSMUNVPXXYZZ[ SMTNUPWXXZZ[[[", + "G\\TFQGOIMMLPKTJZIb TFRGPINMMPLTKZJb TFVFXGYHYKXMWNTOPO VFXHXKWMVNTO POTPVRWTWWVYUZR[P[NZMYLV POSPURVTVWUYTZR[", + "H\\IPKNMMOMQNROSRSVRZOb JOLNPNRO ZMYPXRSYP^Nb YMXPWRSY", + "I\\VNTMRMONMQLTLWMYNZP[R[UZWWXTXQWOSJRHRFSEUEWFYH RMPNNQMTMXNZ R[TZVWWTWPVNTKSISGTFVFYH", + "I[XPVNTMPMNNNPPRSS PMONOPQRSS SSNTLVLXMZP[S[UZWX SSOTMVMXNZP[", + "I[TFRGQHQIRJUKZKZJWKSMPOMRLULWMYP[S]T_TaSbQbPa ULQONRMUMWNYP[", + "G]HQIOKMNMONOPNTL[ MMNNNPMTK[ NTPPRNTMVMXNYOYRXWUb VMXOXRWWTb", + "F]GQHOJMMMNNNPMUMXNZO[ LMMNMPLULXMZO[Q[SZUXWUXRYMYIXGVFTFRHRJSMUPWRZT SZUWVUWRXMXIWGVF", + "LXRMPTOXOZP[S[UYVW SMQTPXPZQ[", + "H\\NMJ[ OMK[ XMYNZNYMWMUNQROSMS OSQTSZT[ OSPTRZS[U[WZYW", + "H\\KFMFOGPHQJWXXZY[ MFOHPJVXWZY[Z[ RMJ[ RMK[", + "F]MMGb NMHb MPLVLYN[P[RZTXVU XMUXUZV[Y[[Y\\W YMVXVZW[", + "H\\NML[ OMNSMXL[ YMXQVU ZMYPXRVUTWQYOZL[ KMOM", + "IZTFRGQHQIRJUKXK UKQLOMNONQPSSTVT UKRLPMOOOQQSST STOUMVLXLZN\\S^T_TaRbPb STPUNVMXMZO\\S^", + "I[RMONMQLTLWMYNZP[R[UZWWXTXQWOVNTMRM RMPNNQMTMXNZ R[TZVWWTWPVN", + "G]PNL[ PNM[ VNV[ VNW[ IPKNNM[M IPKONN[N", + "H[LVMYNZP[R[UZWWXTXQWOVNTMRMONMQLTHb R[TZVWWTWPVN RMPNNQMTIb", + "H][MQMNNLQKTKWLYMZO[Q[TZVWWTWQVOUNSM QMONMQLTLXMZ Q[SZUWVTVPUN UN[N", + "H\\SNP[ SNQ[ JPLNOMZM JPLOONZN", + "H\\IQJOLMOMPNPPNVNYP[ NMONOPMVMYNZP[Q[TZVXXUYRYOXMWNXOYR XUYO", + "G]ONMOKQJTJWKYLZN[Q[TZWXYUZRZOXMVMTORSPXMb JWLYNZQZTYWWYU ZOXNVNTPRSPYNb", + "I[KMMMONPPU_VaWb MMNNOPT_UaWbYb ZMYOWRM]K`Jb", + "F]UFOb VFNb GQHOJMMMNNNPMUMXOZRZTYWVYS LMMNMPLULXMZO[R[TZVXXUYS[M", + "F]JQLOONNMLNJQITIWJZK[M[OZQWRT IWJYKZMZOYQW QTQWRZS[U[WZYWZTZQYNXMWNYOZQ QWRYSZUZWYYW", + "H]XMVTUXUZV[Y[[Y\\W YMWTVXVZW[ VTVQUNSMQMNNLQKTKWLYMZO[Q[SZUWVT QMONMQLTLXMZ", + "H[PFLSLVMYNZ QFMS MSNPPNRMTMVNWOXQXTWWUZR[P[NZMWMS VNWPWTVWTZR[ MFQF", + "I[WPWQXQXPWNUMRMONMQLTLWMYNZP[R[UZWW RMPNNQMTMXNZ", + "H]ZFVTUXUZV[Y[[Y\\W [FWTVXVZW[ 
VTVQUNSMQMNNLQKTKWLYMZO[Q[SZUWVT QMONMQLTLXMZ WF[F", + "I[MVQUTTWRXPWNUMRMONMQLTLWMYNZP[R[UZWX RMPNNQMTMXNZ", + "KZZGYHZI[H[GZFXFVGUHTJSMP[O_Na XFVHUJTNRWQ[P^O`NaLbJbIaI`J_K`Ja OMYM", + "H\\YMU[T^RaObLbJaI`I_J^K_J` XMT[S^QaOb VTVQUNSMQMNNLQKTKWLYMZO[Q[SZUWVT QMONMQLTLXMZ", + "H]PFJ[ QFK[ MTOPQNSMUMWNXOXQVWVZW[ UMWOWQUWUZV[Y[[Y\\W MFQF", + "LYUFTGUHVGUF MQNOPMSMTNTQRWRZS[ RMSNSQQWQZR[U[WYXW", + "LYVFUGVHWGVF NQOOQMTMUNUQR[Q^P`OaMbKbJaJ`K_L`Ka SMTNTQQ[P^O`Mb", + "H\\PFJ[ QFK[ XNWOXPYOYNXMWMUNQROSMS OSQTSZT[ OSPTRZS[U[WZYW MFQF", + "MYUFQTPXPZQ[T[VYWW VFRTQXQZR[ RFVF", + "AbBQCOEMHMINIPHTF[ GMHNHPGTE[ HTJPLNNMPMRNSOSQP[ PMRORQO[ RTTPVNXMZM\\N]O]Q[W[Z\\[ ZM\\O\\QZWZZ[[^[`YaW", + "F]GQHOJMMMNNNPMTK[ LMMNMPLTJ[ MTOPQNSMUMWNXOXQVWVZW[ UMWOWQUWUZV[Y[[Y\\W", + "I[RMONMQLTLWMYNZP[R[UZWWXTXQWOVNTMRM RMPNNQMTMXNZ R[TZVWWTWPVN", + "G\\HQIOKMNMONOPNTJb MMNNNPMTIb NTOQQNSMUMWNXOYQYTXWVZS[Q[OZNWNT WNXPXTWWUZS[ FbMb", + "H\\XMRb YMSb VTVQUNSMQMNNLQKTKWLYMZO[Q[SZUWVT QMONMQLTLXMZ ObVb", + "IZJQKOMMPMQNQPPTN[ OMPNPPOTM[ PTRPTNVMXMYNYOXPWOXN", + "J[XOXPYPYOXNUMRMONNONQORVVWW NPOQVUWVWYVZS[P[MZLYLXMXMY", + "KYTFPTOXOZP[S[UYVW UFQTPXPZQ[ NMWM", + "F]GQHOJMMMNNNQLWLYN[ LMMNMQKWKYLZN[P[RZTXVT XMVTUXUZV[Y[[Y\\W YMWTVXVZW[", + "H\\IQJOLMOMPNPQNWNYP[ NMONOQMWMYNZP[Q[TZVXXUYQYMXMYO", + "C`DQEOGMJMKNKQIWIYK[ IMJNJQHWHYIZK[M[OZQXRV TMRVRYSZU[W[YZ[X\\V]R]M\\M]O UMSVSYU[", + "H\\KQMNOMRMSOSR QMRORRQVPXNZL[K[JZJYKXLYKZ QVQYR[U[WZYW YNXOYPZOZNYMXMVNTPSRRVRYS[", + "G\\HQIOKMNMONOQMWMYO[ MMNNNQLWLYMZO[Q[SZUXWT ZMV[U^SaPbMbKaJ`J_K^L_K` YMU[T^RaPb", + "H\\YMXOVQNWLYK[ LQMOOMRMVO MOONRNVOXO LYNYRZUZWY NYR[U[WYXW", + "G^VGUHVIWHWGUFRFOGMILLL[ RFPGNIMLM[ \\G[H\\I]H]G\\FZFXGWIW[ ZFYGXIX[ IM[M I[P[ T[[[", + "G]WGVHWIXHWGUFRFOGMILLL[ RFPGNIMLM[ WMW[ XMX[ IMXM I[P[ T[[[", + "G]VGUHVIWHWGUF XFRFOGMILLL[ RFPGNIMLM[ WHW[ XFX[ IMWM I[P[ T[[[", + "BcRGQHRISHRGPFMFJGHIGLG[ MFKGIIHLH[ ]G\\H]I^H]G[FXFUGSIRLR[ XFVGTISLS[ ]M][ ^M^[ DM^M D[K[ O[V[ Z[a[", + "BcRGQHRISHRGPFMFJGHIGLG[ MFKGIIHLH[ \\G[H\\I]H]G[F ^FXFUGSIRLR[ 
XFVGTISLS[ ]H][ ^F^[ DM]M D[K[ O[V[ Z[a[", + "MXRMR[ SMS[ OMSM O[V[", + "", + "IZWNUMRMONMPLSLVMYNZQ[T[VZ RMPNNPMSMVNYOZQ[ MTUT", + "I\\TFQGOJNLMOLTLXMZO[Q[TZVWWUXRYMYIXGVFTF TFRGPJOLNOMTMXNZO[ Q[SZUWVUWRXMXIWGVF NPWP", + "G]UFOb VFNb QMMNKPJSJVKXMZP[S[WZYXZUZRYPWNTMQM QMNNLPKSKVLXNZP[ S[VZXXYUYRXPVNTM", + "I[TMVNXPXOWNTMQMNNMOLQLSMUOWSZ QMONNOMQMSNUSZT\\T^S_Q_", + "", + "", + "G]LMKNJPJRKUOYP[ JRKTOXP[P]O`MbLbKaJ_J\\KXMTOQRNTMVMYNZPZTYXWZU[T[SZSXTWUXTY VMXNYPYTXXWZ", + "E_YGXHYIZHYGWFTFQGOINKMNLRJ[I_Ha TFRGPIOKNNLWK[J^I`HaFbDbCaC`D_E`Da _G^H_I`H`G_F]F[GZHYJXMU[T_Sa ]F[HZJYNWWV[U^T`SaQbObNaN`O_P`Oa IM^M", + "F^[GZH[I\\H[GXFUFRGPIOKNNMRK[J_Ia UFSGQIPKONMWL[K^J`IaGbEbDaD`E_F`Ea YMWTVXVZW[Z[\\Y]W ZMXTWXWZX[ JMZM", + "F^YGXHYIZHZGXF \\FUFRGPIOKNNMRK[J_Ia UFSGQIPKONMWL[K^J`IaGbEbDaD`E_F`Ea [FWTVXVZW[Z[\\Y]W \\FXTWXWZX[ JMYM", + "@cTGSHTIUHTGRFOFLGJIIKHNGRE[D_Ca OFMGKIJKINGWF[E^D`CaAb?b>a>`?_@`?a `G_H`IaH`G]FZFWGUITKSNRRP[O_Na ZFXGVIUKTNRWQ[P^O`NaLbJbIaI`J_K`Ja ^M\\T[X[Z\\[_[aYbW _M]T\\X\\Z][ DM_M", + "@cTGSHTIUHTGRFOFLGJIIKHNGRE[D_Ca OFMGKIJKINGWF[E^D`CaAb?b>a>`?_@`?a ^G]H^I_H_G]F aFZFWGUITKSNRRP[O_Na ZFXGVIUKTNRWQ[P^O`NaLbJbIaI`J_K`Ja `F\\T[X[Z\\[_[aYbW aF]T\\X\\Z][ DM^M", + "LYMQNOPMSMTNTQRWRZS[ RMSNSQQWQZR[U[WYXW", + "", + "NV", + "JZ", + "H\\QFNGLJKOKRLWNZQ[S[VZXWYRYOXJVGSFQF QFOGNHMJLOLRMWNYOZQ[ S[UZVYWWXRXOWJVHUGSF", + "H\\NJPISFS[ RGR[ N[W[", + "H\\LJMKLLKKKJLHMGPFTFWGXHYJYLXNUPPRNSLUKXK[ TFVGWHXJXLWNTPPR KYLXNXSZVZXYYX NXS[W[XZYXYV", + "H\\LJMKLLKKKJLHMGPFTFWGXIXLWNTOQO TFVGWIWLVNTO TOVPXRYTYWXYWZT[P[MZLYKWKVLUMVLW WQXTXWWYVZT[", + "H\\THT[ UFU[ UFJUZU Q[X[", + "H\\MFKP KPMNPMSMVNXPYSYUXXVZS[P[MZLYKWKVLUMVLW SMUNWPXSXUWXUZS[ MFWF MGRGWF", + "H\\WIVJWKXJXIWGUFRFOGMILKKOKULXNZQ[S[VZXXYUYTXQVOSNRNOOMQLT RFPGNIMKLOLUMXOZQ[ S[UZWXXUXTWQUOSN", + "H\\KFKL KJLHNFPFUIWIXHYF LHNGPGUI YFYIXLTQSSRVR[ XLSQRSQVQ[", + "H\\PFMGLILLMNPOTOWNXLXIWGTFPF PFNGMIMLNNPO TOVNWLWIVGTF POMPLQKSKWLYMZP[T[WZXYYWYSXQWPTO PONPMQLSLWMYNZP[ T[VZWYXWXSWQVPTO", + 
"H\\XMWPURRSQSNRLPKMKLLINGQFSFVGXIYLYRXVWXUZR[O[MZLXLWMVNWMX QSORMPLMLLMIOGQF SFUGWIXLXRWVVXTZR[", + "MWRYQZR[SZRY", + "MWR[QZRYSZS\\R^Q_", + "MWRMQNROSNRM RYQZR[SZRY", + "MWRMQNROSNRM R[QZRYSZS\\R^Q_", + "MWRFQHRTSHRF RHRN RYQZR[SZRY", + "I[MJNKMLLKLJMHNGPFSFVGWHXJXLWNVORQRT SFUGVHWJWLVNTP RYQZR[SZRY", + "NVRFQM SFQM", + "JZNFMM OFMM VFUM WFUM", + "KYQFOGNINKOMQNSNUMVKVIUGSFQF", + "JZRFRR MIWO WIMO", + "G][BIb", + "KYVBTDRGPKOPOTPYR]T`Vb TDRHQKPPPTQYR\\T`", + "KYNBPDRGTKUPUTTYR]P`Nb PDRHSKTPTTSYR\\P`", + "KYOBOb PBPb OBVB ObVb", + "KYTBTb UBUb NBUB NbUb", + "JYTBQEPHPJQMSOSPORSTSUQWPZP\\Q_Tb RDQGQKRN RVQYQ]R`", + "KZPBSETHTJSMQOQPURQTQUSWTZT\\S_Pb RDSGSKRN RVSYS]R`", + "KYUBNRUb", + "KYOBVROb", + "NVRBRb", + "KYOBOb UBUb", + "E_IR[R", + "E_RIR[ IR[R", + "F^RJR[ JRZR J[Z[", + "F^RJR[ JJZJ JRZR", + "G]KKYY YKKY", + "MWQQQSSSSQQQ RQRS QRSR", + "E_RIQJRKSJRI IR[R RYQZR[SZRY", + "E_IO[O IU[U", + "E_YIK[ IO[O IU[U", + "E_IM[M IR[R IW[W", + "F^ZIJRZ[", + "F^JIZRJ[", + "F^ZFJMZT JVZV J[Z[", + "F^JFZMJT JVZV J[Z[", + "F_[WYWWVUTRPQOONMNKOJQJSKUMVOVQURTUPWNYM[M", + "F^IUISJPLONOPPTSVTXTZS[Q ISJQLPNPPQTTVUXUZT[Q[O", + "G]JTROZT JTRPZT", + "LXTFOL TFUGOL", + "LXPFUL PFOGUL", + "H\\KFLHNJQKSKVJXHYF KFLINKQLSLVKXIYF", + "MWRHQGRFSGSIRKQL", + "MWSFRGQIQKRLSKRJ", + "MWRHSGRFQGQIRKSL", + "MWQFRGSISKRLQKRJ", + "E[HMLMRY KMR[ [BR[", + "F^ZJSJOKMLKNJQJSKVMXOYSZZZ", + "F^JJJQKULWNYQZSZVYXWYUZQZJ", + "F^JJQJUKWLYNZQZSYVWXUYQZJZ", + "F^JZJSKOLMNKQJSJVKXMYOZSZZ", + "F^ZJSJOKMLKNJQJSKVMXOYSZZZ JRVR", + "E_XP[RXT UMZRUW IRZR", + "JZPLRITL MORJWO RJR[", + "E_LPIRLT OMJROW JR[R", + "JZPXR[TX MURZWU RIRZ", + "I\\XRWOVNTMRMONMQLTLWMYNZP[R[UZWXXUYPYKXHWGUFRFPGOHOIPIPH RMPNNQMTMXNZ R[TZVXWUXPXKWHUF", + "H\\JFR[ KFRY ZFR[ JFZF KGYG", + "AbDMIMRY HNR[ b:R[", + "F^[CZD[E\\D\\C[BYBWCUETGSJRNPZO^N` VDUFTJRVQZP]O_MaKbIbHaH`I_J`Ia", + "F^[CZD[E\\D\\C[BYBWCUETGSJRNPZO^N` VDUFTJRVQZP]O_MaKbIbHaH`I_J`Ia QKNLLNKQKSLVNXQYSYVXXVYSYQXNVLSKQK", + "F_\\S[UYVWVUUTTQPPONNLNJOIQISJULVNVPUQTTPUOWNYN[O\\Q\\S", + 
"F^[FI[ NFPHPJOLMMKMIKIIJGLFNFPGSHVHYG[F WTUUTWTYV[X[ZZ[X[VYTWT", + "F_[NZO[P\\O\\N[MZMYNXPVUTXRZP[M[JZIXIUJSPORMSKSIRGPFNGMIMKNNPQUXWZZ[[[\\Z\\Y M[KZJXJUKSMQ MKNMVXXZZ[", + "E`WNVLTKQKOLNMMPMSNUPVSVUUVS QKOMNPNSOUPV WKVSVUXVZV\\T]Q]O\\L[JYHWGTFQFNGLHJJILHOHRIUJWLYNZQ[T[WZYYZX XKWSWUXV", + "H\\PBP_ TBT_ XIWJXKYJYIWGTFPFMGKIKKLMMNOOUQWRYT KKMMONUPWQXRYTYXWZT[P[MZKXKWLVMWLX", + "G]OFOb UFUb JQZQ JWZW", + "JZUITJUKVJVIUGSFQFOGNINKOMQOVR OMTPVRWTWVVXTZ PNNPMRMTNVPXU[ NVSYU[V]V_UaSbQbOaN_N^O]P^O_", + "JZRFQHRJSHRF RFRb RQQTRbSTRQ LMNNPMNLLM LMXM TMVNXMVLTM", + "JZRFQHRJSHRF RFRT RPQRSVRXQVSRRP RTRb R^Q`RbS`R^ LMNNPMNLLM LMXM TMVNXMVLTM L[N\\P[NZL[ L[X[ T[V\\X[VZT[", + "I\\XFX[ KFXF PPXP K[X[", + "", + "E`QFNGKIILHOHRIUKXNZQ[T[WZZX\\U]R]O\\LZIWGTFQF ROQPQQRRSRTQTPSORO RPRQSQSPRP", + "J[PFNGOIQJ PFOGOI UFWGVITJ UFVGVI QJOKNLMNMQNSOTQUTUVTWSXQXNWLVKTJQJ RUR[ SUS[ NXWX", + "I\\RFOGMILLLMMPORRSSSVRXPYMYLXIVGSFRF RSR[ SSS[ NWWW", + "D`PFMGJIHLGOGSHVJYM[P\\T\\W[ZY\\V]S]O\\LZIWGTFPF RFR\\ GQ]Q", + "G`PMMNKPJSJTKWMYPZQZTYVWWTWSVPTNQMPM ]GWG[HUN ]G]M\\IVO \\HVN", + "F\\IIJGLFOFQGRIRLQOPQNSKU OFPGQIQMPPNS VFT[ WFS[ KUYU", + "I\\MFMU NFMQ MQNOONQMTMWNXPXRWTUV TMVNWPWRTXTZU[W[YY KFNF", + "I\\RNOOMQLTLUMXOZR[S[VZXXYUYTXQVOSNRN RHNJRFRN SHWJSFSN RSQTQURVSVTUTTSSRS RTRUSUSTRT", + "G^QHRFR[ THSFS[ JHKFKMLPNRQSRS MHLFLNMQ [HZFZMYPWRTSSS XHYFYNXQ NWWW", + "G]LFL[ MFM[ IFUFXGYHZJZMYOXPUQMQ UFWGXHYJYMXOWPUQ I[Y[YVX[", + "H[YGUGQHNJLMKPKSLVNYQ[U\\Y\\ YGVHSJQMPPPSQVSYV[Y\\", + "F_OQMQKRJSIUIWJYKZM[O[QZRYSWSURSQROQ SHPQ ZJRR \\QST", + "H\\OKUY UKOY KOYU YOKU", + "F^NVLUKUIVHXHYI[K\\L\\N[OYOXNVKRJOJMKJMHPGTGWHYJZMZOYRVVUXUYV[X\\Y\\[[\\Y\\X[VYUXUVV JMKKMIPHTHWIYKZM", + "F^NMLNKNIMHKHJIHKGLGNHOJOKNMKQJTJVKYM[P\\T\\W[YYZVZTYQVMUKUJVHXGYG[H\\J\\K[MYNXNVM JVKXMZP[T[WZYXZV", + "I[KYYK QLULYKXOXS ULXLXO", + "I[YKKY LQLUKYOXSX LULXOX", + "I[YYKK SLOLKKLOLS OLLLLO", + "I[KKYY QXUXYYXUXQ UXXXXU", + "", + "F_JMILIJJHLGNGPHQIRKSP IJKHMHOIPJQLRPR[ [M\\L\\J[HYGWGUHTISKRP \\JZHXHVIUJTLSPS[", + 
"F^IGJKKMMOPPTPWOYMZK[G IGJJKLMNPOTOWNYLZJ[G PONPMQLSLVMXOZQ[S[UZWXXVXSWQVPTO PPNQMSMVNY VYWVWSVQTP", + "F^MJMV NKNU VKVU WJWV IGKIMJPKTKWJYI[G IYKWMVPUTUWVYW[Y", + "F^[ILIJJILINJPLQNQPPQNQLPJ[J IMJOKPMQ QMPKOJMI IXXXZW[U[SZQXPVPTQSSSUTWIW [TZRYQWP STTVUWWX", + "F]OUMTLTJUIWIXJZL[M[OZPXPWOUJPINIKJILHOGSGWHYJZLZOYRVUUWUYV[X[YZZX MSKPJNJKKILH SGVHXJYLYOXRVU", + "G_HKKHMKMV JILLLV MKPHRKRU OIQLQU RKUHWKW[ TIVLV[ WKZH[J\\M\\P[SZUXWUYP[ YIZJ[M[PZSYUWWTYP[", + "F^ISMSLRKOKMLJNHQGSGVHXJYMYOXRWS[S ITOTMRLOLMMJOHQG SGUHWJXMXOWRUT[T KXYX KYYY", + "F_GLJIMLMX IJLMLX MLPISLSX OJRMRX SLVIYLYW[Y UJXMXXZZ]W", + "G]ZIJY ZIWJQJ XKUKQJ ZIYLYR XKXNYR QRJR PSMSJR QRQY PSPVQY", + "F^HOJKOU JMOWRPWPZO[M[KZIXHWHUITKTMUPVRWUWXUZ WHVIUKUMWQXTXWWYUZ", + "F^IOLLPN KMOORLUN QMTOWLYN VMXO[L IULRPT KSOURRUT QSTUWRYT VSXU[R", + "F^JHNJPLQOQRPUNWJY JHMIOJQLRO RRQUOWMXJY ZHWIUJSLRO RRSUUWWXZY ZHVJTLSOSRTUVWZY IP[P IQ[Q", + "", + "", + "", + "", + "NVQQQSSSSQQQ QQSS SQQS", + "JZMPQRTTVVWYW[V]U^ MQST MRPSTUVWWY", + "JZWKVMTOPQMR SPMS UFVGWIWKVNTPQRMT", + "H\\SMONLPKRKTLVNWQWUVXTYRYPXNVMSM XNSM VMQNLP ONKR LVQW NWSVXT UVYR", + "H\\SMONLPKRKTLVNWQWUVXTYRYPXNVMSM XNSM VMQNLP ONKR LVQW NWSVXT UVYR", + "J[SMPNNPMRMTNVPWRWUVWTXRXPWNUMSM OPUM NRVN MTWO NUXP OVWR PWVT", + "JZOGO^ UFU] MNWL MOWM MWWU MXWV", + "JZNFNX VLV^ NNVL NOVM NWVU NXVV", + "JZNBNW NNQLTLVMWOWQVSSUQVNW NNQMTMVN UMVOVQUSSU", + "E_HIHL \\I\\L HI\\I HJ\\J HK\\K HL\\L", + "JZMNMQ WNWQ MNWN MOWO MPWP MQWQ", + "JZMLWX MLONQOTOVNWMWKUKUMTO ONTO QOWM VKVN ULWL WXUVSUPUNVMWMYOYOWPU UVPU SUMW NVNY MXOX", + "JZPOOMOKMKMMNNPOSOUNWL NKNN MLOL MMSO POUN WLWY", + "A^GfHfIeIdHcGcFdFfGhIiKiNhPfQdR`RUQ;Q4R/S-U,V,X-Y/Y3X6W8U;P?JCHEFHEJDNDREVGYJ[N\\R\\V[XZZW[T[PZMYKWITHPHMIKKJNJRKUMW GdGeHeHdGd U;Q?LCIFGIFKENERFVGXJ[ R\\U[WZYWZTZPYMXKVITH", + "EfNSOUQVSVUUVSVQUOSNQNOONPMSMVNYP[S\\V\\Y[[Y\\W]T]P\\MZJXIUHRHOIMJKLIOHSHXI]KaMcPeTfYf]e`cba KLJNIRIXJ\\L`NbQdUeYe]d_cba POTO OPUP NQVQ NRVR NSVS OTUT PUTU aLaNcNcLaL bLbN aMcM aVaXcXcVaV bVbX aWcW", + "D`H@Hd M@Md 
W@Wd \\@\\d MMWK MNWL MOWM MWWU MXWV MYWW", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "NVQQQSSSSQQQ QQSS SQQS", + "JZMPQRTTVVWYW[V]U^ MQST MRPSTUVWWY", + "JZWKVMTOPQMR SPMS UFVGWIWKVNTPQRMT", + "H\\PMMNLOKQKSLUMVPWTWWVXUYSYQXOWNTMPM MNLPLSMUNVPW WVXTXQWOVNTM", + "H\\SMONLPKRKTLVNWQWUVXTYRYPXNVMSM XNSM VMQNLP ONKR LVQW NWSVXT UVYR", + "J[SMPNNPMRMTNVPWRWUVWTXRXPWNUMSM OPUM NRVN MTWO NUXP OVWR PWVT", + "JZOGO^ UFU] MNWL MOWM MWWU MXWV", + "JZNFNX VLV^ NNVL NOVM NWVU NXVV", + "JZNBNW NNQLTLVMWOWQVSSUQVNW NNQMTMVN UMVOVQUSSU", + "E_HIHL \\I\\L HI\\I HJ\\J HK\\K HL\\L", + "JZMNMQ WNWQ MNWN MOWO MPWP MQWQ", + "JZQCVMRTRU ULQS TITKPRRUUY W\\UYSXQXOYN[N]O_Ra W\\UZSYOYO]P_Ra SXPZN]", + "JZPOOMOKMKMMNNPOSOUNWL NKNN MLOL MMSO POUN WLSY", + "A^GfHfIeIdHcGcFdFfGhIiKiNhPfQdR`RUQ;Q4R/S-U,V,X-Y/Y3X6W8U;P?JCHEFHEJDNDREVGYJ[N\\R\\V[XZZW[T[PZMYKWITHPHMIKKJNJRKUMW GdGeHeHdGd U;Q?LCIFGIFKENERFVGXJ[ R\\U[WZYWZTZPYMXKVITH", + "IjNQOOQNSNUOVQVSUUSVQVOUNTMQMNNKPISHWH[I^K`NaRaW`[_]]`ZcVfQiMk WHZI]K_N`R`W_[^]\\`YcTgQi POTO OPUP NQVQ NRVR NSVS OTUT PUTU eLeNgNgLeL fLfN eMgM eVeXgXgVeV fVfX eWgW", + "D`H>Hf I>If M>Mf QBSBSDQDQAR?T>W>Y?[A\\D\\I[LYNWOUOSNRLQNOQNROSQVRXSVUUWUYV[X\\[\\`[cYeWfTfReQcQ`S`SbQb RBRD QCSC Y?ZA[D[IZLYN RLRNPQNRPSRVRX YVZX[[[`ZcYe R`Rb QaSa", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "AcHBHb IBIb [B[b \\B\\b DB`B DbMb Wb`b", + "BaGBQPFb FBPP EBPQ EB\\B^I[B Ga\\a Fb\\b^[[b", + "I[X+U1R8P=OANFMNMVN^OcPgRlUsXy U1S6QPBTJTLSNROMRRUSVTXTZPbOfOjPoRsVy T.R2Q5P:P>QCRF R^QaPfPjQoRrTv", + "I\\N+R1T5U:U>TBPJPLQNROWRRUQVPXPZTbUfUjToRsNy P.R2S5T:T>SCRF R^SaTfTjSoRrPv", + "I[V.S1Q4O8N=NCOIPMSXT\\UbUgTlSoQs S1Q5P8O=OBPHQLTWU[VaVgUlSpQsNv", + "I[N.Q1S4U8V=VCUITMQXP\\ObOgPlQoSs Q1S5T8U=UBTHSLPWO[NaNgOlQpSsVv", + "7Z:RARRo @RQo ?RRr Z\"VJRr", + 
"Ca].\\.[/[0\\1]1^0^.],[+Y+W,U.T0S3R:QJQjPsOv \\/\\0]0]/\\/ R:Rj U.T1S:SZRjQqPtOvMxKyIyGxFvFtGsHsItIuHvGv GtGuHuHtGt", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "H\\RFJ[ RIK[J[ RIY[Z[ RFZ[ MUWU LVXV", + "H\\LFL[ MGMZ LFTFWGXHYJYMXOWPTQ MGTGWHXJXMWOTP MPTPWQXRYTYWXYWZT[L[ MQTQWRXTXWWYTZMZ", + "H]ZKYIWGUFQFOGMILKKNKSLVMXOZQ[U[WZYXZV ZKYKXIWHUGQGOHMKLNLSMVOYQZUZWYXXYVZV", + "H]LFL[ MGMZ LFSFVGXIYKZNZSYVXXVZS[L[ MGSGVHWIXKYNYSXVWXVYSZMZ", + "I\\MFM[ NGNZ MFYF NGYGYF NPTPTQ NQTQ NZYZY[ M[Y[", + "I[MFM[ NGN[M[ MFYF NGYGYF NPTPTQ NQTQ", + "H]ZKYIWGUFQFOGMILKKNKSLVMXOZQ[U[WZYXZVZRUR ZKYKXIWHUGQGOHNIMKLNLSMVNXOYQZUZWYXXYVYSUSUR", + "G]KFK[ KFLFL[K[ YFXFX[Y[ YFY[ LPXP LQXQ", + "NWRFR[S[ RFSFS[", + "J[VFVVUYSZQZOYNVMV VFWFWVVYUZS[Q[OZNYMV", + "H]LFL[M[ LFMFM[ ZFYFMR ZFMS POY[Z[ QOZ[", + "IZMFM[ MFNFNZ NZYZY[ M[Y[", + "F^JFJ[ KKK[J[ KKR[ JFRX ZFRX YKR[ YKY[Z[ ZFZ[", + "G]KFK[ LIL[K[ LIY[ KFXX XFXX XFYFY[", + "G]PFNGLIKKJNJSKVLXNZP[T[VZXXYVZSZNYKXIVGTFPF QGNHLKKNKSLVNYQZSZVYXVYSYNXKVHSGQG", + "H\\LFL[ MGM[L[ LFUFWGXHYJYMXOWPUQMQ MGUGWHXJXMWOUPMP", + "G]PFNGLIKKJNJSKVLXNZP[T[VZXXYVZSZNYKXIVGTFPF QGNHLKKNKSLVNYQZSZVYXVYSYNXKVHSGQG SXX]Y] SXTXY]", + "H\\LFL[ MGM[L[ LFTFWGXHYJYMXOWPTQMQ MGTGWHXJXMWOTPMP RQX[Y[ SQY[", + "H\\YIWGTFPFMGKIKKLMMNOOTQVRWSXUXXWYTZPZNYMXKX YIWIVHTGPGMHLILKMMONTPVQXSYUYXWZT[P[MZKX", + "J[RGR[ SGS[R[ LFYFYG LFLGYG", + "G]KFKULXNZQ[S[VZXXYUYF KFLFLUMXNYQZSZVYWXXUXFYF", + "H\\JFR[ JFKFRX ZFYFRX ZFR[", + "E_GFM[ GFHFMX RFMX RIM[ RIW[ RFWX ]F\\FWX ]FW[", + "H\\KFX[Y[ KFLFY[ YFXFK[ 
YFL[K[", + "I\\KFRPR[S[ KFLFSP ZFYFRP ZFSPS[", + "H\\XFK[ YFL[ KFYF KFKGXG LZYZY[ K[Y[", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "E\\XFVHTKQPOSLWIZG[E[DZDXEWFXEY XFWJUTT[ XFU[ T[TYSVRTPRNQLQKRKTLWOZR[V[XZ", + "F^UGTHSJQOOUNWLZJ[ THSKQSPVOXMZJ[H[GZGXHWIXHY OLNNMOKOJNJLKJMHOGRFXFZG[I[KZMXNTORO XFYGZIZKYMXN TOWPXQYSYVXYWZU[S[RZRXSU TOVPWQXSXVWYU[", + "H]KHJJJLKNNOQOUNWMYKZIZGYFWFTGQJOMMQLULXMZP[R[UZWXXVXTWRURSSRU WFUGRJPMNQMUMXNZP[", + "F]UGTHSJQOOUNWLZJ[ THSKQSPVOXMZJ[H[GZGXHWJWLXNZP[S[UZWXYTZOZLYIWGUFPFMGKIJKJMKNMNNMOK", + "I\\WIVJVLWMYMZKZIYGWFTFRGQHPJPLQNSO TFRHQJQMSO SOQONPLRKTKWLYMZO[R[UZWXXVXTWRURSSRU QOOPMRLTLXMZ", + "G\\WHVJTORUQWOZM[ QLPNNOLOKMKKLINGQF[FXGWHVKTSSVRXPZM[K[IZHYHXIWJXIY SFWGXG OSPRRQVQXPZMXT", + "G]JIIKIMJOLPOPROTNWKXHXGWFVFTGRIQKPNPQQSSTUTWSYQZO WFUGSIRKQNQRST ZOYSWWUYSZO[L[JZIXIWJVKWJX YSWVUXRZO[", + "F^LLKKKILGOFRFOQMWLYKZI[G[FZFXGWHXGY RFOONRLWKYI[ JTKSMRVOXN[L]J^H^G]F\\FZGXJWLURTVTYV[W[YZ[X \\FZHXLVRUVUYV[", + "IYWHUKSPQUPWNZL[ YLWNTOQOONNLNJOHQGUFYFWHVJTPRVQXOZL[J[IZIXJWKXJY", + "IZYFWHUKSPPYN] YMWOTPQPOONMNKOIQGUFYFWIVKSTQXPZN]M^K_J^J\\KZMXOWRVVU", + "F^LLKKKIMGPFRFOQMWLYKZI[G[FZFXGWHXGY RFOONRLWKYI[ ZGWKUMSNPO ]G\\H]I^H^G]F\\FZGWLVMTNPO POSPTRUYV[ PORPSRTYV[W[YZ[X", + "I[MILKLMMOOPRPUOWNZK[H[GZFYFWGVHTKPUOWMZK[ VHTLRSQVPXNZK[I[HZHXIWKWMXPZR[U[WZYX", + "D`RFNOKUIXGZE[C[BZBXCWDXCY RFPMOQNVNZP[ RFQJPOOVOZP[ [FWORXP[ [FYMXQWVWZY[Z[\\Z^X [FZJYOXVXZY[", + "G^RFQJOPMULWJZH[F[EZEXFWGXFY RFRKSVT[ RFSKTVT[ `G_H`IaHaG`F^F\\GZJYLWQUWT[", + "H]SFQGOIMLLNKRKVLYMZO[Q[TZVXXUYSZOZKYHXGWGUHSJQNPSPV QGOJMNLRLVMYO[", + "F]UGTHSJQOOUNWLZJ[ THSKQSPVOXMZJ[H[GZGXHWIXHY OLNNMOKOJNJLKJMHOGRFVFYGZH[J[MZOYPVQTQRP VFXGYHZJZMYOXPVQ", + "H]UJULTNSOQPOPNNNLOIQGTFWFYGZIZMYPWSSWPYNZK[I[HZHXIWKWMXPZS[V[XZZX WFXGYIYMXPVSSVOYK[", + "F^UGTHSJQOOUNWLZJ[ THSKQSPVOXMZJ[H[GZGXHWIXHY OLNNMOKOJNJLKJMHOGRFWFZG[I[KZMYNVORO WFYGZIZKYMXNVO ROUPVRWYX[ ROTPURVYX[Y[[Z]X", + 
"H\\NIMKMMNOPPSPVOXN[K\\H\\G[FZFXGWHVJUMSTRWPZN[ VJUNTUSXQZN[K[IZHXHWIVJWIX", + "I[YHXJVOTUSWQZO[ SLRNPONOMMMKNIPGSF\\FZGYHXKVSUVTXRZO[M[KZJYJXKWLXKY UFYGZG", + "G]HJJGLFMFOHOKNNKVKYL[ MFNHNKKSJVJYL[N[PZSWUTVR ZFVRUVUYW[X[ZZ\\X [FWRVVVYW[", + "G\\HJJGLFMFOHOKNOLVLYM[ MFNHNKLRKVKYM[N[QZTWVTXPYMZIZGYFXFWGVIVLWNYP[Q]Q", + "F]ILHLGKGIHGJFNFMHLLKUJ[ LLLUK[ VFTHRLOUMYK[ VFUHTLSUR[ TLTUS[ `F^G\\IZLWUUYS[", + "H\\PKOLMLLKLIMGOFQFSGTITLSPQUOXMZJ[H[GZGXHWIXHY QFRGSISLRPPUNXLZJ[ ]G\\H]I^H^G]F[FYGWIULSPRURXSZT[U[WZYX", + "G]JJLGNFOFQGQIOOORPT OFPGPINONRPTRTUSWQYNZL \\FZLWTUX ]F[LYQWUUXSZP[L[JZIXIWJVKWJX", + "G\\ZHYJWOVRUTSWQYOZL[ SLRNPONOMMMKNIPGSF]F[GZHYKXOVUTXQZL[H[GZGXHWJWLXOZQ[T[WZYX VFZG[G", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "H\\WMW[X[ WMXMX[ WPUNSMPMNNLPKSKULXNZP[S[UZWX WPSNPNNOMPLSLUMXNYPZSZWX", + "H\\LFL[M[ LFMFM[ MPONQMTMVNXPYSYUXXVZT[Q[OZMX MPQNTNVOWPXSXUWXVYTZQZMX", + "I[XPVNTMQMONMPLSLUMXOZQ[T[VZXX XPWQVOTNQNOONPMSMUNXOYQZTZVYWWXX", + "H\\WFW[X[ WFXFX[ WPUNSMPMNNLPKSKULXNZP[S[UZWX WPSNPNNOMPLSLUMXNYPZSZWX", + "I[MTXTXQWOVNTMQMONMPLSLUMXOZQ[T[VZXX MSWSWQVOTNQNOONPMSMUNXOYQZTZVYWWXX", + "LZWFUFSGRJR[S[ WFWGUGSH TGSJS[ OMVMVN OMONVN", + "H\\XMWMW\\V_U`SaQaO`N_L_ XMX\\W_UaSbPbNaL_ WPUNSMPMNNLPKSKULXNZP[S[UZWX WPSNPNNOMPLSLUMXNYPZSZWX", + "H\\LFL[M[ LFMFM[ MQPNRMUMWNXQX[ MQPORNTNVOWQW[X[", + "NWRFQGQHRISITHTGSFRF RGRHSHSGRG RMR[S[ RMSMS[", + "NWRFQGQHRISITHTGSFRF RGRHSHSGRG RMRbSb RMSMSb", + "H[LFL[M[ LFMFM[ XMWMMW XMMX PTV[X[ QSX[", + "NWRFR[S[ RFSFS[", + "CbGMG[H[ GMHMH[ HQKNMMPMRNSQS[ HQKOMNONQORQR[S[ SQVNXM[M]N^Q^[ SQVOXNZN\\O]Q][^[", + "H\\LML[M[ LMMMM[ MQPNRMUMWNXQX[ MQPORNTNVOWQW[X[", + "I\\QMONMPLSLUMXOZQ[T[VZXXYUYSXPVNTMQM QNOONPMSMUNXOYQZTZVYWXXUXSWPVOTNQN", + "H\\LMLbMb LMMMMb MPONQMTMVNXPYSYUXXVZT[Q[OZMX MPQNTNVOWPXSXUWXVYTZQZMX", + "H\\WMWbXb WMXMXb WPUNSMPMNNLPKSKULXNZP[S[UZWX WPSNPNNOMPLSLUMXNYPZSZWX", + "KYOMO[P[ OMPMP[ PSQPSNUMXM 
PSQQSOUNXNXM", + "J[XPWNTMQMNNMPNRPSUUWV VUWWWXVZ WYTZQZNY OZNXMX XPWPVN WOTNQNNO ONNPOR NQPRUTWUXWXXWZT[Q[NZMX", + "MXRFR[S[ RFSFS[ OMVMVN OMONVN", + "H\\LMLWMZO[R[TZWW LMMMMWNYPZRZTYWW WMW[X[ WMXMX[", + "JZLMR[ LMMMRY XMWMRY XMR[", + "F^IMN[ IMJMNX RMNX RPN[ RPV[ RMVX [MZMVX [MV[", + "I[LMW[X[ LMMMX[ XMWML[ XMM[L[", + "JZLMR[ LMMMRY XMWMRYNb XMR[ObNb", + "I[VNL[ XMNZ LMXM LMLNVN NZXZX[ L[X[", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "K[UUTSRRPRNSMTLVLXMZO[Q[SZTX PRNTMVMYO[ VRTXTZV[XZYY[V WRUXUZV[", + "LZLVNSPO SFMXMZO[P[RZTXUUURVVWWXWZV TFNXNZO[", + "LXTSSTTTTSSRQROSNTMVMXNZP[S[VYXV QROTNVNYP[", + "K[UUTSRRPRNSMTLVLXMZO[Q[SZTX PRNTMVMYO[ ZFTXTZV[XZYY[V [FUXUZV[", + "LXOYQXRWSUSSRRQROSNTMVMXNZP[S[VYXV QROTNVNYP[", + "OXRRUOWLXIXGWFUGTIKdKfLgNfOcPZQ[S[UZVYXV TISNRRO[M`Kd", + "K[UUTSRRPRNSMTLVLXMZO[Q[SZTX PRNTMVMYO[ VRPd WRT[R`PdOfMgLfLdMaO_R]V[YY[V", + "L[LVNSPO SFL[ TFM[ OUQSSRTRVSVUUXUZV[ TRUSUUTXTZV[XZYY[V", + "NVSLRMSNTMSL QROXOZQ[SZTYVV RRPXPZQ[", + "NVSLRMSNTMSL QRKd RRO[M`KdJfHgGfGdHaJ_M]Q[TYVV", + "LZLVNSPO SFL[ TFM[ URUSVSURTRRTOU OURVSZT[ OUQVRZT[U[XYZV", + "NVNVPSRO UFOXOZQ[SZTYVV VFPXPZQ[", + "E^EVGSIRKSKUI[ IRJSJUH[ KUMSORPRRSRUP[ PRQSQUO[ RUTSVRWRYSYUXXXZY[ WRXSXUWXWZY[[Z\\Y^V", + "I[IVKSMROSOUM[ MRNSNUL[ OUQSSRTRVSVUUXUZV[ TRUSUUTXTZV[XZYY[V", + "KYRRPRNSMTLVLXMZO[Q[SZTYUWUUTSRRQSQURWTXVXXWYV PRNTMVMYO[", + "L[LVNSPO QLHg RLIg OUQSSRTRVSVUUXUZV[ TRUSUUTXTZV[XZYY[V", + "K[UUTSRRPRNSMTLVLXMZO[Q[SZ PRNTMVMYO[ VRPdPfQgSfTcT[V[YY[V WRT[R`Pd", + "LZLVNSPRRSRUP[ PRQSQUO[ RUTSVRWRVU VRVUWWXWZV", + "NZNVPSQQQSTUUWUYTZR[ QSSUTWTYR[ NZP[U[XYZV", + "NVNVPSRO UFOXOZQ[SZTYVV VFPXPZQ[ PNVN", + "K[NRLXLZN[O[QZSXUU ORMXMZN[ VRTXTZV[XZYY[V WRUXUZV[", + "KZNRMTLWLZN[O[RZTXUUUR ORNTMWMZN[ URVVWWXWZV", + "H]LRJTIWIZK[L[NZPX MRKTJWJZK[ RRPXPZR[S[UZWXXUXR SRQXQZR[ XRYVZW[W]V", + "JZJVLSNRPRQSQUPXOZM[L[KZKYLYKZ WSVTWTWSVRURSSRUQXQZR[U[XYZV QSRU SSQU PXQZ QXOZ", + 
"K[NRLXLZN[O[QZSXUU ORMXMZN[ VRPd WRT[R`PdOfMgLfLdMaO_R]V[YY[V", + "LYLVNSPRRRTSTVSXPZN[ RRSSSVRXPZ N[P\\Q^QaPdNfLgKfKdLaO^R\\VYYV N[O\\P^PaOdNf", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "NV", + "JZ", + "H\\QFNGLJKOKRLWNZQ[S[VZXWYRYOXJVGSFQF OGMJLOLRMWOZ NYQZSZVY UZWWXRXOWJUG VHSGQGNH", + "H\\NJPISFS[ NJNKPJRHR[S[", + "H\\LKLJMHNGPFTFVGWHXJXLWNUQL[ LKMKMJNHPGTGVHWJWLVNTQK[ LZYZY[ K[Y[", + "H\\MFXFQO MFMGWG WFPO QNSNVOXQYTYUXXVZS[P[MZLYKWLW POSOVPXS TOWQXTXUWXTZ XVVYSZPZMYLW OZLX", + "H\\UIU[V[ VFV[ VFKVZV UILV LUZUZV", + "H\\MFLO NGMN MFWFWG NGWG MNPMSMVNXPYSYUXXVZS[P[MZLYKWLW LOMOONSNVOXR TNWPXSXUWXTZ XVVYSZPZMYLW OZLX", + "H\\VGWIXIWGTFRFOGMJLOLTMXOZR[S[VZXXYUYTXQVOSNRNOOMQ WHTGRGOH PGNJMOMTNXQZ MVOYRZSZVYXV TZWXXUXTWQTO XSVPSOROOPMS QONQMT", + "H\\KFYFO[ KFKGXG XFN[O[", + "H\\PFMGLILKMMNNPOTPVQWRXTXWWYTZPZMYLWLTMRNQPPTOVNWMXKXIWGTFPF NGMIMKNMPNTOVPXRYTYWXYWZT[P[MZLYKWKTLRNPPOTNVMWKWIVG WHTGPGMH LXOZ UZXX", + "H\\WPURRSQSNRLPKMKLLINGQFRFUGWIXMXRWWUZR[P[MZLXMXNZ WMVPSR WNUQRRQRNQLN PRMPLMLLMIPG LKNHQGRGUHWK SGVIWMWRVWTZ UYRZPZMY", + "MXRXQYQZR[S[TZTYSXRX RYRZSZSYRY", + "MXTZS[R[QZQYRXSXTYT\\S^Q_ RYRZSZSYRY S[T\\ TZS^", + "MXRMQNQORPSPTOTNSMRM RNROSOSNRN RXQYQZR[S[TZTYSXRX RYRZSZSYRY", + "MXRMQNQORPSPTOTNSMRM RNROSOSNRN TZS[R[QZQYRXSXTYT\\S^Q_ RYRZSZSYRY S[T\\ TZS^", + "MXRFRTST RFSFST RXQYQZR[S[TZTYSXRX RYRZSZSYRY", + "I\\LKLJMHNGQFTFWGXHYJYLXNWOUPRQ LKMKMJNHQGTGWHXJXLWNUORP MIPG UGXI XMTP RPRTSTSP RXQYQZR[S[TZTYSXRX RYRZSZSYRY", + "MXTFRGQIQLRMSMTLTKSJRJQK RKRLSLSKRK RGQK QIRJ", + "MXTHSIRIQHQGRFSFTGTJSLQM RGRHSHSGRG SITJ THSL", + "F_\\MZMXNWPUVTXSYQZMZKYJWJUKSLRQOSMTKTISGQFPFNGMIMKNNPQUWXZZ[\\[ \\M\\NZNWP ZMXPVVUXSZQ[M[KZJYIWIUJSLQQNRMSKSIRG SHQGPGNH OGNINKONQQVWXYZZ\\Z\\[", + "I\\RBR_S_ RBSBS_ WIYIWGTFQFNGLILKMMNNVRWSXUXWWYTZQZOYNX WIVHTGQGNHMIMKNMVQXSYUYWXYWZT[Q[NZLXNX XXUZ", + "G^[BIbJb [B\\BJb", + "KYUBSDQGOKNPNTOYQ]S`UbVb UBVBTDRGPKOPOTPYR]T`Vb", + 
"KYNBPDRGTKUPUTTYR]P`NbOb NBOBQDSGUKVPVTUYS]Q`Ob", + "JZRFQGSQRR RFRR RFSGQQRR MINIVOWO MIWO MIMJWNWO WIVINOMO WIMO WIWJMNMO", + "F_JQ[Q[R JQJR[R", + "F_RIRZSZ RISISZ JQ[Q[R JQJR[R", + "F_JM[M[N JMJN[N JU[U[V JUJV[V", + "NWSFRGRM SGRM SFTGRM", + "I[NFMGMM NGMM NFOGMM WFVGVM WGVM WFXGVM", + "KYQFOGNINKOMQNSNUMVKVIUGSFQF QFNIOMSNVKUGQF SFOGNKQNUMVISF", + "F^ZIJRZ[ ZIZJLRZZZ[", + "F^JIZRJ[ JIJJXRJZJ[", + "G^OFObPb OFPFPb UFUbVb UFVFVb JP[P[Q JPJQ[Q JW[W[X JWJX[X", + "F^[FYGVHSHPGNFLFJGIIIKKMMMOLPJPHNF [FH[I[ [F\\FI[ YTWTUUTWTYV[X[ZZ[X[VYT NFJGIKMMPJNF LFIIKMOLPHLF YTUUTYX[[XYT WTTWV[ZZ[VWT", + "E`WMTKQKOLNMMOMRNTOUQVTVWT WMTLQLOMNONROTQUTUWT VKVSWUYVZV\\U]S]O\\L[JYHWGTFQFNGLHJJILHOHRIUJWLYNZQ[U[YZ VKWKWSXUZV YV[U\\S\\O[LZJYIWHTGQGNHLIKJJLIOIRJUKWLXNYQZUZYYYZ", + "E_JPLONOPPSTTUVVXVZU[S[QZOXNVNTOSPPTNULUJT ZPXOVOTPQTPUNVLVJUISIQJOLNNNPOQPTTVUXUZT KOJQJSKU YUZSZQYO", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "NV", + "JZ", + "H]TFQGOIMLLOKSKVLYMZO[Q[TZVXXUYRZNZKYHXGVFTF TFRGPINLMOLSLVMYO[ Q[SZUXWUXRYNYKXHVF", + "H]TJO[ VFP[ VFSIPKNL UIQKNL", + "H]OJPKOLNKNJOHPGSFVFYGZIZKYMWOTQPSMUKWI[ VFXGYIYKXMVOPS JYKXMXRZUZWYXW MXR[U[WZXW", + "H]OJPKOLNKNJOHPGSFVFYGZIZKYMVOSP VFXGYIYKXMVO QPSPVQWRXTXWWYVZS[O[LZKYJWJVKULVKW SPUQVRWTWWVYUZS[", + "H]XGR[ YFS[ YFJUZU", + "H]QFLP QF[F QGVG[F LPMOPNSNVOWPXRXUWXUZR[O[LZKYJWJVKULVKW SNUOVPWRWUVXTZR[", + "H]YIXJYKZJZIYGWFTFQGOIMLLOKSKWLYMZO[R[UZWXXVXSWQVPTOQOOPMRLT TFRGPINLMOLSLXMZ R[TZVXWVWRVP", + "H]NFLL [FZIXLSRQUPWO[ XLRRPUOWN[ MIPFRFWI NHPGRGWIYIZH[F", + "H]SFPGOHNJNMOOQPTPXOYNZLZIYGVFSF SFQGPHOJOMPOQP TPWOXNYLYIXGVF QPMQKSJUJXKZN[R[VZWYXWXTWRVQTP QPNQLSKUKXLZN[ R[UZVYWWWSVQ", + "H]YMXOVQTRQROQNPMNMKNIPGSFVFXGYHZJZNYRXUVXTZQ[N[LZKXKWLVMWLX OQNONKOIQGSF XGYIYNXRWUUXSZQ[", + "MXPYOZP[QZPY", + "MXP[OZPYQZQ[P]N_", + "MXSMRNSOTNSM PYOZP[QZ", + "MXSMRNSOTNSM P[OZPYQZQ[P]N_", + "MXUFTGRS UGRS UFVGRS PYOZP[QZPY", + "H]OJPKOLNKNJOHPGSFWFZG[I[KZMYNSPQQQSRTTT WFYGZIZKYMXNVO PYOZP[QZPY", + "MXVFTHSJSKTLUKTJ", + 
"MXUHTGUFVGVHUJSL", + "E_\\N[O\\P]O]N\\M[MYNWPRXPZN[K[HZGXGVHTISKRPPROTMUKUITGRFPGOIOLPRQUSXUZW[Y[ZYZX K[IZHXHVITJSPP OLPQQTSWUYWZYZZY", + "H]TBL_ YBQ_ ZJYKZL[K[JZHYGVFRFOGMIMKNMONVRXT MKOMVQWRXTXWWYVZS[O[LZKYJWJVKULVKW", + "G]_BEb", + "KZZBVESHQKOONTNXO]P`Qb VESIQMPPOUOZP_Qb", + "JYSBTDUGVLVPUUSYQ\\N_Jb SBTEUJUOTTSWQ[N_", + "J[TFTR OIYO YIOO", + "E_IR[R", + "E_RIR[ IR[R", + "E_IO[O IU[U", + "NWUFSM VFSM", + "I[PFNM QFNM YFWM ZFWM", + "KZSFQGPIPKQMSNUNWMXKXIWGUFSF", + "F^ZIJRZ[", + "F^JIZRJ[", + "H]SFLb YFRb LQZQ KWYW", + "E_^F\\GXHUHQGOFMFKGJIJKLMNMPLQJQHOF ^FF[ XTVTTUSWSYU[W[YZZXZVXT", + "E`WNVLTKQKOLNMMPMSNUPVSVUUVS QKOMNPNSOUPV WKVSVUXVZV\\T]Q]O\\L[JYHWGTFQFNGLHJJILHOHRIUJWLYNZQ[T[WZYYZX XKWSWUXV", + "F_\\S[UYVWVUUTTQPPONNLNJOIQISJULVNVPUQTTPUOWNYN[O\\Q\\S", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "H\\RFK[ RFY[ RIX[ MUVU I[O[ U[[[", + "G]LFL[ MFM[ IFYFYLXF MPUPXQYRZTZWYYXZU[I[ UPWQXRYTYWXYWZU[", + "G]LFL[ MFM[ IFUFXGYHZJZLYNXOUP UFWGXHYJYLXNWOUP MPUPXQYRZTZWYYXZU[I[ UPWQXRYTYWXYWZU[", + "I[NFN[ OFO[ KFZFZLYF K[R[", + "F^NFNLMTLXKZJ[ XFX[ YFY[ KF\\F G[\\[ G[Gb H[Gb [[\\b \\[\\b", + "G\\LFL[ MFM[ SLST IFYFYLXF MPSP I[Y[YUX[", + "CbRFR[ SFS[ OFVF GGHHGIFHFGGFHFIGJIKMLONPWPYOZM[I\\G]F^F_G_H^I]H^G NPLQKSJXIZH[ NPMQLSKXJZI[G[FZEX WPYQZS[X\\Z][ WPXQYSZX[Z\\[^[_Z`X O[V[", + "H\\LIKFKLLINGPFTFWGXIXLWNTOQO TFVGWIWLVNTO TOVPXRYTYWXYWZT[O[MZLYKWKVLUMVLW WQXTXWWYVZT[", + "F^KFK[ LFL[ XFX[ YFY[ HFOF UF\\F XHLY H[O[ U[\\[", + "F^KFK[ LFL[ XFX[ YFY[ HFOF UF\\F XHLY H[O[ U[\\[ N@N?M?M@NBPCTCVBW@", + "F^KFK[ LFL[ HFOF LPSPUOVMWIXGYFZF[G[HZIYHZG SPUQVSWXXZY[ SPTQUSVXWZX[Z[[Z\\X H[O[", + "E^MFMLLTKXJZI[H[GZGYHXIYHZ XFX[ YFY[ JF\\F U[\\[", + "F_KFK[ LFRX KFR[ YFR[ YFY[ ZFZ[ HFLF YF]F H[N[ V[][", + "F^KFK[ LFL[ XFX[ YFY[ HFOF UF\\F LPXP H[O[ U[\\[", + "G]QFNGLIKKJOJRKVLXNZQ[S[VZXXYVZRZOYKXIVGSFQF QFOGMILKKOKRLVMXOZQ[ S[UZWXXVYRYOXKWIUGSF", + "F^KFK[ LFL[ XFX[ YFY[ HF\\F H[O[ U[\\[", + "G]LFL[ MFM[ IFUFXGYHZJZMYOXPUQMQ 
UFWGXHYJYMXOWPUQ I[P[", + "G\\XIYLYFXIVGSFQFNGLIKKJNJSKVLXNZQ[S[VZXXYV QFOGMILKKNKSLVMXOZQ[", + "I\\RFR[ SFS[ LFKLKFZFZLYF O[V[", + "H]KFRV LFSV ZFSVQYPZN[M[LZLYMXNYMZ IFOF VF\\F", + "F_RFR[ SFS[ OFVF PILJJLIOIRJULWPXUXYW[U\\R\\O[LYJUIPI PIMJKLJOJRKUMWPX UXXWZU[R[OZLXJUI O[V[", + "H\\KFX[ LFY[ YFK[ IFOF UF[F I[O[ U[[[", + "F^KFK[ LFL[ XFX[ YFY[ HFOF UF\\F H[\\[ [[\\b \\[\\b", + "F]KFKQLSOTRTUSWQ LFLQMSOT WFW[ XFX[ HFOF TF[F T[[[", + "BcGFG[ HFH[ RFR[ SFS[ ]F][ ^F^[ DFKF OFVF ZFaF D[a[", + "BcGFG[ HFH[ RFR[ SFS[ ]F][ ^F^[ DFKF OFVF ZFaF D[a[ `[ab a[ab", + "F`PFP[ QFQ[ IFHLHFTF QPXP[Q\\R]T]W\\Y[ZX[M[ XPZQ[R\\T\\W[YZZX[", + "CaHFH[ IFI[ EFLF IPPPSQTRUTUWTYSZP[E[ PPRQSRTTTWSYRZP[ [F[[ \\F\\[ XF_F X[_[", + "H]MFM[ NFN[ JFQF NPUPXQYRZTZWYYXZU[J[ UPWQXRYTYWXYWZU[", + "H]LIKFKLLINGQFSFVGXIYKZNZSYVXXVZS[P[MZLYKWKVLUMVLW SFUGWIXKYNYSXVWXUZS[ PPYP", + "CbHFH[ IFI[ EFLF E[L[ VFSGQIPKOOORPVQXSZV[X[[Z]X^V_R_O^K]I[GXFVF VFTGRIQKPOPRQVRXTZV[ X[ZZ\\X]V^R^O]K\\IZGXF IPOP", + "G]WFW[ XFX[ [FOFLGKHJJJLKNLOOPWP OFMGLHKJKLLNMOOP RPPQORLYKZJZIY PQOSMZL[J[IYIX T[[[", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "I]NONPMPMONNPMTMVNWOXQXXYZZ[ WOWXXZZ[[[ WQVRPSMTLVLXMZP[S[UZWX PSNTMVMXNZP[", + "H\\XFWGQINKLNKQKULXNZQ[S[VZXXYUYSXPVNSMQMNNLPKS XFWHUIQJNLLN QMONMPLSLUMXOZQ[ S[UZWXXUXSWPUNSM", + "H\\MMM[ NMN[ JMUMXNYPYQXSUT UMWNXPXQWSUT NTUTXUYWYXXZU[J[ UTWUXWXXWZU[", + "HZMMM[ NMN[ JMXMXRWM J[Q[", + "F]NMNQMWLZK[ WMW[ XMX[ KM[M I[H`H[[[[`Z[", + "H[LSXSXQWOVNTMQMNNLPKSKULXNZQ[S[VZXX WSWPVN QMONMPLSLUMXOZQ[", + "E`RMR[ SMS[ OMVM JNIOHNIMJMKNMRNSPTUTWSXRZN[M\\M]N\\O[N PTNUMVKZJ[ PTNVLZK[I[HZGX UTWUXVZZ[[ UTWVYZZ[\\[]Z^X O[V[", + 
"I[MOLMLQMONNPMTMWNXPXQWSTT TMVNWPWQVSTT QTTTWUXWXXWZT[P[MZLXLWMVNWMX TTVUWWWXVZT[", + "G]LML[ MMM[ WMW[ XMX[ IMPM TM[M I[P[ T[[[ WNMZ", + "G]LML[ MMM[ WMW[ XMX[ IMPM TM[M I[P[ T[[[ WNMZ OGOFNFNGOIQJSJUIVG", + "H\\MMM[ NMN[ JMQM NTPTSSTRVNWMXMYNXOWN PTSUTVVZW[ PTRUSVUZV[X[YZZX J[Q[", + "G]NMNQMWLZK[J[IZJYKZ WMW[ XMX[ KM[M T[[[", + "G^LML[ LMR[ MMRY XMR[ XMX[ YMY[ IMMM XM\\M I[O[ U[\\[", + "G]LML[ MMM[ WMW[ XMX[ IMPM TM[M MTWT I[P[ T[[[", + "H\\QMNNLPKSKULXNZQ[S[VZXXYUYSXPVNSMQM QMONMPLSLUMXOZQ[ S[UZWXXUXSWPUNSM", + "G]LML[ MMM[ WMW[ XMX[ IM[M I[P[ T[[[", + "G\\LMLb MMMb MPONQMSMVNXPYSYUXXVZS[Q[OZMX SMUNWPXSXUWXUZS[ IMMM IbPb", + "H[WPVQWRXQXPVNTMQMNNLPKSKULXNZQ[S[VZXX QMONMPLSLUMXOZQ[", + "I\\RMR[ SMS[ MMLRLMYMYRXM O[V[", + "I[LMR[ MMRY XMR[P_NaLbKbJaK`La JMPM TMZM", + "H]RFRb SFSb OFSF RPQNPMNMLNKQKWLZN[P[QZRX NMMNLQLWMZN[ WMXNYQYWXZW[ SPTNUMWMYNZQZWYZW[U[TZSX ObVb", + "H\\LMW[ MMX[ XML[ JMPM TMZM J[P[ T[Z[", + "G]LML[ MMM[ WMW[ XMX[ IMPM TM[M I[[[[`Z[", + "G]LMLTMVPWRWUVWT MMMTNVPW WMW[ XMX[ IMPM TM[M T[[[", + "CbHMH[ IMI[ RMR[ SMS[ \\M\\[ ]M][ EMLM OMVM YM`M E[`[", + "CbHMH[ IMI[ RMR[ SMS[ \\M\\[ ]M][ EMLM OMVM YM`M E[`[``_[", + "H]QMQ[ RMR[ LMKRKMUM RTVTYUZWZXYZV[N[ VTXUYWYXXZV[", + "E_JMJ[ KMK[ GMNM KTOTRUSWSXRZO[G[ OTQURWRXQZO[ YMY[ ZMZ[ VM]M V[][", + "J[OMO[ PMP[ LMSM PTTTWUXWXXWZT[L[ TTVUWWWXVZT[", + "I\\MOLMLQMONNPMSMVNXPYSYUXXVZS[P[NZLXLWMVNWMX SMUNWPXSXUWXUZS[ RTXT", + "DaIMI[ JMJ[ FMMM F[M[ VMSNQPPSPUQXSZV[X[[Z]X^U^S]P[NXMVM VMTNRPQSQURXTZV[ X[ZZ\\X]U]S\\PZNXM JTPT", + "G\\VMV[ WMW[ ZMOMLNKPKQLSOTVT OMMNLPLQMSOT TTQUPVNZM[ TTRUQVOZN[L[KZJX S[Z[", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + 
"H\\RFKZ QIW[ RIX[ RFY[ MUVU I[O[ T[[[ KZJ[ KZM[ WZU[ WYV[ XYZ[", + "G]LFL[ MGMZ NFN[ IFUFXGYHZJZLYNXOUP XHYJYLXN UFWGXIXMWOUP NPUPXQYRZTZWYYXZU[I[ XRYTYWXY UPWQXSXXWZU[ JFLG KFLH OFNH PFNG LZJ[ LYK[ NYO[ NZP[", + "G\\XIYFYLXIVGTFQFNGLIKKJNJSKVLXNZQ[T[VZXXYV MILKKNKSLVMX QFOGMJLNLSMWOZQ[", + "G]LFL[ MGMZ NFN[ IFSFVGXIYKZNZSYVXXVZS[I[ WIXKYNYSXVWX SFUGWJXNXSWWUZS[ JFLG KFLH OFNH PFNG LZJ[ LYK[ NYO[ NZP[", + "G\\LFL[ MGMZ NFN[ IFYFYL NPTP TLTT I[Y[YU JFLG KFLH OFNH PFNG TFYG VFYH WFYI XFYL TLSPTT TNRPTR TOPPTQ LZJ[ LYK[ NYO[ NZP[ T[YZ V[YY W[YX X[YU", + "G[LFL[ MGMZ NFN[ IFYFYL NPTP TLTT I[Q[ JFLG KFLH OFNH PFNG TFYG VFYH WFYI XFYL TLSPTT TNRPTR TOPPTQ LZJ[ LYK[ NYO[ NZP[", + "G^XIYFYLXIVGTFQFNGLIKKJNJSKVLXNZQ[T[VZXZY[YS MILKKNKSLVMX QFOGMJLNLSMWOZQ[ XTXY WSWYVZ TS\\S USWT VSWU ZSYU [SYT", + "F^KFK[ LGLZ MFM[ WFW[ XGXZ YFY[ HFPF TF\\F MPWP H[P[ T[\\[ IFKG JFKH NFMH OFMG UFWG VFWH ZFYH [FYG KZI[ KYJ[ MYN[ MZO[ WZU[ WYV[ YYZ[ YZ[[", + "LXQFQ[ RGRZ SFS[ NFVF N[V[ OFQG PFQH TFSH UFSG QZO[ QYP[ SYT[ SZU[", + "JZSFSWRZQ[ TGTWSZ UFUWTZQ[O[MZLXLVMUNUOVOWNXMX MVMWNWNVMV PFXF QFSG RFSH VFUH WFUG", + "F\\KFK[ LGLZ MFM[ XGMR PPW[ QPX[ QNY[ HFPF UF[F H[P[ T[[[ IFKG JFKH NFMH OFMG WFXG ZFXG KZI[ KYJ[ MYN[ MZO[ WYU[ WYZ[", + "I[NFN[ OGOZ PFP[ KFSF K[Z[ZU LFNG MFNH QFPH RFPG NZL[ NYM[ PYQ[ PZR[ U[ZZ W[ZY X[ZX Y[ZU", + "E_JFJZ JFQ[ KFQX LFRX XFQ[ XFX[ YGYZ ZFZ[ GFLF XF]F G[M[ U[][ HFJG [FZH \\FZG JZH[ JZL[ XZV[ XYW[ ZY[[ ZZ\\[", + "F^KFKZ KFY[ LFXX MFYX YGY[ HFMF VF\\F H[N[ IFKG WFYG [FYG KZI[ KZM[", + "G]QFNGLIKKJOJRKVLXNZQ[S[VZXXYVZRZOYKXIVGSFQF MILKKNKSLVMX WXXVYSYNXKWI QFOGMJLNLSMWOZQ[ S[UZWWXSXNWJUGSF", + "G]LFL[ MGMZ NFN[ IFUFXGYHZJZMYOXPUQNQ XHYJYMXO UFWGXIXNWPUQ I[Q[ JFLG KFLH OFNH PFNG LZJ[ LYK[ NYO[ NZP[", + "G]QFNGLIKKJOJRKVLXNZQ[S[VZXXYVZRZOYKXIVGSFQF MILKKNKSLVMX WXXVYSYNXKWI QFOGMJLNLSMWOZQ[ S[UZWWXSXNWJUGSF NXOVQURUTVUXV^W`Y`Z^Z\\ V\\W^X_Y_ UXW]X^Y^Z]", + "G]LFL[ MGMZ NFN[ IFUFXGYHZJZLYNXOUPNP XHYJYLXN UFWGXIXMWOUP RPTQUSWYX[Z[[Y[W WWXYYZZZ TQURXXYYZY[X I[Q[ JFLG KFLH 
OFNH PFNG LZJ[ LYK[ NYO[ NZP[", + "H\\XIYFYLXIVGSFPFMGKIKLLNOPURWSXUXXWZ LLMNOOUQWRXT MGLILKMMONUPXRYTYWXYWZT[Q[NZLXKUK[LX", + "H\\JFJL QFQ[ RGRZ SFS[ ZFZL JFZF N[V[ KFJL LFJI MFJH OFJG UFZG WFZH XFZI YFZL QZO[ QYP[ SYT[ SZU[", + "F^KFKULXNZQ[S[VZXXYUYG LGLVMX MFMVNYOZQ[ HFPF VF\\F IFKG JFKH NFMH OFMG WFYG [FYG", + "H\\KFR[ LFRXR[ MFSX YGR[ IFPF UF[F JFLH NFMH OFMG WFYG ZFYG", + "F^JFN[ KFNVN[ LFOV RFOVN[ RFV[ SFVVV[ TFWV ZGWVV[ GFOF RFTF WF]F HFKG IFKH MFLH NFLG XFZG \\FZG", + "H\\KFW[ LFX[ MFY[ XGLZ IFPF UF[F I[O[ T[[[ JFMH NFMH OFMG VFXG ZFXG LZJ[ LZN[ WZU[ WYV[ WYZ[", + "G]JFQQQ[ KFRQRZ LFSQS[ YGSQ HFOF VF\\F N[V[ IFKG NFLG WFYG [FYG QZO[ QYP[ SYT[ SZU[", + "H\\YFKFKL WFK[ XFL[ YFM[ K[Y[YU LFKL MFKI NFKH PFKG T[YZ V[YY W[YX X[YU", + "H\\RFKZ QIW[ RIX[ RFY[ MUVU I[O[ T[[[ KZJ[ KZM[ WZU[ WYV[ XYZ[", + "G]LFL[ MGMZ NFN[ IFUFXGYHZJZLYNXOUP XHYJYLXN UFWGXIXMWOUP NPUPXQYRZTZWYYXZU[I[ XRYTYWXY UPWQXSXXWZU[ JFLG KFLH OFNH PFNG LZJ[ LYK[ NYO[ NZP[", + "I[NFN[ OGOZ PFP[ KFZFZL K[S[ LFNG MFNH QFPH RFPG UFZG WFZH XFZI YFZL NYM[ NZL[ PYQ[ PZR[", + "H\\RFJ[ QIX[ RIY[ RFZ[ KYXY KZXZ J[Z[", + "G\\LFL[ MGMZ NFN[ IFYFYL NPTP TLTT I[Y[YU JFLG KFLH OFNH PFNG TFYG VFYH WFYI XFYL TLSPTT TNRPTR TOPPTQ LZJ[ LYK[ NYO[ NZP[ T[YZ V[YY W[YX X[YU", + "H\\YFKFKL WFK[ XFL[ YFM[ K[Y[YU LFKL MFKI NFKH PFKG T[YZ V[YY W[YX X[YU", + "F^KFK[ LGLZ MFM[ WFW[ XGXZ YFY[ HFPF TF\\F MPWP H[P[ T[\\[ IFKG JFKH NFMH OFMG UFWG VFWH ZFYH [FYG KZI[ KYJ[ MYN[ MZO[ WZU[ WYV[ YYZ[ YZ[[", + "G]QFNGLIKKJOJRKVLXNZQ[S[VZXXYVZRZOYKXIVGSFQF MILKKNKSLVMX WXXVYSYNXKWI QFOGMJLNLSMWOZQ[ S[UZWWXSXNWJUGSF OMOT UMUT OPUP OQUQ ONPP OOQP UNTP UOSP PQOS QQOR SQUR TQUS", + "LXQFQ[ RGRZ SFS[ NFVF N[V[ OFQG PFQH TFSH UFSG QZO[ QYP[ SYT[ SZU[", + "F\\KFK[ LGLZ MFM[ XGMR PPW[ QPX[ QNY[ HFPF UF[F H[P[ T[[[ IFKG JFKH NFMH OFMG WFXG ZFXG KZI[ KYJ[ MYN[ MZO[ WYU[ WYZ[", + "H\\RFKZ QIW[ RIX[ RFY[ I[O[ T[[[ KZJ[ KZM[ WZU[ WYV[ XYZ[", + "E_JFJZ JFQ[ KFQX LFRX XFQ[ XFX[ YGYZ ZFZ[ GFLF XF]F G[M[ U[][ HFJG [FZH \\FZG JZH[ JZL[ XZV[ XYW[ ZY[[ 
ZZ\\[", + "F^KFKZ KFY[ LFXX MFYX YGY[ HFMF VF\\F H[N[ IFKG WFYG [FYG KZI[ KZM[", + "G]JEJL ZEZL OMOT UMUT JUJ\\ ZUZ\\ JGZG JHZH JIZI OPUP OQUQ JXZX JYZY JZZZ JFMH ZFWH KIJK LIJJ XIZJ YIZK ONPP OOQP UNTP UOSP PQOS QQOR SQUR TQUS JVKX JWLX ZWXX ZVYX MYJ[ WYZ[", + "G]QFNGLIKKJOJRKVLXNZQ[S[VZXXYVZRZOYKXIVGSFQF MILKKNKSLVMX WXXVYSYNXKWI QFOGMJLNLSMWOZQ[ S[UZWWXSXNWJUGSF", + "F^KFK[ LGLZ MFM[ WFW[ XGXZ YFY[ HF\\F H[P[ T[\\[ IFKG JFKH NFMH OFMG UFWG VFWH ZFYH [FYG KZI[ KYJ[ MYN[ MZO[ WZU[ WYV[ YYZ[ YZ[[", + "G]LFL[ MGMZ NFN[ IFUFXGYHZJZMYOXPUQNQ XHYJYMXO UFWGXIXNWPUQ I[Q[ JFLG KFLH OFNH PFNG LZJ[ LYK[ NYO[ NZP[", + "G]IFPPQQ JFQP KFRPI[ IFYFZLYIWF VFYH TFYG KYYY JZYZ I[Y[ZUYXWY", + "H\\JFJL QFQ[ RGRZ SFS[ ZFZL JFZF N[V[ KFJL LFJI MFJH OFJG UFZG WFZH XFZI YFZL QZO[ QYP[ SYT[ SZU[", + "H\\JMKILGMFOFPGQIRM LHMGOGPH JMKJMHOHPIQMQ[ RMR[ ZMYJWHUHTISMS[ XHWGUGTH ZMYIXGWFUFTGSIRM N[V[ QYP[ QZO[ SZU[ SYT[", + "G]QFQ[ RGRZ SFS[ NFVF N[V[ OFQG PFQH TFSH UFSG QZO[ QYP[ SYT[ SZU[ OKLLKMJOJRKTLUOVUVXUYTZRZOYMXLUKOK LMKOKRLT XTYRYOXM OKMLLOLRMUOV UVWUXRXOWLUK", + "H\\KFW[ LFX[ MFY[ XGLZ IFPF UF[F I[O[ T[[[ JFMH NFMH OFMG VFXG ZFXG LZJ[ LZN[ WZU[ WYV[ WYZ[", + "F^QFQ[ RGRZ SFS[ NFVF N[V[ OFQG PFQH TFSH UFSG QZO[ QYP[ SYT[ SZU[ HMIMJNKQLSMTPUTUWTXSYQZN[M\\M LRKNJLILKN HMIKJKKLLPMSNTPU YN[LZLYNXR TUVTWSXPYLZK[K\\M", + "G]NYKYJWK[O[MVKRJOJLKIMGPFTFWGYIZLZOYRWVU[Y[ZWYYVY LSKOKLLI XIYLYOXS O[MULPLKMHNGPF TFVGWHXKXPWUU[ KZNZ VZYZ", + "H\\UFIZ SJT[ THUZ UFUHVYV[ LUTU F[L[ Q[X[ IZG[ IZK[ TZR[ TYS[ VYW[", + "F^OFI[ PFJ[ QFK[ LFWFZG[I[KZNYOVP YGZIZKYNXO WFXGYIYKXNVP NPVPXQYSYUXXVZR[F[ WQXSXUWXUZ VPWRWUVXTZR[ MFPG NFOH RFPH SFPG JZG[ JYH[ KYL[ JZM[", + "H]ZH[H\\F[L[JZHYGWFTFQGOIMLLOKSKVLYMZP[S[UZWXXV QHOJNLMOLSLWMY TFRGPJOLNOMSMXNZP[", + "F]OFI[ PFJ[ QFK[ LFUFXGYHZKZOYSWWUYSZO[F[ WGXHYKYOXSVWTY UFWHXKXOWSUWRZO[ MFPG NFOH RFPH SFPG JZG[ JYH[ KYL[ JZM[", + "F]OFI[ PFJ[ QFK[ ULST LF[FZL NPTP F[U[WV MFPG NFOH RFPH SFPG WFZG XFZH YFZI ZFZL ULSPST TNRPSR TOQPSQ JZG[ JYH[ KYL[ JZM[ P[UZ R[UY UYWV", + 
"F\\OFI[ PFJ[ QFK[ ULST LF[FZL NPTP F[N[ MFPG NFOH RFPH SFPG WFZG XFZH YFZI ZFZL ULSPST TNRPSR TOQPSQ JZG[ JYH[ KYL[ JZM[", + "H^ZH[H\\F[L[JZHYGWFTFQGOIMLLOKSKVLYMZP[R[UZWXYT QHOJNLMOLSLWMY VXWWXT TFRGPJOLNOMSMXNZP[ R[TZVWWT TT\\T UTWU VTWW ZTXV [TXU", + "E_NFH[ OFI[ PFJ[ ZFT[ [FU[ \\FV[ KFSF WF_F LPXP E[M[ Q[Y[ LFOG MFNH QFOH RFOG XF[G YFZH ]F[H ^F[G IZF[ IYG[ JYK[ IZL[ UZR[ UYS[ VYW[ UZX[", + "KYTFN[ UFO[ VFP[ QFYF K[S[ RFUG SFTH WFUH XFUG OZL[ OYM[ PYQ[ OZR[", + "I\\WFRWQYO[ XFTSSVRX YFUSSXQZO[M[KZJXJVKULUMVMWLXKX KVKWLWLVKV TF\\F UFXG VFWH ZFXH [FXG", + "F]OFI[ PFJ[ QFK[ \\GMR QOU[ ROV[ SNWZ LFTF YF_F F[N[ R[Y[ MFPG NFOH RFPH SFPG ZF\\G ^F\\G JZG[ JYH[ KYL[ JZM[ UZS[ UYT[ VYX[", + "H\\QFK[ RFL[ SFM[ NFVF H[W[YU OFRG PFQH TFRH UFRG LZI[ LYJ[ MYN[ LZO[ R[WZ T[XX V[YU", + "D`MFGZ MGNYN[ NFOY OFPX [FPXN[ [FU[ \\FV[ ]FW[ JFOF [F`F D[J[ R[Z[ KFMG LFMH ^F\\H _F\\G GZE[ GZI[ VZS[ VYT[ WYX[ VZY[", + "F_OFIZ OFV[ PFVX QFWX \\GWXV[ LFQF YF_F F[L[ MFPG NFPH ZF\\G ^F\\G IZG[ IZK[", + "G]SFPGNILLKOJSJVKYLZN[Q[TZVXXUYRZNZKYHXGVFSF OIMLLOKSKWLY UXWUXRYNYJXH SFQGOJNLMOLSLXMZN[ Q[SZUWVUWRXNXIWGVF", + "F]OFI[ PFJ[ QFK[ LFXF[G\\I\\K[NYPUQMQ ZG[I[KZNXP XFYGZIZKYNWPUQ F[N[ MFPG NFOH RFPH SFPG JZG[ JYH[ KYL[ JZM[", + "G]SFPGNILLKOJSJVKYLZN[Q[TZVXXUYRZNZKYHXGVFSF OIMLLOKSKWLY UXWUXRYNYJXH SFQGOJNLMOLSLXMZN[ Q[SZUWVUWRXNXIWGVF LXMVOUPURVSXT]U^V^W] T^U_V_ SXS_T`V`W]W\\", + "F^OFI[ PFJ[ QFK[ LFWFZG[I[KZNYOVPNP YGZIZKYNXO WFXGYIYKXNVP RPTQURWXXYYYZX WYXZYZ URVZW[Y[ZXZW F[N[ MFPG NFOH RFPH SFPG JZG[ JYH[ KYL[ JZM[", + "G^ZH[H\\F[L[JZHYGVFRFOGMIMLNNPPVSWUWXVZ NLONVRWT OGNINKOMUPWRXTXWWYVZS[O[LZKYJWJUI[JYKY", + "G]TFN[ UFO[ VFP[ MFKL ]F\\L MF]F K[S[ NFKL PFLI RFMG YF\\G ZF\\H [F\\I \\F\\L OZL[ OYM[ PYQ[ OZR[", + "F_NFKQJUJXKZN[R[UZWXXU\\G OFLQKUKYLZ PFMQLULYN[ KFSF YF_F LFOG MFNH QFOH RFOG ZF\\G ^F\\G", + "H\\NFNHOYO[ OGPX PFQW [GO[ LFSF XF^F MFNH QFPH RFOG YF[G ]F[G", + "E_MFMHKYK[ NGLX OFMW UFMWK[ UFUHSYS[ VGTX WFUW ]GUWS[ JFRF UFWF ZF`F KFNG LFMH PFNI QFNG [F]G _F]G", + "G]NFT[ OFU[ PFV[ 
[GIZ LFSF XF^F F[L[ Q[X[ MFOH QFPH RFPG YF[G ]F[G IZG[ IZK[ TZR[ TYS[ UYW[", + "G]MFQPN[ NFRPO[ OFSPP[ \\GSP KFRF YF_F K[S[ LFNG PFOH QFNG ZF\\G ^F\\G OZL[ OYM[ PYQ[ OZR[", + "G]ZFH[ [FI[ \\FJ[ \\FNFLL H[V[XU OFLL PFMI RFNG R[VZ T[WX U[XU", + "", + "", + "", + "", + "", + "", + "H\\JFR[ KFRX LFSX JFZFR[ LGYG LHYH", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "I]NPNOOOOQMQMONNPMTMVNWOXQXXYZZ[ VOWQWXXZ TMUNVPVXWZZ[[[ VRUSPTMULWLXMZP[S[UZVX NUMWMXNZ USQTOUNWNXOZP[", + "G\\LFL[MZOZ MGMY IFNFNZ NPONQMSMVNXPYSYUXXVZS[Q[OZNX WPXRXVWX SMUNVOWRWVVYUZS[ JFLG KFLH", + "H[WQWPVPVRXRXPVNTMQMNNLPKSKULXNZQ[S[VZXX MPLRLVMX QMONNOMRMVNYOZQ[", + "H]VFV[[[ WGWZ SFXFX[ VPUNSMQMNNLPKSKULXNZQ[S[UZVX MPLRLVMX QMONNOMRMVNYOZQ[ TFVG UFVH XYY[ XZZ[", + "H[MSXSXQWOVNSMQMNNLPKSKULXNZQ[S[VZXX WRWQVO MPLRLVMX VSVPUNSM QMONNOMRMVNYOZQ[", + "KYWHWGVGVIXIXGWFTFRGQHPKP[ RHQKQZ TFSGRIR[ MMVM M[U[ PZN[ PYO[ RYS[ RZT[", + "I\\XNYOZNYMXMVNUO QMONNOMQMSNUOVQWSWUVVUWSWQVOUNSMQM OONQNSOU UUVSVQUO QMPNOPOTPVQW SWTVUTUPTNSM NUMVLXLYM[N\\Q]U]X^Y_ N[Q\\U\\X] LYMZP[U[X\\Y^Y_XaUbObLaK_K^L\\O[ ObMaL_L^M\\O[", + "G^LFL[ MGMZ IFNFN[ NQOOPNRMUMWNXOYRY[ WOXRXZ UMVNWQW[ I[Q[ T[\\[ JFLG KFLH LZJ[ LYK[ NYO[ NZP[ WZU[ WYV[ YYZ[ YZ[[", + "LXQFQHSHSFQF RFRH QGSG QMQ[ RNRZ NMSMS[ N[V[ OMQN PMQO QZO[ QYP[ SYT[ SZU[", + "KXRFRHTHTFRF SFSH RGTG RMR^QaPb SNS]R` OMTMT]S`RaPbMbLaL_N_NaMaM` PMRN QMRO", + "G]LFL[ MGMZ IFNFN[ WNNW RSY[ RTX[ QTW[ TM[M I[Q[ T[[[ JFLG KFLH UMWN ZMWN LZJ[ LYK[ NYO[ NZP[ WYU[ VYZ[", + "LXQFQ[ RGRZ NFSFS[ N[V[ OFQG PFQH QZO[ QYP[ SYT[ SZU[", + "AcFMF[ GNGZ CMHMH[ HQIOJNLMOMQNROSRS[ QORRRZ OMPNQQQ[ SQTOUNWMZM\\N]O^R^[ \\O]R]Z ZM[N\\Q\\[ C[K[ N[V[ Y[a[ DMFN EMFO FZD[ FYE[ HYI[ HZJ[ QZO[ QYP[ SYT[ SZU[ \\ZZ[ \\Y[[ ^Y_[ ^Z`[", + "G^LML[ MNMZ IMNMN[ NQOOPNRMUMWNXOYRY[ WOXRXZ UMVNWQW[ I[Q[ T[\\[ JMLN KMLO LZJ[ LYK[ NYO[ NZP[ WZU[ WYV[ YYZ[ YZ[[", + "H\\QMNNLPKSKULXNZQ[S[VZXXYUYSXPVNSMQM MPLRLVMX WXXVXRWP QMONNOMRMVNYOZQ[ S[UZVYWVWRVOUNSM", + "G\\LMLb 
MNMa IMNMNb NPONQMSMVNXPYSYUXXVZS[Q[OZNX WPXRXVWX SMUNVOWRWVVYUZS[ IbQb JMLN KMLO LaJb L`Kb N`Ob NaPb", + "H\\VNVb WOWa UNWNXMXb VPUNSMQMNNLPKSKULXNZQ[S[UZVX MPLRLVMX QMONNOMRMVNYOZQ[ Sb[b VaTb V`Ub X`Yb XaZb", + "IZNMN[ ONOZ KMPMP[ WOWNVNVPXPXNWMUMSNQPPS K[S[ LMNN MMNO NZL[ NYM[ PYQ[ PZR[", + "J[WOXMXQWOVNTMPMNNMOMQNSPTUUWVXY NNMQ NRPSUTWU XVWZ MONQPRUSWTXVXYWZU[Q[OZNYMWM[NY", + "KZPHPVQYRZT[V[XZYX QHQWRY PHRFRWSZT[ MMVM", + "G^LMLVMYNZP[S[UZVYWW MNMWNY IMNMNWOZP[ WMW[\\[ XNXZ TMYMY[ JMLN KMLO YYZ[ YZ[[", + "I[LMR[ MMRY NMSY XNSYR[ JMQM TMZM KMNO PMNN VMXN YMXN", + "F^JMN[ KMNX LMOX RMOXN[ RMV[ SMVX RMTMWX ZNWXV[ GMOM WM]M HMKN NMLN XMZN \\MZN", + "H\\LMV[ MMW[ NMX[ WNMZ JMQM TMZM J[P[ S[Z[ KMMN PMNN UMWN YMWN MZK[ MZO[ VZT[ WZY[", + "H[LMR[ MMRY NMSY XNSYP_NaLbJbIaI_K_KaJaJ` JMQM TMZM KMNO PMNN VMXN YMXN", + "I[VML[ WMM[ XMN[ XMLMLQ L[X[XW MMLQ NMLP OMLO QMLN S[XZ U[XY V[XX W[XW", + "G^[MZQYTWXUZR[P[MZKXJUJSKPMNPMRMUNVOWQYXZZ[[\\[ ZMYQXTWVUYTZR[ LXKVKRLP P[NZMYLVLRMONNPM RMTNUOVQXXYZ[[", + "G\\QFNGMHLJKNKb NHMJLNLa QFOGNIMNMb QFSFVGWHXJXLWNVOSP PPTPWQXRYTYWXYWZT[Q[OZNYMW VHWJWLVN WRXTXWWY SFUGVIVMUOSP TPVQWSWXVZT[ KbMb", + "F\\HRINKMMMONPOQRRYSb IOKNMNOOPP HRIPKOMOOPPQQTRYRa XMWPVRTUSWR[Qb YMWQ ZMYOWRTVSXR[ XMZM QbSb", + "H\\SMQMNNLPKSKULXNZQ[S[VZXXYUYSXPVNSMPLNKMJMHNGPFSFWH MPLSLUMX WXXUXSWP QMONNOMRMVNYOZQ[ S[UZVYWVWRVOUNOKNJNIOHQGTGWH", + "I[SMUNVOWOVNSMQMMNLOLQMRQS SSQSMTKVKXMZP[S[VZXXWXVZ NNMOMQNR MULVLXMY QMONNONQORQS QSNTMVMXNZP[", + "I[QHRGRFQFPGPIQJTKXKYKYJXJUKSLPNNPMRLULWMYNZP[S\\U]V_VaUbSbRaR`S`Sa POOPNRMUMWNYOZ UKRMQNOQNTNWOYQ[S\\", + "G]JMKNLPL[ KMLNMPMZ HPINJMLMMNNPN[ UMVNWQWb WOXRXa NQOOPNRMUMWNXOYRYb L[N[ WbYb", + "F]IMJNKPKTLWMYNZQ[S[VZWYXWYRYOXJVGTFRFPGOIOKPMSOVP[Q JMKNLPLTMWNY VYWWXRXOWJVHTG GPHNIMKMLNMPMTNXOZQ[ S[UZVXWSWNVJUHSGQGOI", + "KZNMONPPPXQZS[U[WZXX OMPNQPQXRZ LPMNNMPMQNRPRXSZT[", + "G]JMKNLPL[ KMLNMPMZ HPINJMLMMNNPN[ SOUNWNXOXPZPZNXMVMTNQQOTNW XNYOYP PSQSWYYYZX TWWZYZ RTUZV[X[YZZX L[N[", + "H\\JGKFMFOGQIXXYZZ[ OHPIWXXY MFNGOIVXXZZ[[[ 
RMJZJ[K[RM", + "G]KMKb LNLa MMMb VMVXWZX[Z[[Z\\X WNWXXZY[ XMXXYZZ[ MXNZP[R[TZUYVW KMMM VMXM KbMb", + "G]JMKNLPMTN[ KMLNMPNTOZ HPINJMLMMNNPOTPZ VVWTXQXMYMZNYQXSVVTXQZN[ XRYOYM", + "JZPGSFRFPGOHOIPJSKVLWKVJSKPLNMMOMQNRPSSTVUWTVSSTOUMVLXLZM[O\\S]U^V_VaTbRbOaPaRb OMNONQOR NVMXMZN[ VKSKQLPMOOOQQSST VTSTPUOVNXNZP\\S]", + "H\\QMNNLPKSKULXNZQ[S[VZXXYUYSXPVNSMQM MPLRLVMX WXXVXRWP QMONNOMRMVNYOZQ[ S[UZVYWVWRVOUNSM", + "G]IQJOKNMM[M KOMNZN IQJPLO[O OONZM[LZMWOO UOVZW[XZWWUO [M[O OOMZ UOWZ", + "G\\QMNNLPKTKb MPLTLa QMONNOMSMb MWNYOZQ[S[VZXXYUYSXPVNSMQM WXXVXRWP S[UZVYWVWRVOUNSM KbMb", + "G]PMMNKPJSJUKXMZP[R[UZWXXUXSWPUNRM LPKRKVLX VXWVWRVP PMNNMOLRLVMYNZP[ R[TZUYVVVRUOTNRM RMZO[N[MPM RMZN", + "H\\JQKOLNNMZM LONNYN JQKPMOZO ROQZR[SZRO ZMZO RORZ", + "G\\JMKNLPLUMXOZQ[S[UZWXXVYRYNXMWMXPXSWWUZ KMLNMPMUNX WMXNXO HPINJMLMMNNPNVOYQ[", + "G]RQQNPMNMLNKOJRJUKXMZP[T[WZYXZUZRYOXNVMTMSNRQ LOKRKULX XXYUYRXO NMMNLQLVMYNZP[ T[VZWYXVXQWNVM RQQb RQRa RQSb QbSb", + "H\\LMMNNPT_VaXbZb[a NOOPU_V` INJMLMNNPPV_WaXb VSXPYMZMYOVSN\\K`JbKbL_N\\", + "F]HNINJPJUKXMZP[T[VZXXYVZRZNYMXMYPYSXWVZ JNKPKULX XMYNYO GPHNIMJMKNLPLVMYNZP[ QFSb RGRa SFQb QFSF QbSb", + "F^NMLNJPISIWJYKZM[O[QZRYSWSTRSQTQWRYSZU[W[YZZY[W[SZPXNVM KPJSJWKY RTRX YYZWZSYP NMLOKRKWLZM[ W[XZYWYRXOVM", + "G]WMUTUXVZW[Y[[Y\\W XMVTVZ WMYMWTVX UTUQTNRMPMMNKQJTJVKYLZN[P[RZSYTWUT NNLQKTKWLY PMNOMQLTLWMZN[", + "I\\PFNMMSMWNYOZQ[S[VZXWYTYRXOWNUMSMQNPOOQNT QFOMNQNWOZ VYWWXTXQWO MFRFPMNT S[UYVWWTWQVNUM NFQG OFPH", + "I[WQWPVPVRXRXPWNUMRMONMQLTLVMYNZP[R[UZWW OONQMTMWNY RMPOOQNTNWOZP[", + "G]YFVQUUUXVZW[Y[[Y\\W ZFWQVUVZ VF[FWTVX UTUQTNRMPMMNKQJTJVKYLZN[P[RZSYTWUT MOLQKTKWLY PMNOMQLTLWMZN[ WFZG XFYH", + "I[MVQUTTWRXPWNUMRMONMQLTLVMYNZP[R[UZWX OONQMTMWNY RMPOOQNTNWOZP[", + "JZZHZGYGYI[I[GZFXFVGTISKRNQRO[N^M`Kb TJSMRRP[O^ XFVHUJTMSRQZP]O_MaKbIbHaH_J_JaIaI` NMYM", + "H]XMT[S^QaOb YMU[S_ XMZMV[T_RaObLbJaI`I^K^K`J`J_ VTVQUNSMQMNNLQKTKVLYMZO[Q[SZTYUWVT NOMQLTLWMY QMOONQMTMWNZO[", + "G]OFI[K[ PFJ[ LFQFK[ MTOPQNSMUMWNXPXSVX WNWRVVVZ WPUUUXVZW[Y[[Y\\W MFPG NFOH", 
+ "KXTFTHVHVFTF UFUH TGVG LQMOOMQMRNSPSSQX RNRRQVQZ RPPUPXQZR[T[VYWW", + "KXUFUHWHWFUF VFVH UGWG MQNOPMRMSNTPTSRZQ]P_NaLbJbIaI_K_KaJaJ` SNSSQZP]O_ SPRTP[O^N`Lb", + "G]OFI[K[ PFJ[ LFQFK[ YOYNXNXPZPZNYMWMUNQROS MSOSQTRUTYUZWZ QUSYTZ OSPTRZS[U[WZYW MFPG NFOH", + "LXTFQQPUPXQZR[T[VYWW UFRQQUQZ QFVFRTQX RFUG SFTH", + "@cAQBODMFMGNHPHSF[ GNGSE[ GPFTD[F[ HSJPLNNMPMRNSPSSQ[ RNRSP[ RPQTO[Q[ SSUPWNYM[M]N^P^S\\X ]N]R\\V\\Z ]P[U[X\\Z][_[aYbW", + "F^GQHOJMLMMNNPNSL[ MNMSK[ MPLTJ[L[ NSPPRNTMVMXNYPYSWX XNXRWVWZ XPVUVXWZX[Z[\\Y]W", + "H\\QMNNLQKTKVLYMZP[S[VZXWYTYRXOWNTMQM NOMQLTLWMY VYWWXTXQWO QMOONQMTMWNZP[ S[UYVWWTWQVNTM", + "G]HQIOKMMMNNOPOSNWKb NNNSMWJb NPMTIb OTPQQORNTMVMXNYOZRZTYWWZT[R[PZOWOT XOYQYTXWWY VMWNXQXTWWVYT[ FbNb JaGb J`Hb K`Lb JaMb", + "G\\WMQb XMRb WMYMSb UTUQTNRMPMMNKQJTJVKYLZN[P[RZSYTWUT MOLQKTKWLY PMNOMQLTLWMZN[ NbVb RaOb R`Pb S`Tb RaUb", + "I[JQKOMMOMPNQPQTO[ PNPTN[ PPOTM[O[ YOYNXNXPZPZNYMWMUNSPQT", + "J[XPXOWOWQYQYOXNUMRMONNONQOSQTTUVVWX ONNQ ORQSTTVU WVVZ NOOQQRTSVTWVWXVZS[P[MZLYLWNWNYMYMX", + "KYTFQQPUPXQZR[T[VYWW UFRQQUQZ TFVFRTQX NMXM", + "F^GQHOJMLMMNNPNSLX MNMRLVLZ MPKUKXLZN[P[RZTXVU XMVUVXWZX[Z[\\Y]W YMWUWZ XMZMXTWX", + "H\\IQJOLMNMONPPPSNX ONORNVNZ OPMUMXNZP[R[TZVXXUYQYMXMXNYP", + "CaDQEOGMIMJNKPKSIX JNJRIVIZ JPHUHXIZK[M[OZQXRU TMRURXSZU[W[YZ[X]U^Q^M]M]N^P UMSUSZ TMVMTTSX", + "G]JQLNNMPMRNSPSR PMQNQRPVOXMZK[I[HZHXJXJZIZIY RORRQVQY ZOZNYNYP[P[NZMXMVNTPSRRVRZS[ PVPXQZS[U[WZYW", + "G]HQIOKMMMNNOPOSMX NNNRMVMZ NPLULXMZO[Q[SZUXWT YMU[T^RaPb ZMV[T_ YM[MW[U_SaPbMbKaJ`J^L^L`K`K_", + "H\\YMXOVQNWLYK[ XOOOMPLR VORNONNO VORMOMMOLR LYUYWXXV NYRZUZVY NYR[U[WYXV", + "", + "", + "", + "", + "", + "", + "H\\WQVOUNSMQMNNLPKSKULXNZQ[S[VZWYXWYSYNXJWHVGSFQFNGMHNHOGQF MPLRLVMX VYWWXSXNWJVH QMONNOMRMVNYOZQ[ S[UZVXWTWMVIUGSF", + "I[UMWNXOYOXNUMRMONMPLSLUMXOZR[U[XZYYXYWZU[ NPMSMUNX RMPNOONRNVOYPZR[ NTTUUTTSNT NTTT", + "H\\QFNGLJKOKRLWNZQ[S[VZXWYRYOXJVGSFQF NHMJLNLSMWNY VYWWXSXNWJVH QFOGNIMNMSNXOZQ[ S[UZVXWSWNVIUGSF LPXQ LQXP", + "G]PMMNKPJSJUKXMZP[T[WZYXZUZSYPWNTMPM 
LPKSKULX XXYUYSXP PMNNMOLRLVMYNZP[T[VZWYXVXRWOVNTM QFSb RGRa SFQb QFSF QbSb", + "H\\TMVNXPYPYOWNTMPMMNLOKQKSLUNWPXRYSZT\\T^S_Q_O^P^Q_ MOLQLSMUOW PMNNMPMSNURY YPXO", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "NV", + "JZ", + "H\\QFNGLJKOKRLWNZQ[S[VZXWYRYOXJVGSFQF NHMJLNLSMWNY VYWWXSXNWJVH QFOGNIMNMSNXOZQ[ S[UZVXWSWNVIUGSF", + "H\\QHQ[ RHRZ SFS[ SFPINJ M[W[ QZO[ QYP[ SYT[ SZU[", + "H\\LJLKMKMJLJ LIMINJNKMLLLKKKJLHMGPFTFWGXHYJYLXNUPPRNSLUKXK[ WHXJXLWN TFVGWJWLVNTPPR KYLXNXSYWYYX NXSZWZXY NXS[W[XZYXYV", + "H\\LJLKMKMJLJ LIMINJNKMLLLKKKJLHMGPFTFWGXIXLWNTO VGWIWLVN SFUGVIVLUNSO QOTOVPXRYTYWXYWZT[P[MZLYKWKVLUMUNVNWMXLX WRXTXWWY SOUPVQWTWWVZT[ LVLWMWMVLV", + "H\\SIS[ THTZ UFU[ UFJUZU P[X[ SZQ[ SYR[ UYV[ UZW[", + "H\\MFKPMNPMSMVNXPYSYUXXVZS[P[MZLYKWKVLUMUNVNWMXLX WPXRXVWX SMUNVOWRWVVYUZS[ LVLWMWMVLV MFWF MGUG MHQHUGWF", + "H\\VIVJWJWIVI WHVHUIUJVKWKXJXIWGUFRFOGMILKKOKULXNZQ[S[VZXXYUYTXQVOSNQNOONPMR NIMKLOLUMXNY WXXVXSWQ RFPGOHNJMNMUNXOZQ[ S[UZVYWVWSVPUOSN", + "H\\KFKL YFYIXLTQSSRWR[ SRRTQWQ[ XLSQQTPWP[R[ KJLHNFPFUIWIXHYF MHNGPGRH KJLINHPHUI", + "H\\PFMGLILLMNPOTOWNXLXIWGTFPF NGMIMLNN VNWLWIVG PFOGNINLONPO TOUNVLVIUGTF POMPLQKSKWLYMZP[T[WZXYYWYSXQWPTO MQLSLWMY WYXWXSWQ PONPMSMWNZP[ T[VZWWWSVPTO", + "H\\MWMXNXNWMW WOVQURSSQSNRLPKMKLLINGQFSFVGXIYLYRXVWXUZR[O[MZLXLWMVNVOWOXNYMY MPLNLKMI VHWIXLXRWVVX QSORNQMNMKNHOGQF SFUGVIWLWSVWUYTZR[", + "MXRXQYQZR[S[TZTYSXRX RYRZSZSYRY", + "MXTZS[R[QZQYRXSXTYT\\S^Q_ RYRZSZSYRY S[T\\ TZS^", + "MXRMQNQORPSPTOTNSMRM RNROSOSNRN RXQYQZR[S[TZTYSXRX RYRZSZSYRY", + "MXRMQNQORPSPTOTNSMRM RNROSOSNRN TZS[R[QZQYRXSXTYT\\S^Q_ RYRZSZSYRY S[T\\ TZS^", + "MXRFQGQIRQ RFRTST RFSFST SFTGTISQ RXQYQZR[S[TZTYSXRX RYRZSZSYRY", + "I\\MKMJNJNLLLLJMHNGPFTFWGXHYJYLXNWOSQ WHXIXMWN TFVGWIWMVOUP RQRTSTSQRQ RXQYQZR[S[TZTYSXRX RYRZSZSYRY", + "MXTFRGQIQLRMSMTLTKSJRJQK RKRLSLSKRK RGQK QIRJ", + "MXTHSIRIQHQGRFSFTGTJSLQM RGRHSHSGRG SITJ THSL", + "E_[O[NZNZP\\P\\N[MZMYNXPVUTXRZP[L[JZIXIUJSPORMSKSIRGPFNGMIMLNOPRTWWZY[[[\\Y\\X KZJXJUKSLR RMSI SKRG NGMK 
NNPQTVWYYZ N[LZKXKULSPO MINMQQUVXYZZ[Z\\Y", + "H\\PBP_ TBT_ XKXJWJWLYLYJXHWGTFPFMGKIKLLNOPURWSXUXXWZ LLMNOOUQWRXT MGLILKMMONUPXRYTYWXYWZT[P[MZLYKWKUMUMWLWLV", + "G^[BIbJb [B\\BJb", + "KYUBSDQGOKNPNTOYQ]S`Ub QHPKOOOUPYQ\\ SDRFQIPOPUQ[R^S`", + "KYOBQDSGUKVPVTUYS]Q`Ob SHTKUOUUTYS\\ QDRFSITOTUS[R^Q`", + "JZRFQGSQRR RFRR RFSGQQRR MINIVOWO MIWO MIMJWNWO WIVINOMO WIMO WIWJMNMO", + "F_JQ[Q[R JQJR[R", + "F_RIRZSZ RISISZ JQ[Q[R JQJR[R", + "F_JM[M[N JMJN[N JU[U[V JUJV[V", + "NWSFRGRM SGRM SFTGRM", + "I[NFMGMM NGMM NFOGMM WFVGVM WGVM WFXGVM", + "KYQFOGNINKOMQNSNUMVKVIUGSFQF QFNIOMSNVKUGQF SFOGNKQNUMVISF", + "F^ZIJRZ[ ZIZJLRZZZ[", + "F^JIZRJ[ JIJJXRJZJ[", + "G^OFObPb OFPFPb UFUbVb UFVFVb JP[P[Q JPJQ[Q JW[W[X JWJX[X", + "F^[FYGVHSHPGNFLFJGIIIKKMMMOLPJPHNF [FH[ [FI[ [FJ[ YTWTUUTWTYV[X[ZZ[X[VYT OGLFIIJLMMPJOG NFJGIK KMOLPH ZUWTTWUZX[[XZU YTUUTY V[ZZ[V H[J[", + "E`VNULSKQKOLNMMOMRNTOUQVSVUUVS OMNONROT QKPLOOORPUQV VKVSWUYVZV\\U]R]O\\L[JYHWGTFQFNGLHJJILHOHRIUJWLYNZQ[T[WZYYXYWZ WLWSXU VKXKXSYUZV", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "NV", + "JZ", + "H]TFQGOIMLLOKSKVLYMZO[Q[TZVXXUYRZNZKYHXGVFTF QHOJNLMOLSLWMY TYVWWUXRYNYJXH TFRGPJOLNOMSMXNZO[ Q[SZUWVUWRXNXIWGVF", + "H]TJO[Q[ WFUJP[ WFQ[ WFTIQKOL TJRKOL", + "H]OKOJPJPLNLNJOHPGSFVFYGZIZKYMWOMUKWI[ XGYIYKXMVOSQ VFWGXIXKWMUOMU JYKXMXRYWYXX MXRZWZ MXR[U[WZXXXW", + "H]OKOJPJPLNLNJOHPGSFVFYGZIZKYMXNVOSP XGYIYKXMWN VFWGXIXKWMUOSP QPSPVQWRXTXWWYUZR[O[LZKYJWJULULWKWKV VRWTWWVY SPUQVSVWUYTZR[", + "H]WJR[T[ ZFXJS[ ZFT[ ZFJUZU", + "H]QFLP QF[F QGYG PHUHYG[F LPMOPNSNVOWPXRXUWXUZQ[N[LZKYJWJULULWKWKV VPWRWUVXTZ SNUOVQVUUXSZQ[", + "H]YJYIXIXKZKZIYGWFTFQGOIMLLOKSKVLYMZO[R[UZWXXVXSWQVPTOQOOPNQMS PINLMOLSLWMY VXWVWSVQ TFRGPJOLNOMSMXNZO[ R[TZUYVVVRUPTO", + "H]NFLL [FZIXLTQRTQWP[ RSPWO[ XLRRPUOWN[P[ MIPFRFWI OGRGWI MIOHRHWIYIZH[F", + "H]SFPGOHNJNMOOQPTPWOYNZLZIYGWFSF UFPG PHOJONPO OORP SPWO XNYLYIXG YGUF SFQHPJPNQP TPVOWNXLXHWF QPMQKSJUJXKZN[R[VZWYXWXTWRVQTP RPMQ NQLSKUKXLZ KZP[VZ VYWWWTVR VQSP QPOQMSLULXMZN[ 
R[TZUYVWVSUQTP", + "H]XNWPVQTRQROQNPMNMKNIPGSFVFXGYHZKZNYRXUVXTZQ[N[LZKXKVMVMXLXLW OPNNNKOI XHYJYNXRWUUX QRPQOOOKPHQGSF VFWGXIXNWRVUUWSZQ[", + "MXPXOYOZP[Q[RZRYQXPX PYPZQZQYPY", + "MXQ[P[OZOYPXQXRYR[Q]P^N_ PYPZQZQYPY Q[Q\\P^", + "MXSMRNROSPTPUOUNTMSM SNSOTOTNSN PXOYOZP[Q[RZRYQXPX PYPZQZQYPY", + "MXSMRNROSPTPUOUNTMSM SNSOTOTNSN Q[P[OZOYPXQXRYR[Q]P^N_ PYPZQZQYPY Q[Q\\P^", + "MXVFUFTGRT VGUGRT VGVHRT VFWGWHRT PXOYOZP[Q[RZRYQXPX PYPZQZQYPY", + "H]OKOJPJPLNLNJOHPGSFWFZG[I[KZMYNWOSPQQQSSTTT UFZG YGZIZKYMXNVO WFXGYIYKXMWNSPRQRSST PXOYOZP[Q[RZRYQXPX PYPZQZQYPY", + "MXWFUGTHSJSLTMUMVLVKUJTJ UGTITJ TKTLULUKTK", + "MXVIUITHTGUFVFWGWIVKULSM UGUHVHVGUG VIVJUL", + "E_\\O\\N[N[P]P]N\\M[MYNWPRXPZN[K[HZGXGVHTISKRPPROTMUKUITGRFPGOIOLPRQURWTZV[X[YYYX L[HZ IZHXHVITJSLR PPQSTYVZ K[JZIXIVJTKSMRRO OLPOQRSVUYWZXZYY", + "H]TBL_ YBQ_ ZKZJYJYL[L[JZHYGVFRFOGMIMLNNPPVSWUWXVZ NLONVRWT OGNINKOMUPWRXTXWWYVZS[O[LZKYJWJULULWKWKV", + "G^_BEbFb _B`BFb", + "JZZBXCUERHPKNOMSMXN\\O_Qb SHQKOONTN\\ ZBWDTGRJQLPOOSN\\ NTO]P`Qb", + "JZSBUEVHWLWQVUTYR\\O_LaJb VHVPUUSYQ\\ SBTDUGVP VHUQTUSXRZP]M`Jb", + "J[TFSGUQTR TFTR TFUGSQTR OIPIXOYO OIYO OIOJYNYO YIXIPOOO YIOO YIYJONOO", + "F_JQ[Q[R JQJR[R", + "F_RIRZSZ RISISZ JQ[Q[R JQJR[R", + "F_JM[M[N JMJN[N JU[U[V JUJV[V", + "MWUFTGRM UGRM UFVGRM", + "H\\PFOGMM PGMM PFQGMM ZFYGWM ZGWM ZF[GWM", + "KZSFQGPIPKQMSNUNWMXKXIWGUFSF SFPIQMUNXKWGSF UFQGPKSNWMXIUF", + "F^ZIJRZ[ ZIZJLRZZZ[", + "F^JIZRJ[ JIJJXRJZJ[", + "G^SFKbLb SFTFLb YFQbRb YFZFRb KP\\P\\Q KPKQ\\Q IWZWZX IWIXZX", + "E^^F\\GXHUHQGOFMFKGJIJKLMNMPLQJQHOF ^FE[ ^FF[ ^FG[ XTVTTUSWSYU[W[YZZXZVXT PGMFJIKLNMQJPG OFKGJK LMPLQH YUVTSWTZW[ZXYU XTTUSY U[YZZV E[G[", + "E`UQUNTLRKPKNLMMLPLSMUOVQVSUTTUQ OLNMMPMSNU RKPLOMNPNSOUPV VKUQUSVUXVZV\\U]R]O\\L[JYHWGTFQFNGLHJJILHOHRIUJWLYNZQ[T[WZYYXYWZ WKVQVSWU VKXKWQWSXUZV", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + 0 }; + +} + +/* End of file. 
*/ diff --git a/opencv/imgproc/_featuretree.h b/opencv/imgproc/_featuretree.h new file mode 100644 index 0000000..76a7b4f --- /dev/null +++ b/opencv/imgproc/_featuretree.h @@ -0,0 +1,63 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2009, Xavier Delacour, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +// 2009-01-09, Xavier Delacour + +#ifndef __opencv_featuretree_h__ +#define __opencv_featuretree_h__ + +struct CvFeatureTree { + CvFeatureTree(const CvFeatureTree& x); + CvFeatureTree& operator= (const CvFeatureTree& rhs); + + CvFeatureTree() {} + virtual ~CvFeatureTree() {} + virtual void FindFeatures(const CvMat* d, int k, int emax, CvMat* results, CvMat* dist) = 0; + virtual int FindOrthoRange(CvMat* /*bounds_min*/, CvMat* /*bounds_max*/,CvMat* /*results*/) { + return 0; + } +}; + +#endif // __cv_featuretree_h__ + +// Local Variables: +// mode:C++ +// End: diff --git a/opencv/imgproc/_geom.h b/opencv/imgproc/_geom.h new file mode 100644 index 0000000..7da6079 --- /dev/null +++ b/opencv/imgproc/_geom.h @@ -0,0 +1,72 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef _CV_GEOM_H_ +#define _CV_GEOM_H_ + +/* Finds distance between two points */ +CV_INLINE float icvDistanceL2_32f( CvPoint2D32f pt1, CvPoint2D32f pt2 ) +{ + float dx = pt2.x - pt1.x; + float dy = pt2.y - pt1.y; + + return std::sqrt( dx*dx + dy*dy ); +} + + +int icvIntersectLines( double x1, double dx1, double y1, double dy1, + double x2, double dx2, double y2, double dy2, + double* t2 ); + + +void icvCreateCenterNormalLine( CvSubdiv2DEdge edge, double* a, double* b, double* c ); + +void icvIntersectLines3( double* a0, double* b0, double* c0, + double* a1, double* b1, double* c1, + CvPoint2D32f* point ); + + +/* curvature: 0 - 1-curvature, 1 - k-cosine curvature. */ +CvSeq* icvApproximateChainTC89( CvChain* chain, int header_size, CvMemStorage* storage, int method ); + +#endif /*_IPCVGEOM_H_*/ + +/* End of file. */ diff --git a/opencv/imgproc/_imgproc.h b/opencv/imgproc/_imgproc.h new file mode 100644 index 0000000..9800a4e --- /dev/null +++ b/opencv/imgproc/_imgproc.h @@ -0,0 +1,47 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef _CV_IMG_PROC_H_ +#define _CV_IMG_PROC_H_ + + + +#endif /*_CV_INTERNAL_H_*/ diff --git a/opencv/imgproc/_kdtree.hpp b/opencv/imgproc/_kdtree.hpp new file mode 100644 index 0000000..b46c995 --- /dev/null +++ b/opencv/imgproc/_kdtree.hpp @@ -0,0 +1,467 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2008, Xavier Delacour, all rights reserved. 
+// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +// 2008-05-13, Xavier Delacour + +#ifndef __cv_kdtree_h__ +#define __cv_kdtree_h__ + +#include "precomp.hpp" + +#include +#include +#include +#include +#include "assert.h" +#include "math.h" + +#if _MSC_VER >= 1400 +#pragma warning(disable: 4512) // suppress "assignment operator could not be generated" +#endif + +// J.S. Beis and D.G. Lowe. Shape indexing using approximate nearest-neighbor search +// in highdimensional spaces. 
In Proc. IEEE Conf. Comp. Vision Patt. Recog., +// pages 1000--1006, 1997. http://citeseer.ist.psu.edu/beis97shape.html +#undef __deref +#undef __valuetype + +template < class __valuetype, class __deref > +class CvKDTree { +public: + typedef __deref deref_type; + typedef typename __deref::scalar_type scalar_type; + typedef typename __deref::accum_type accum_type; + +private: + struct node { + int dim; // split dimension; >=0 for nodes, -1 for leaves + __valuetype value; // if leaf, value of leaf + int left, right; // node indices of left and right branches + scalar_type boundary; // left if deref(value,dim)<=boundary, otherwise right + }; + typedef std::vector < node > node_array; + + __deref deref; // requires operator() (__valuetype lhs,int dim) + + node_array nodes; // node storage + int point_dim; // dimension of points (the k in kd-tree) + int root_node; // index of root node, -1 if empty tree + + // for given set of point indices, compute dimension of highest variance + template < class __instype, class __valuector > + int dimension_of_highest_variance(__instype * first, __instype * last, + __valuector ctor) { + assert(last - first > 0); + + accum_type maxvar = -std::numeric_limits < accum_type >::max(); + int maxj = -1; + for (int j = 0; j < point_dim; ++j) { + accum_type mean = 0; + for (__instype * k = first; k < last; ++k) + mean += deref(ctor(*k), j); + mean /= last - first; + accum_type var = 0; + for (__instype * k = first; k < last; ++k) { + accum_type diff = accum_type(deref(ctor(*k), j)) - mean; + var += diff * diff; + } + var /= last - first; + + assert(maxj != -1 || var >= maxvar); + + if (var >= maxvar) { + maxvar = var; + maxj = j; + } + } + + return maxj; + } + + // given point indices and dimension, find index of median; (almost) modifies [first,last) + // such that points_in[first,median]<=point[median], points_in(median,last)>point[median]. + // implemented as partial quicksort; expected linear perf. 
+ template < class __instype, class __valuector > + __instype * median_partition(__instype * first, __instype * last, + int dim, __valuector ctor) { + assert(last - first > 0); + __instype *k = first + (last - first) / 2; + median_partition(first, last, k, dim, ctor); + return k; + } + + template < class __instype, class __valuector > + struct median_pr { + const __instype & pivot; + int dim; + __deref deref; + __valuector ctor; + median_pr(const __instype & _pivot, int _dim, __deref _deref, __valuector _ctor) + : pivot(_pivot), dim(_dim), deref(_deref), ctor(_ctor) { + } + bool operator() (const __instype & lhs) const { + return deref(ctor(lhs), dim) <= deref(ctor(pivot), dim); + } + }; + + template < class __instype, class __valuector > + void median_partition(__instype * first, __instype * last, + __instype * k, int dim, __valuector ctor) { + int pivot = (int)((last - first) / 2); + + std::swap(first[pivot], last[-1]); + __instype *middle = std::partition(first, last - 1, + median_pr < __instype, __valuector > + (last[-1], dim, deref, ctor)); + std::swap(*middle, last[-1]); + + if (middle < k) + median_partition(middle + 1, last, k, dim, ctor); + else if (middle > k) + median_partition(first, middle, k, dim, ctor); + } + + // insert given points into the tree; return created node + template < class __instype, class __valuector > + int insert(__instype * first, __instype * last, __valuector ctor) { + if (first == last) + return -1; + else { + + int dim = dimension_of_highest_variance(first, last, ctor); + __instype *median = median_partition(first, last, dim, ctor); + + __instype *split = median; + for (; split != last && deref(ctor(*split), dim) == + deref(ctor(*median), dim); ++split); + + if (split == last) { // leaf + int nexti = -1; + for (--split; split >= first; --split) { + int i = (int)nodes.size(); + node & n = *nodes.insert(nodes.end(), node()); + n.dim = -1; + n.value = ctor(*split); + n.left = -1; + n.right = nexti; + nexti = i; + } + + return nexti; 
+ } else { // node + int i = (int)nodes.size(); + // note that recursive insert may invalidate this ref + node & n = *nodes.insert(nodes.end(), node()); + + n.dim = dim; + n.boundary = deref(ctor(*median), dim); + + int left = insert(first, split, ctor); + nodes[i].left = left; + int right = insert(split, last, ctor); + nodes[i].right = right; + + return i; + } + } + } + + // run to leaf; linear search for p; + // if found, remove paths to empty leaves on unwind + bool remove(int *i, const __valuetype & p) { + if (*i == -1) + return false; + node & n = nodes[*i]; + bool r; + + if (n.dim >= 0) { // node + if (deref(p, n.dim) <= n.boundary) // left + r = remove(&n.left, p); + else // right + r = remove(&n.right, p); + + // if terminal, remove this node + if (n.left == -1 && n.right == -1) + *i = -1; + + return r; + } else { // leaf + if (n.value == p) { + *i = n.right; + return true; + } else + return remove(&n.right, p); + } + } + +public: + struct identity_ctor { + const __valuetype & operator() (const __valuetype & rhs) const { + return rhs; + } + }; + + // initialize an empty tree + CvKDTree(__deref _deref = __deref()) + : deref(_deref), root_node(-1) { + } + // given points, initialize a balanced tree + CvKDTree(__valuetype * first, __valuetype * last, int _point_dim, + __deref _deref = __deref()) + : deref(_deref) { + set_data(first, last, _point_dim, identity_ctor()); + } + // given points, initialize a balanced tree + template < class __instype, class __valuector > + CvKDTree(__instype * first, __instype * last, int _point_dim, + __valuector ctor, __deref _deref = __deref()) + : deref(_deref) { + set_data(first, last, _point_dim, ctor); + } + + void set_deref(__deref _deref) { + deref = _deref; + } + + void set_data(__valuetype * first, __valuetype * last, int _point_dim) { + set_data(first, last, _point_dim, identity_ctor()); + } + template < class __instype, class __valuector > + void set_data(__instype * first, __instype * last, int _point_dim, + 
__valuector ctor) { + point_dim = _point_dim; + nodes.clear(); + nodes.reserve(last - first); + root_node = insert(first, last, ctor); + } + + int dims() const { + return point_dim; + } + + // remove the given point + bool remove(const __valuetype & p) { + return remove(&root_node, p); + } + + void print() const { + print(root_node); + } + void print(int i, int indent = 0) const { + if (i == -1) + return; + for (int j = 0; j < indent; ++j) + std::cout << " "; + const node & n = nodes[i]; + if (n.dim >= 0) { + std::cout << "node " << i << ", left " << nodes[i].left << ", right " << + nodes[i].right << ", dim " << nodes[i].dim << ", boundary " << + nodes[i].boundary << std::endl; + print(n.left, indent + 3); + print(n.right, indent + 3); + } else + std::cout << "leaf " << i << ", value = " << nodes[i].value << std::endl; + } + + //////////////////////////////////////////////////////////////////////////////////////// + // bbf search +public: + struct bbf_nn { // info on found neighbors (approx k nearest) + const __valuetype *p; // nearest neighbor + accum_type dist; // distance from d to query point + bbf_nn(const __valuetype & _p, accum_type _dist) + : p(&_p), dist(_dist) { + } + bool operator<(const bbf_nn & rhs) const { + return dist < rhs.dist; + } + }; + typedef std::vector < bbf_nn > bbf_nn_pqueue; +private: + struct bbf_node { // info on branches not taken + int node; // corresponding node + accum_type dist; // minimum distance from bounds to query point + bbf_node(int _node, accum_type _dist) + : node(_node), dist(_dist) { + } + bool operator<(const bbf_node & rhs) const { + return dist > rhs.dist; + } + }; + typedef std::vector < bbf_node > bbf_pqueue; + mutable bbf_pqueue tmp_pq; + + // called for branches not taken, as bbf walks to leaf; + // construct bbf_node given minimum distance to bounds of alternate branch + void pq_alternate(int alt_n, bbf_pqueue & pq, scalar_type dist) const { + if (alt_n == -1) + return; + + // add bbf_node for alternate branch in 
priority queue + pq.push_back(bbf_node(alt_n, dist)); + std::push_heap(pq.begin(), pq.end()); + } + + // called by bbf to walk to leaf; + // takes one step down the tree towards query point d + template < class __desctype > + int bbf_branch(int i, const __desctype * d, bbf_pqueue & pq) const { + const node & n = nodes[i]; + // push bbf_node with bounds of alternate branch, then branch + if (d[n.dim] <= n.boundary) { // left + pq_alternate(n.right, pq, n.boundary - d[n.dim]); + return n.left; + } else { // right + pq_alternate(n.left, pq, d[n.dim] - n.boundary); + return n.right; + } + } + + // compute euclidean distance between two points + template < class __desctype > + accum_type distance(const __desctype * d, const __valuetype & p) const { + accum_type dist = 0; + for (int j = 0; j < point_dim; ++j) { + accum_type diff = accum_type(d[j]) - accum_type(deref(p, j)); + dist += diff * diff; + } return (accum_type) sqrt(dist); + } + + // called per candidate nearest neighbor; constructs new bbf_nn for + // candidate and adds it to priority queue of all candidates; if + // queue len exceeds k, drops the point furthest from query point d. + template < class __desctype > + void bbf_new_nn(bbf_nn_pqueue & nn_pq, int k, + const __desctype * d, const __valuetype & p) const { + bbf_nn nn(p, distance(d, p)); + if ((int) nn_pq.size() < k) { + nn_pq.push_back(nn); + std::push_heap(nn_pq.begin(), nn_pq.end()); + } else if (nn_pq[0].dist > nn.dist) { + std::pop_heap(nn_pq.begin(), nn_pq.end()); + nn_pq.end()[-1] = nn; + std::push_heap(nn_pq.begin(), nn_pq.end()); + } + assert(nn_pq.size() < 2 || nn_pq[0].dist >= nn_pq[1].dist); + } + +public: + // finds (with high probability) the k nearest neighbors of d, + // searching at most emax leaves/bins. + // ret_nn_pq is an array containing the (at most) k nearest neighbors + // (see bbf_nn structure def above). 
+ template < class __desctype > + int find_nn_bbf(const __desctype * d, + int k, int emax, + bbf_nn_pqueue & ret_nn_pq) const { + assert(k > 0); + ret_nn_pq.clear(); + + if (root_node == -1) + return 0; + + // add root_node to bbf_node priority queue; + // iterate while queue non-empty and emax>0 + tmp_pq.clear(); + tmp_pq.push_back(bbf_node(root_node, 0)); + while (tmp_pq.size() && emax > 0) { + + // from node nearest query point d, run to leaf + std::pop_heap(tmp_pq.begin(), tmp_pq.end()); + bbf_node bbf(tmp_pq.end()[-1]); + tmp_pq.erase(tmp_pq.end() - 1); + + int i; + for (i = bbf.node; + i != -1 && nodes[i].dim >= 0; + i = bbf_branch(i, d, tmp_pq)); + + if (i != -1) { + + // add points in leaf/bin to ret_nn_pq + do { + bbf_new_nn(ret_nn_pq, k, d, nodes[i].value); + } while (-1 != (i = nodes[i].right)); + + --emax; + } + } + + tmp_pq.clear(); + return (int)ret_nn_pq.size(); + } + + //////////////////////////////////////////////////////////////////////////////////////// + // orthogonal range search +private: + void find_ortho_range(int i, scalar_type * bounds_min, + scalar_type * bounds_max, + std::vector < __valuetype > &inbounds) const { + if (i == -1) + return; + const node & n = nodes[i]; + if (n.dim >= 0) { // node + if (bounds_min[n.dim] <= n.boundary) + find_ortho_range(n.left, bounds_min, bounds_max, inbounds); + if (bounds_max[n.dim] > n.boundary) + find_ortho_range(n.right, bounds_min, bounds_max, inbounds); + } else { // leaf + do { + inbounds.push_back(nodes[i].value); + } while (-1 != (i = nodes[i].right)); + } + } +public: + // return all points that lie within the given bounds; inbounds is cleared + int find_ortho_range(scalar_type * bounds_min, + scalar_type * bounds_max, + std::vector < __valuetype > &inbounds) const { + inbounds.clear(); + find_ortho_range(root_node, bounds_min, bounds_max, inbounds); + return (int)inbounds.size(); + } +}; + +#endif // __cv_kdtree_h__ + +// Local Variables: +// mode:C++ +// End: diff --git 
a/opencv/imgproc/_list.h b/opencv/imgproc/_list.h new file mode 100644 index 0000000..b2b63e9 --- /dev/null +++ b/opencv/imgproc/_list.h @@ -0,0 +1,373 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef _CV_LIST_H_ +#define _CV_LIST_H_ + +#include +#include + +#define CV_FORCE_INLINE CV_INLINE + +#if !defined(_LIST_INLINE) +#define _LIST_INLINE CV_FORCE_INLINE +#endif /*_LIST_INLINE*/ + +#if defined DECLARE_LIST +#if defined _MSC_VER && _MSC_VER >= 1200 + #pragma warning("DECLARE_LIST macro is already defined!") +#endif +#endif /*DECLARE_LIST*/ + +static const long default_size = 10; +static const long default_inc_size = 10; + +struct _pos +{ + void* m_pos; +#ifdef _DEBUG + struct _list* m_list; +#endif /*_DEBUG*/ +}; +typedef struct _pos CVPOS; +struct _list +{ + void* m_buffer; + void* m_first_buffer; + long m_buf_size; /* The size of the buffer */ + long m_size; /* The number of elements */ + CVPOS m_head; + CVPOS m_tail; + CVPOS m_head_free; +}; + +typedef struct _list _CVLIST; + +#define DECLARE_LIST(type, prefix)\ + /* Basic element of a list*/\ + struct prefix##element_##type\ + {\ + struct prefix##element_##type* m_prev;\ + struct prefix##element_##type* m_next;\ + type m_data;\ + };\ + typedef struct prefix##element_##type ELEMENT_##type;\ + /* Initialization and destruction*/\ + _LIST_INLINE _CVLIST* prefix##create_list_##type(long);\ + _LIST_INLINE void prefix##destroy_list_##type(_CVLIST*);\ + /* Access functions*/\ + _LIST_INLINE CVPOS prefix##get_head_pos_##type(_CVLIST*);\ + _LIST_INLINE CVPOS prefix##get_tail_pos_##type(_CVLIST*);\ + _LIST_INLINE type* prefix##get_next_##type(CVPOS*);\ + 
_LIST_INLINE type* prefix##get_prev_##type(CVPOS*);\ + /* Modification functions*/\ + _LIST_INLINE void prefix##clear_list_##type(_CVLIST*);\ + _LIST_INLINE CVPOS prefix##add_head_##type(_CVLIST*, type*);\ + _LIST_INLINE CVPOS prefix##add_tail_##type(_CVLIST*, type*);\ + _LIST_INLINE void prefix##remove_head_##type(_CVLIST*);\ + _LIST_INLINE void prefix##remove_tail_##type(_CVLIST*);\ + _LIST_INLINE CVPOS prefix##insert_before_##type(_CVLIST*, CVPOS, type*);\ + _LIST_INLINE CVPOS prefix##insert_after_##type(_CVLIST*, CVPOS, type*);\ + _LIST_INLINE void prefix##remove_at_##type(_CVLIST*, CVPOS);\ + _LIST_INLINE void prefix##set_##type(CVPOS, type*);\ + _LIST_INLINE type* prefix##get_##type(CVPOS);\ + /* Statistics functions*/\ + _LIST_INLINE int prefix##get_count_##type(_CVLIST*); + +/* This macro finds a space for a new element and puts in into 'element' pointer */ +#define INSERT_NEW(element_type, l, element)\ + l->m_size++;\ + if(l->m_head_free.m_pos != NULL)\ + {\ + element = (element_type*)(l->m_head_free.m_pos);\ + if(element->m_next != NULL)\ + {\ + element->m_next->m_prev = NULL;\ + l->m_head_free.m_pos = element->m_next;\ + }\ + else\ + {\ + l->m_head_free.m_pos = NULL;\ + }\ + }\ + else\ + {\ + if(l->m_buf_size < l->m_size && l->m_head_free.m_pos == NULL)\ + {\ + *(void**)l->m_buffer = cvAlloc(l->m_buf_size*sizeof(element_type) + sizeof(void*));\ + l->m_buffer = *(void**)l->m_buffer;\ + *(void**)l->m_buffer = NULL;\ + element = (element_type*)((char*)l->m_buffer + sizeof(void*));\ + }\ + else\ + {\ + element = (element_type*)((char*)l->m_buffer + sizeof(void*)) + l->m_size - 1;\ + }\ + } + +/* This macro adds 'element' to the list of free elements*/ +#define INSERT_FREE(element_type, l, element)\ + if(l->m_head_free.m_pos != NULL)\ + {\ + ((element_type*)l->m_head_free.m_pos)->m_prev = element;\ + }\ + element->m_next = ((element_type*)l->m_head_free.m_pos);\ + l->m_head_free.m_pos = element; + + +/*#define GET_FIRST_FREE(l) 
((ELEMENT_##type*)(l->m_head_free.m_pos))*/ + +#define IMPLEMENT_LIST(type, prefix)\ +_CVLIST* prefix##create_list_##type(long size)\ +{\ + _CVLIST* pl = (_CVLIST*)cvAlloc(sizeof(_CVLIST));\ + pl->m_buf_size = size > 0 ? size : default_size;\ + pl->m_first_buffer = cvAlloc(pl->m_buf_size*sizeof(ELEMENT_##type) + sizeof(void*));\ + pl->m_buffer = pl->m_first_buffer;\ + *(void**)pl->m_buffer = NULL;\ + pl->m_size = 0;\ + pl->m_head.m_pos = NULL;\ + pl->m_tail.m_pos = NULL;\ + pl->m_head_free.m_pos = NULL;\ + return pl;\ +}\ +void prefix##destroy_list_##type(_CVLIST* l)\ +{\ + void* cur = l->m_first_buffer;\ + void* next;\ + while(cur)\ + {\ + next = *(void**)cur;\ + cvFree(&cur);\ + cur = next;\ + }\ + cvFree(&l);\ +}\ +CVPOS prefix##get_head_pos_##type(_CVLIST* l)\ +{\ + return l->m_head;\ +}\ +CVPOS prefix##get_tail_pos_##type(_CVLIST* l)\ +{\ + return l->m_tail;\ +}\ +type* prefix##get_next_##type(CVPOS* pos)\ +{\ + if(pos->m_pos)\ + {\ + ELEMENT_##type* element = (ELEMENT_##type*)(pos->m_pos);\ + pos->m_pos = element->m_next;\ + return &element->m_data;\ + }\ + else\ + {\ + return NULL;\ + }\ +}\ +type* prefix##get_prev_##type(CVPOS* pos)\ +{\ + if(pos->m_pos)\ + {\ + ELEMENT_##type* element = (ELEMENT_##type*)(pos->m_pos);\ + pos->m_pos = element->m_prev;\ + return &element->m_data;\ + }\ + else\ + {\ + return NULL;\ + }\ +}\ +int prefix##is_pos_##type(CVPOS pos)\ +{\ + return !!pos.m_pos;\ +}\ +void prefix##clear_list_##type(_CVLIST* l)\ +{\ + l->m_head.m_pos = NULL;\ + l->m_tail.m_pos = NULL;\ + l->m_size = 0;\ + l->m_head_free.m_pos = NULL;\ +}\ +CVPOS prefix##add_head_##type(_CVLIST* l, type* data)\ +{\ + ELEMENT_##type* element;\ + INSERT_NEW(ELEMENT_##type, l, element);\ + element->m_prev = NULL;\ + element->m_next = (ELEMENT_##type*)(l->m_head.m_pos);\ + memcpy(&(element->m_data), data, sizeof(*data));\ + if(element->m_next)\ + {\ + element->m_next->m_prev = element;\ + }\ + else\ + {\ + l->m_tail.m_pos = element;\ + }\ + l->m_head.m_pos = element;\ + 
return l->m_head;\ +}\ +CVPOS prefix##add_tail_##type(_CVLIST* l, type* data)\ +{\ + ELEMENT_##type* element;\ + INSERT_NEW(ELEMENT_##type, l, element);\ + element->m_next = NULL;\ + element->m_prev = (ELEMENT_##type*)(l->m_tail.m_pos);\ + memcpy(&(element->m_data), data, sizeof(*data));\ + if(element->m_prev)\ + {\ + element->m_prev->m_next = element;\ + }\ + else\ + {\ + l->m_head.m_pos = element;\ + }\ + l->m_tail.m_pos = element;\ + return l->m_tail;\ +}\ +void prefix##remove_head_##type(_CVLIST* l)\ +{\ + ELEMENT_##type* element = ((ELEMENT_##type*)(l->m_head.m_pos));\ + if(element->m_next != NULL)\ + {\ + element->m_next->m_prev = NULL;\ + }\ + l->m_head.m_pos = element->m_next;\ + INSERT_FREE(ELEMENT_##type, l, element);\ + l->m_size--;\ +}\ +void prefix##remove_tail_##type(_CVLIST* l)\ +{\ + ELEMENT_##type* element = ((ELEMENT_##type*)(l->m_tail.m_pos));\ + if(element->m_prev != NULL)\ + {\ + element->m_prev->m_next = NULL;\ + }\ + l->m_tail.m_pos = element->m_prev;\ + INSERT_FREE(ELEMENT_##type, l, element);\ + l->m_size--;\ +}\ +CVPOS prefix##insert_after_##type(_CVLIST* l, CVPOS pos, type* data)\ +{\ + ELEMENT_##type* element;\ + ELEMENT_##type* before;\ + CVPOS newpos;\ + INSERT_NEW(ELEMENT_##type, l, element);\ + memcpy(&(element->m_data), data, sizeof(*data));\ + before = (ELEMENT_##type*)pos.m_pos;\ + element->m_prev = before;\ + element->m_next = before->m_next;\ + before->m_next = element;\ + if(element->m_next != NULL)\ + element->m_next->m_prev = element;\ + else\ + l->m_tail.m_pos = element;\ + newpos.m_pos = element;\ + return newpos;\ +}\ +CVPOS prefix##insert_before_##type(_CVLIST* l, CVPOS pos, type* data)\ +{\ + ELEMENT_##type* element;\ + ELEMENT_##type* after;\ + CVPOS newpos;\ + INSERT_NEW(ELEMENT_##type, l, element);\ + memcpy(&(element->m_data), data, sizeof(*data));\ + after = (ELEMENT_##type*)pos.m_pos;\ + element->m_prev = after->m_prev;\ + element->m_next = after;\ + after->m_prev = element;\ + if(element->m_prev != NULL)\ + 
element->m_prev->m_next = element;\ + else\ + l->m_head.m_pos = element;\ + newpos.m_pos = element;\ + return newpos;\ +}\ +void prefix##remove_at_##type(_CVLIST* l, CVPOS pos)\ +{\ + ELEMENT_##type* element = ((ELEMENT_##type*)pos.m_pos);\ + if(element->m_prev != NULL)\ + {\ + element->m_prev->m_next = element->m_next;\ + }\ + else\ + {\ + l->m_head.m_pos = element->m_next;\ + }\ + if(element->m_next != NULL)\ + {\ + element->m_next->m_prev = element->m_prev;\ + }\ + else\ + {\ + l->m_tail.m_pos = element->m_prev;\ + }\ + INSERT_FREE(ELEMENT_##type, l, element);\ + l->m_size--;\ +}\ +void prefix##set_##type(CVPOS pos, type* data)\ +{\ + ELEMENT_##type* element = ((ELEMENT_##type*)(pos.m_pos));\ + memcpy(&(element->m_data), data, sizeof(data));\ +}\ +type* prefix##get_##type(CVPOS pos)\ +{\ + ELEMENT_##type* element = ((ELEMENT_##type*)(pos.m_pos));\ + return &(element->m_data);\ +}\ +int prefix##get_count_##type(_CVLIST* list)\ +{\ + return list->m_size;\ +} + +#define DECLARE_AND_IMPLEMENT_LIST(type, prefix)\ + DECLARE_LIST(type, prefix)\ + IMPLEMENT_LIST(type, prefix) + +typedef struct __index +{ + int value; + float rho, theta; +} +_index; + +DECLARE_LIST( _index, h_ ) + +#endif/*_CV_LIST_H_*/ diff --git a/opencv/imgproc/accum.cpp b/opencv/imgproc/accum.cpp new file mode 100644 index 0000000..4a588f8 --- /dev/null +++ b/opencv/imgproc/accum.cpp @@ -0,0 +1,481 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. 
+// Third party copyrights are property of their respective owners. +/ +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#include "precomp.hpp" + +namespace cv +{ + +template void +acc_( const T* src, AT* dst, const uchar* mask, int len, int cn ) +{ + int i = 0; + + if( !mask ) + { + len *= cn; + for( ; i <= len - 4; i += 4 ) + { + AT t0, t1; + t0 = src[i] + dst[i]; + t1 = src[i+1] + dst[i+1]; + dst[i] = t0; dst[i+1] = t1; + + t0 = src[i+2] + dst[i+2]; + t1 = src[i+3] + dst[i+3]; + dst[i+2] = t0; dst[i+3] = t1; + } + + for( ; i < len; i++ ) + dst[i] += src[i]; + } + else if( cn == 1 ) + { + for( ; i < len; i++ ) + { + if( mask[i] ) + dst[i] += src[i]; + } + } + else if( cn == 3 ) + { + for( ; i < len; i++, src += 3, dst += 3 ) + { + if( mask[i] ) + { + AT t0 = src[0] + dst[0]; + AT t1 = src[1] + dst[1]; + AT t2 = src[2] + dst[2]; + + dst[0] = t0; dst[1] = t1; dst[2] = t2; + } + } + } + else + { + for( ; i < len; i++, src += cn, dst += cn ) + if( mask[i] ) + { + for( int k = 0; k < cn; k++ ) + dst[k] += src[k]; + } + } +} + + +template void +accSqr_( const T* src, AT* dst, const uchar* mask, int len, int cn ) +{ + int i = 0; + + if( !mask ) + { + len *= cn; + for( ; i <= len - 4; i += 4 ) + { + AT t0, t1; + t0 = (AT)src[i]*src[i] + dst[i]; + t1 = (AT)src[i+1]*src[i+1] + dst[i+1]; + dst[i] = t0; dst[i+1] = t1; + + t0 = (AT)src[i+2]*src[i+2] + dst[i+2]; + t1 = (AT)src[i+3]*src[i+3] + dst[i+3]; + dst[i+2] = t0; dst[i+3] = t1; + } + + for( ; i < len; i++ ) + dst[i] += (AT)src[i]*src[i]; + } + else if( cn == 1 ) + { + for( ; i < len; i++ ) + { + if( mask[i] ) + dst[i] += (AT)src[i]*src[i]; + } + } + else if( cn == 3 ) + { + for( ; i < len; i++, src += 3, dst += 3 ) + { + if( mask[i] ) + { + AT t0 = (AT)src[0]*src[0] + dst[0]; + AT t1 = (AT)src[1]*src[1] + dst[1]; + AT t2 = (AT)src[2]*src[2] + dst[2]; + + dst[0] = t0; dst[1] = t1; dst[2] = t2; + } + } + } + else + { + for( ; i < len; i++, src += cn, dst += cn ) + if( mask[i] ) + { + for( int k = 0; k < cn; k++ ) + dst[k] += (AT)src[k]*src[k]; + } + } +} + + +template void +accProd_( const T* src1, const T* src2, AT* dst, const 
uchar* mask, int len, int cn ) +{ + int i = 0; + + if( !mask ) + { + len *= cn; + for( ; i <= len - 4; i += 4 ) + { + AT t0, t1; + t0 = (AT)src1[i]*src2[i] + dst[i]; + t1 = (AT)src1[i+1]*src2[i+1] + dst[i+1]; + dst[i] = t0; dst[i+1] = t1; + + t0 = (AT)src1[i+2]*src2[i+2] + dst[i+2]; + t1 = (AT)src1[i+3]*src2[i+3] + dst[i+3]; + dst[i+2] = t0; dst[i+3] = t1; + } + + for( ; i < len; i++ ) + dst[i] += (AT)src1[i]*src2[i]; + } + else if( cn == 1 ) + { + for( ; i < len; i++ ) + { + if( mask[i] ) + dst[i] += (AT)src1[i]*src2[i]; + } + } + else if( cn == 3 ) + { + for( ; i < len; i++, src1 += 3, src2 += 3, dst += 3 ) + { + if( mask[i] ) + { + AT t0 = (AT)src1[0]*src2[0] + dst[0]; + AT t1 = (AT)src1[1]*src2[1] + dst[1]; + AT t2 = (AT)src1[2]*src2[2] + dst[2]; + + dst[0] = t0; dst[1] = t1; dst[2] = t2; + } + } + } + else + { + for( ; i < len; i++, src1 += cn, src2 += cn, dst += cn ) + if( mask[i] ) + { + for( int k = 0; k < cn; k++ ) + dst[k] += (AT)src1[k]*src2[k]; + } + } +} + + +template void +accW_( const T* src, AT* dst, const uchar* mask, int len, int cn, double alpha ) +{ + AT a = (AT)alpha, b = 1 - a; + int i = 0; + + if( !mask ) + { + len *= cn; + for( ; i <= len - 4; i += 4 ) + { + AT t0, t1; + t0 = src[i]*a + dst[i]*b; + t1 = src[i+1]*a + dst[i+1]*b; + dst[i] = t0; dst[i+1] = t1; + + t0 = src[i+2]*a + dst[i+2]*b; + t1 = src[i+3]*a + dst[i+3]*b; + dst[i+2] = t0; dst[i+3] = t1; + } + + for( ; i < len; i++ ) + dst[i] = src[i]*a + dst[i]*b; + } + else if( cn == 1 ) + { + for( ; i < len; i++ ) + { + if( mask[i] ) + dst[i] = src[i]*a + dst[i]*b; + } + } + else if( cn == 3 ) + { + for( ; i < len; i++, src += 3, dst += 3 ) + { + if( mask[i] ) + { + AT t0 = src[0]*a + dst[0]*b; + AT t1 = src[1]*a + dst[1]*b; + AT t2 = src[2]*a + dst[2]*b; + + dst[0] = t0; dst[1] = t1; dst[2] = t2; + } + } + } + else + { + for( ; i < len; i++, src += cn, dst += cn ) + if( mask[i] ) + { + for( int k = 0; k < cn; k++ ) + dst[k] += src[k]*a + dst[k]*b; + } + } +} + + +#define 
DEF_ACC_FUNCS(suffix, type, acctype) \ +static void acc_##suffix(const type* src, acctype* dst, \ + const uchar* mask, int len, int cn) \ +{ acc_(src, dst, mask, len, cn); } \ +\ +static void accSqr_##suffix(const type* src, acctype* dst, \ + const uchar* mask, int len, int cn) \ +{ accSqr_(src, dst, mask, len, cn); } \ +\ +static void accProd_##suffix(const type* src1, const type* src2, \ + acctype* dst, const uchar* mask, int len, int cn) \ +{ accProd_(src1, src2, dst, mask, len, cn); } \ +\ +static void accW_##suffix(const type* src, acctype* dst, \ + const uchar* mask, int len, int cn, double alpha) \ +{ accW_(src, dst, mask, len, cn, alpha); } + + +DEF_ACC_FUNCS(8u32f, uchar, float) +DEF_ACC_FUNCS(8u64f, uchar, double) +DEF_ACC_FUNCS(16u32f, ushort, float) +DEF_ACC_FUNCS(16u64f, ushort, double) +DEF_ACC_FUNCS(32f, float, float) +DEF_ACC_FUNCS(32f64f, float, double) +DEF_ACC_FUNCS(64f, double, double) + + +typedef void (*AccFunc)(const uchar*, uchar*, const uchar*, int, int); +typedef void (*AccProdFunc)(const uchar*, const uchar*, uchar*, const uchar*, int, int); +typedef void (*AccWFunc)(const uchar*, uchar*, const uchar*, int, int, double); + +static AccFunc accTab[] = +{ + (AccFunc)acc_8u32f, (AccFunc)acc_8u64f, + (AccFunc)acc_16u32f, (AccFunc)acc_16u64f, + (AccFunc)acc_32f, (AccFunc)acc_32f64f, + (AccFunc)acc_64f +}; + +static AccFunc accSqrTab[] = +{ + (AccFunc)accSqr_8u32f, (AccFunc)accSqr_8u64f, + (AccFunc)accSqr_16u32f, (AccFunc)accSqr_16u64f, + (AccFunc)accSqr_32f, (AccFunc)accSqr_32f64f, + (AccFunc)accSqr_64f +}; + +static AccProdFunc accProdTab[] = +{ + (AccProdFunc)accProd_8u32f, (AccProdFunc)accProd_8u64f, + (AccProdFunc)accProd_16u32f, (AccProdFunc)accProd_16u64f, + (AccProdFunc)accProd_32f, (AccProdFunc)accProd_32f64f, + (AccProdFunc)accProd_64f +}; + +static AccWFunc accWTab[] = +{ + (AccWFunc)accW_8u32f, (AccWFunc)accW_8u64f, + (AccWFunc)accW_16u32f, (AccWFunc)accW_16u64f, + (AccWFunc)accW_32f, (AccWFunc)accW_32f64f, + (AccWFunc)accW_64f +}; + 
+inline int getAccTabIdx(int sdepth, int ddepth) +{ + return sdepth == CV_8U && ddepth == CV_32F ? 0 : + sdepth == CV_8U && ddepth == CV_64F ? 1 : + sdepth == CV_16U && ddepth == CV_32F ? 2 : + sdepth == CV_16U && ddepth == CV_64F ? 3 : + sdepth == CV_32F && ddepth == CV_32F ? 4 : + sdepth == CV_32F && ddepth == CV_64F ? 5 : + sdepth == CV_64F && ddepth == CV_64F ? 6 : -1; +} + +} + +void cv::accumulate( InputArray _src, InputOutputArray _dst, InputArray _mask ) +{ + Mat src = _src.getMat(), dst = _dst.getMat(), mask = _mask.getMat(); + int sdepth = src.depth(), ddepth = dst.depth(), cn = src.channels(); + + CV_Assert( dst.size == src.size && dst.channels() == cn ); + CV_Assert( mask.empty() || (mask.size == src.size && mask.type() == CV_8U) ); + + int fidx = getAccTabIdx(sdepth, ddepth); + AccFunc func = fidx >= 0 ? accTab[fidx] : 0; + CV_Assert( func != 0 ); + + const Mat* arrays[] = {&src, &dst, &mask, 0}; + uchar* ptrs[3]; + NAryMatIterator it(arrays, ptrs); + int len = (int)it.size; + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + func(ptrs[0], ptrs[1], ptrs[2], len, cn); +} + + +void cv::accumulateSquare( InputArray _src, InputOutputArray _dst, InputArray _mask ) +{ + Mat src = _src.getMat(), dst = _dst.getMat(), mask = _mask.getMat(); + int sdepth = src.depth(), ddepth = dst.depth(), cn = src.channels(); + + CV_Assert( dst.size == src.size && dst.channels() == cn ); + CV_Assert( mask.empty() || (mask.size == src.size && mask.type() == CV_8U) ); + + int fidx = getAccTabIdx(sdepth, ddepth); + AccFunc func = fidx >= 0 ? 
accSqrTab[fidx] : 0; + CV_Assert( func != 0 ); + + const Mat* arrays[] = {&src, &dst, &mask, 0}; + uchar* ptrs[3]; + NAryMatIterator it(arrays, ptrs); + int len = (int)it.size; + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + func(ptrs[0], ptrs[1], ptrs[2], len, cn); +} + +void cv::accumulateProduct( InputArray _src1, InputArray _src2, + InputOutputArray _dst, InputArray _mask ) +{ + Mat src1 = _src1.getMat(), src2 = _src2.getMat(), dst = _dst.getMat(), mask = _mask.getMat(); + int sdepth = src1.depth(), ddepth = dst.depth(), cn = src1.channels(); + + CV_Assert( src2.size && src1.size && src2.type() == src1.type() ); + CV_Assert( dst.size == src1.size && dst.channels() == cn ); + CV_Assert( mask.empty() || (mask.size == src1.size && mask.type() == CV_8U) ); + + int fidx = getAccTabIdx(sdepth, ddepth); + AccProdFunc func = fidx >= 0 ? accProdTab[fidx] : 0; + CV_Assert( func != 0 ); + + const Mat* arrays[] = {&src1, &src2, &dst, &mask, 0}; + uchar* ptrs[4]; + NAryMatIterator it(arrays, ptrs); + int len = (int)it.size; + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + func(ptrs[0], ptrs[1], ptrs[2], ptrs[3], len, cn); +} + + +void cv::accumulateWeighted( InputArray _src, InputOutputArray _dst, + double alpha, InputArray _mask ) +{ + Mat src = _src.getMat(), dst = _dst.getMat(), mask = _mask.getMat(); + int sdepth = src.depth(), ddepth = dst.depth(), cn = src.channels(); + + CV_Assert( dst.size == src.size && dst.channels() == cn ); + CV_Assert( mask.empty() || (mask.size == src.size && mask.type() == CV_8U) ); + + int fidx = getAccTabIdx(sdepth, ddepth); + AccWFunc func = fidx >= 0 ? 
accWTab[fidx] : 0; + CV_Assert( func != 0 ); + + const Mat* arrays[] = {&src, &dst, &mask, 0}; + uchar* ptrs[3]; + NAryMatIterator it(arrays, ptrs); + int len = (int)it.size; + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + func(ptrs[0], ptrs[1], ptrs[2], len, cn, alpha); +} + + +CV_IMPL void +cvAcc( const void* arr, void* sumarr, const void* maskarr ) +{ + cv::Mat src = cv::cvarrToMat(arr), dst = cv::cvarrToMat(sumarr), mask; + if( maskarr ) + mask = cv::cvarrToMat(maskarr); + cv::accumulate( src, dst, mask ); +} + +CV_IMPL void +cvSquareAcc( const void* arr, void* sumarr, const void* maskarr ) +{ + cv::Mat src = cv::cvarrToMat(arr), dst = cv::cvarrToMat(sumarr), mask; + if( maskarr ) + mask = cv::cvarrToMat(maskarr); + cv::accumulateSquare( src, dst, mask ); +} + +CV_IMPL void +cvMultiplyAcc( const void* arr1, const void* arr2, + void* sumarr, const void* maskarr ) +{ + cv::Mat src1 = cv::cvarrToMat(arr1), src2 = cv::cvarrToMat(arr2); + cv::Mat dst = cv::cvarrToMat(sumarr), mask; + if( maskarr ) + mask = cv::cvarrToMat(maskarr); + cv::accumulateProduct( src1, src2, dst, mask ); +} + +CV_IMPL void +cvRunningAvg( const void* arr, void* sumarr, double alpha, const void* maskarr ) +{ + cv::Mat src = cv::cvarrToMat(arr), dst = cv::cvarrToMat(sumarr), mask; + if( maskarr ) + mask = cv::cvarrToMat(maskarr); + cv::accumulateWeighted( src, dst, alpha, mask ); +} + +/* End of file. */ diff --git a/opencv/imgproc/approx.cpp b/opencv/imgproc/approx.cpp new file mode 100644 index 0000000..fe86df3 --- /dev/null +++ b/opencv/imgproc/approx.cpp @@ -0,0 +1,803 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ +#include "precomp.hpp" + +/****************************************************************************************\ +* Chain Approximation * +\****************************************************************************************/ + +typedef struct _CvPtInfo +{ + CvPoint pt; + int k; /* support region */ + int s; /* curvature value */ + struct _CvPtInfo *next; +} +_CvPtInfo; + + +/* curvature: 0 - 1-curvature, 1 - k-cosine curvature. */ +CvSeq* icvApproximateChainTC89( CvChain* chain, int header_size, + CvMemStorage* storage, int method ) +{ + static const int abs_diff[] = { 1, 2, 3, 4, 3, 2, 1, 0, 1, 2, 3, 4, 3, 2, 1 }; + + cv::AutoBuffer<_CvPtInfo> buf(chain->total + 8); + + _CvPtInfo temp; + _CvPtInfo *array = buf, *first = 0, *current = 0, *prev_current = 0; + int i, j, i1, i2, s, len; + int count = chain->total; + + CvChainPtReader reader; + CvSeqWriter writer; + CvPoint pt = chain->origin; + + CV_Assert( CV_IS_SEQ_CHAIN_CONTOUR( chain )); + CV_Assert( header_size >= (int)sizeof(CvContour) ); + + cvStartWriteSeq( (chain->flags & ~CV_SEQ_ELTYPE_MASK) | CV_SEQ_ELTYPE_POINT, + header_size, sizeof( CvPoint ), storage, &writer ); + + if( chain->total == 0 ) + { + CV_WRITE_SEQ_ELEM( pt, writer ); + return cvEndWriteSeq( &writer ); + } + + cvStartReadChainPoints( chain, &reader ); + + temp.next = 0; + current = &temp; + + /* Pass 0. + Restores all the digital curve points from the chain code. 
+ Removes the points (from the resultant polygon) + that have zero 1-curvature */ + for( i = 0; i < count; i++ ) + { + int prev_code = *reader.prev_elem; + + reader.prev_elem = reader.ptr; + CV_READ_CHAIN_POINT( pt, reader ); + + /* calc 1-curvature */ + s = abs_diff[reader.code - prev_code + 7]; + + if( method <= CV_CHAIN_APPROX_SIMPLE ) + { + if( method == CV_CHAIN_APPROX_NONE || s != 0 ) + { + CV_WRITE_SEQ_ELEM( pt, writer ); + } + } + else + { + if( s != 0 ) + current = current->next = array + i; + array[i].s = s; + array[i].pt = pt; + } + } + + //assert( pt.x == chain->origin.x && pt.y == chain->origin.y ); + + if( method <= CV_CHAIN_APPROX_SIMPLE ) + return cvEndWriteSeq( &writer ); + + current->next = 0; + + len = i; + current = temp.next; + + assert( current ); + + /* Pass 1. + Determines support region for all the remained points */ + do + { + CvPoint pt0; + int k, l = 0, d_num = 0; + + i = (int)(current - array); + pt0 = array[i].pt; + + /* determine support region */ + for( k = 1;; k++ ) + { + int lk, dk_num; + int dx, dy; + Cv32suf d; + + assert( k <= len ); + + /* calc indices */ + i1 = i - k; + i1 += i1 < 0 ? len : 0; + i2 = i + k; + i2 -= i2 >= len ? len : 0; + + dx = array[i2].pt.x - array[i1].pt.x; + dy = array[i2].pt.y - array[i1].pt.y; + + /* distance between p_(i - k) and p_(i + k) */ + lk = dx * dx + dy * dy; + + /* distance between p_i and the line (p_(i-k), p_(i+k)) */ + dk_num = (pt0.x - array[i1].pt.x) * dy - (pt0.y - array[i1].pt.y) * dx; + d.f = (float) (((double) d_num) * lk - ((double) dk_num) * l); + + if( k > 1 && (l >= lk || ((d_num > 0 && d.i <= 0) || (d_num < 0 && d.i >= 0)))) + break; + + d_num = dk_num; + l = lk; + } + + current->k = --k; + + /* determine cosine curvature if it should be used */ + if( method == CV_CHAIN_APPROX_TC89_KCOS ) + { + /* calc k-cosine curvature */ + for( j = k, s = 0; j > 0; j-- ) + { + double temp_num; + int dx1, dy1, dx2, dy2; + Cv32suf sk; + + i1 = i - j; + i1 += i1 < 0 ? 
len : 0; + i2 = i + j; + i2 -= i2 >= len ? len : 0; + + dx1 = array[i1].pt.x - pt0.x; + dy1 = array[i1].pt.y - pt0.y; + dx2 = array[i2].pt.x - pt0.x; + dy2 = array[i2].pt.y - pt0.y; + + if( (dx1 | dy1) == 0 || (dx2 | dy2) == 0 ) + break; + + temp_num = dx1 * dx2 + dy1 * dy2; + temp_num = + (float) (temp_num / + sqrt( ((double)dx1 * dx1 + (double)dy1 * dy1) * + ((double)dx2 * dx2 + (double)dy2 * dy2) )); + sk.f = (float) (temp_num + 1.1); + + assert( 0 <= sk.f && sk.f <= 2.2 ); + if( j < k && sk.i <= s ) + break; + + s = sk.i; + } + current->s = s; + } + current = current->next; + } + while( current != 0 ); + + prev_current = &temp; + current = temp.next; + + /* Pass 2. + Performs non-maxima supression */ + do + { + int k2 = current->k >> 1; + + s = current->s; + i = (int)(current - array); + + for( j = 1; j <= k2; j++ ) + { + i2 = i - j; + i2 += i2 < 0 ? len : 0; + + if( array[i2].s > s ) + break; + + i2 = i + j; + i2 -= i2 >= len ? len : 0; + + if( array[i2].s > s ) + break; + } + + if( j <= k2 ) /* exclude point */ + { + prev_current->next = current->next; + current->s = 0; /* "clear" point */ + } + else + prev_current = current; + current = current->next; + } + while( current != 0 ); + + /* Pass 3. + Removes non-dominant points with 1-length support region */ + current = temp.next; + assert( current ); + prev_current = &temp; + + do + { + if( current->k == 1 ) + { + s = current->s; + i = (int)(current - array); + + i1 = i - 1; + i1 += i1 < 0 ? len : 0; + + i2 = i + 1; + i2 -= i2 >= len ? len : 0; + + if( s <= array[i1].s || s <= array[i2].s ) + { + prev_current->next = current->next; + current->s = 0; + } + else + prev_current = current; + } + else + prev_current = current; + current = current->next; + } + while( current != 0 ); + + if( method == CV_CHAIN_APPROX_TC89_KCOS ) + goto copy_vect; + + /* Pass 4. 
+ Cleans remained couples of points */ + assert( temp.next ); + + if( array[0].s != 0 && array[len - 1].s != 0 ) /* specific case */ + { + for( i1 = 1; i1 < len && array[i1].s != 0; i1++ ) + { + array[i1 - 1].s = 0; + } + if( i1 == len ) + goto copy_vect; /* all points survived */ + i1--; + + for( i2 = len - 2; i2 > 0 && array[i2].s != 0; i2-- ) + { + array[i2].next = 0; + array[i2 + 1].s = 0; + } + i2++; + + if( i1 == 0 && i2 == len - 1 ) /* only two points */ + { + i1 = (int)(array[0].next - array); + array[len] = array[0]; /* move to the end */ + array[len].next = 0; + array[len - 1].next = array + len; + } + temp.next = array + i1; + } + + current = temp.next; + first = prev_current = &temp; + count = 1; + + /* do last pass */ + do + { + if( current->next == 0 || current->next - current != 1 ) + { + if( count >= 2 ) + { + if( count == 2 ) + { + int s1 = prev_current->s; + int s2 = current->s; + + if( s1 > s2 || (s1 == s2 && prev_current->k <= current->k) ) + /* remove second */ + prev_current->next = current->next; + else + /* remove first */ + first->next = current; + } + else + first->next->next = current; + } + first = current; + count = 1; + } + else + count++; + prev_current = current; + current = current->next; + } + while( current != 0 ); + +copy_vect: + + // gather points + current = temp.next; + assert( current ); + + do + { + CV_WRITE_SEQ_ELEM( current->pt, writer ); + current = current->next; + } + while( current != 0 ); + + return cvEndWriteSeq( &writer ); +} + + +/*Applies some approximation algorithm to chain-coded contour(s) and + converts it/them to polygonal representation */ +CV_IMPL CvSeq* +cvApproxChains( CvSeq* src_seq, + CvMemStorage* storage, + int method, + double /*parameter*/, + int minimal_perimeter, + int recursive ) +{ + CvSeq *prev_contour = 0, *parent = 0; + CvSeq *dst_seq = 0; + + if( !src_seq || !storage ) + CV_Error( CV_StsNullPtr, "" ); + if( method > CV_CHAIN_APPROX_TC89_KCOS || method <= 0 || minimal_perimeter < 0 ) + 
CV_Error( CV_StsOutOfRange, "" ); + + while( src_seq != 0 ) + { + int len = src_seq->total; + + if( len >= minimal_perimeter ) + { + CvSeq *contour = 0; + + switch( method ) + { + case CV_CHAIN_APPROX_NONE: + case CV_CHAIN_APPROX_SIMPLE: + case CV_CHAIN_APPROX_TC89_L1: + case CV_CHAIN_APPROX_TC89_KCOS: + contour = icvApproximateChainTC89( (CvChain *) src_seq, sizeof( CvContour ), storage, method ); + break; + default: + CV_Error( CV_StsOutOfRange, "" ); + } + + if( contour->total > 0 ) + { + cvBoundingRect( contour, 1 ); + + contour->v_prev = parent; + contour->h_prev = prev_contour; + + if( prev_contour ) + prev_contour->h_next = contour; + else if( parent ) + parent->v_next = contour; + prev_contour = contour; + if( !dst_seq ) + dst_seq = prev_contour; + } + else /* if resultant contour has zero length, skip it */ + { + len = -1; + } + } + + if( !recursive ) + break; + + if( src_seq->v_next && len >= minimal_perimeter ) + { + assert( prev_contour != 0 ); + parent = prev_contour; + prev_contour = 0; + src_seq = src_seq->v_next; + } + else + { + while( src_seq->h_next == 0 ) + { + src_seq = src_seq->v_prev; + if( src_seq == 0 ) + break; + prev_contour = parent; + if( parent ) + parent = parent->v_prev; + } + if( src_seq ) + src_seq = src_seq->h_next; + } + } + + return dst_seq; +} + + +/****************************************************************************************\ +* Polygonal Approximation * +\****************************************************************************************/ + +/* Ramer-Douglas-Peucker algorithm for polygon simplification */ + +/* the version for integer point coordinates */ +template static CvSeq* +icvApproxPolyDP( CvSeq* src_contour, int header_size, + CvMemStorage* storage, double eps ) +{ + typedef cv::Point_ PT; + int init_iters = 3; + CvSlice slice = {0, 0}, right_slice = {0, 0}; + CvSeqReader reader, reader2; + CvSeqWriter writer; + PT start_pt(-1000000, -1000000), end_pt(0, 0), pt(0,0); + int i = 0, j, count = 
src_contour->total, new_count; + int is_closed = CV_IS_SEQ_CLOSED( src_contour ); + bool le_eps = false; + CvMemStorage* temp_storage = 0; + CvSeq* stack = 0; + CvSeq* dst_contour; + + assert( CV_SEQ_ELTYPE(src_contour) == cv::DataType::type ); + cvStartWriteSeq( src_contour->flags, header_size, sizeof(pt), storage, &writer ); + + if( src_contour->total == 0 ) + return cvEndWriteSeq( &writer ); + + temp_storage = cvCreateChildMemStorage( storage ); + + assert( src_contour->first != 0 ); + stack = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvSlice), temp_storage ); + eps *= eps; + cvStartReadSeq( src_contour, &reader, 0 ); + + if( !is_closed ) + { + right_slice.start_index = count; + end_pt = *(PT*)(reader.ptr); + start_pt = *(PT*)cvGetSeqElem( src_contour, -1 ); + + if( start_pt.x != end_pt.x || start_pt.y != end_pt.y ) + { + slice.start_index = 0; + slice.end_index = count - 1; + cvSeqPush( stack, &slice ); + } + else + { + is_closed = 1; + init_iters = 1; + } + } + + if( is_closed ) + { + /* 1. Find approximately two farthest points of the contour */ + right_slice.start_index = 0; + + for( i = 0; i < init_iters; i++ ) + { + double dist, max_dist = 0; + cvSetSeqReaderPos( &reader, right_slice.start_index, 1 ); + CV_READ_SEQ_ELEM( start_pt, reader ); /* read the first point */ + + for( j = 1; j < count; j++ ) + { + double dx, dy; + + CV_READ_SEQ_ELEM( pt, reader ); + dx = pt.x - start_pt.x; + dy = pt.y - start_pt.y; + + dist = dx * dx + dy * dy; + + if( dist > max_dist ) + { + max_dist = dist; + right_slice.start_index = j; + } + } + + le_eps = max_dist <= eps; + } + + /* 2. initialize the stack */ + if( !le_eps ) + { + slice.start_index = cvGetSeqReaderPos( &reader ); + slice.end_index = right_slice.start_index += slice.start_index; + + right_slice.start_index -= right_slice.start_index >= count ? 
count : 0; + right_slice.end_index = slice.start_index; + if( right_slice.end_index < right_slice.start_index ) + right_slice.end_index += count; + + cvSeqPush( stack, &right_slice ); + cvSeqPush( stack, &slice ); + } + else + CV_WRITE_SEQ_ELEM( start_pt, writer ); + } + + /* 3. run recursive process */ + while( stack->total != 0 ) + { + cvSeqPop( stack, &slice ); + + cvSetSeqReaderPos( &reader, slice.end_index ); + CV_READ_SEQ_ELEM( end_pt, reader ); + + cvSetSeqReaderPos( &reader, slice.start_index ); + CV_READ_SEQ_ELEM( start_pt, reader ); + + if( slice.end_index > slice.start_index + 1 ) + { + double dx, dy, dist, max_dist = 0; + + dx = end_pt.x - start_pt.x; + dy = end_pt.y - start_pt.y; + + assert( dx != 0 || dy != 0 ); + + for( i = slice.start_index + 1; i < slice.end_index; i++ ) + { + CV_READ_SEQ_ELEM( pt, reader ); + dist = fabs((pt.y - start_pt.y) * dx - (pt.x - start_pt.x) * dy); + + if( dist > max_dist ) + { + max_dist = dist; + right_slice.start_index = i; + } + } + + le_eps = max_dist * max_dist <= eps * (dx * dx + dy * dy); + } + else + { + assert( slice.end_index > slice.start_index ); + le_eps = true; + /* read starting point */ + cvSetSeqReaderPos( &reader, slice.start_index ); + CV_READ_SEQ_ELEM( start_pt, reader ); + } + + if( le_eps ) + { + CV_WRITE_SEQ_ELEM( start_pt, writer ); + } + else + { + right_slice.end_index = slice.end_index; + slice.end_index = right_slice.start_index; + cvSeqPush( stack, &right_slice ); + cvSeqPush( stack, &slice ); + } + } + + is_closed = CV_IS_SEQ_CLOSED( src_contour ); + if( !is_closed ) + CV_WRITE_SEQ_ELEM( end_pt, writer ); + + dst_contour = cvEndWriteSeq( &writer ); + + // last stage: do final clean-up of the approximated contour - + // remove extra points on the [almost] stright lines. 
+ + cvStartReadSeq( dst_contour, &reader, is_closed ); + CV_READ_SEQ_ELEM( start_pt, reader ); + + reader2 = reader; + CV_READ_SEQ_ELEM( pt, reader ); + + new_count = count = dst_contour->total; + for( i = !is_closed; i < count - !is_closed && new_count > 2; i++ ) + { + double dx, dy, dist; + CV_READ_SEQ_ELEM( end_pt, reader ); + + dx = end_pt.x - start_pt.x; + dy = end_pt.y - start_pt.y; + dist = fabs((pt.x - start_pt.x)*dy - (pt.y - start_pt.y)*dx); + if( dist * dist <= 0.5*eps*(dx*dx + dy*dy) && dx != 0 && dy != 0 ) + { + new_count--; + *((PT*)reader2.ptr) = start_pt = end_pt; + CV_NEXT_SEQ_ELEM( sizeof(pt), reader2 ); + CV_READ_SEQ_ELEM( pt, reader ); + i++; + continue; + } + *((PT*)reader2.ptr) = start_pt = pt; + CV_NEXT_SEQ_ELEM( sizeof(pt), reader2 ); + pt = end_pt; + } + + if( !is_closed ) + *((PT*)reader2.ptr) = pt; + + if( new_count < count ) + cvSeqPopMulti( dst_contour, 0, count - new_count ); + + cvReleaseMemStorage( &temp_storage ); + return dst_contour; +} + + +CV_IMPL CvSeq* +cvApproxPoly( const void* array, int header_size, + CvMemStorage* storage, int method, + double parameter, int parameter2 ) +{ + CvSeq* dst_seq = 0; + CvSeq *prev_contour = 0, *parent = 0; + CvContour contour_header; + CvSeq* src_seq = 0; + CvSeqBlock block; + int recursive = 0; + + if( CV_IS_SEQ( array )) + { + src_seq = (CvSeq*)array; + if( !CV_IS_SEQ_POLYLINE( src_seq )) + CV_Error( CV_StsBadArg, "Unsupported sequence type" ); + + recursive = parameter2; + + if( !storage ) + storage = src_seq->storage; + } + else + { + src_seq = cvPointSeqFromMat( + CV_SEQ_KIND_CURVE | (parameter2 ? CV_SEQ_FLAG_CLOSED : 0), + array, &contour_header, &block ); + } + + if( !storage ) + CV_Error( CV_StsNullPtr, "NULL storage pointer " ); + + if( header_size < 0 ) + CV_Error( CV_StsOutOfRange, "header_size is negative. 
" + "Pass 0 to make the destination header_size == input header_size" ); + + if( header_size == 0 ) + header_size = src_seq->header_size; + + if( !CV_IS_SEQ_POLYLINE( src_seq )) + { + if( CV_IS_SEQ_CHAIN( src_seq )) + { + CV_Error( CV_StsBadArg, "Input curves are not polygonal. " + "Use cvApproxChains first" ); + } + else + { + CV_Error( CV_StsBadArg, "Input curves have uknown type" ); + } + } + + if( header_size == 0 ) + header_size = src_seq->header_size; + + if( header_size < (int)sizeof(CvContour) ) + CV_Error( CV_StsBadSize, "New header size must be non-less than sizeof(CvContour)" ); + + if( method != CV_POLY_APPROX_DP ) + CV_Error( CV_StsOutOfRange, "Unknown approximation method" ); + + while( src_seq != 0 ) + { + CvSeq *contour = 0; + + switch (method) + { + case CV_POLY_APPROX_DP: + if( parameter < 0 ) + CV_Error( CV_StsOutOfRange, "Accuracy must be non-negative" ); + + if( CV_SEQ_ELTYPE(src_seq) == CV_32SC2 ) + contour = icvApproxPolyDP( src_seq, header_size, storage, parameter ); + else + contour = icvApproxPolyDP( src_seq, header_size, storage, parameter ); + break; + default: + assert(0); + CV_Error( CV_StsBadArg, "Invalid approximation method" ); + } + + assert( contour ); + + if( header_size >= (int)sizeof(CvContour)) + cvBoundingRect( contour, 1 ); + + contour->v_prev = parent; + contour->h_prev = prev_contour; + + if( prev_contour ) + prev_contour->h_next = contour; + else if( parent ) + parent->v_next = contour; + prev_contour = contour; + if( !dst_seq ) + dst_seq = prev_contour; + + if( !recursive ) + break; + + if( src_seq->v_next ) + { + assert( prev_contour != 0 ); + parent = prev_contour; + prev_contour = 0; + src_seq = src_seq->v_next; + } + else + { + while( src_seq->h_next == 0 ) + { + src_seq = src_seq->v_prev; + if( src_seq == 0 ) + break; + prev_contour = parent; + if( parent ) + parent = parent->v_prev; + } + if( src_seq ) + src_seq = src_seq->h_next; + } + } + + return dst_seq; +} + +/* End of file. 
*/ diff --git a/opencv/imgproc/canny.cpp b/opencv/imgproc/canny.cpp new file mode 100644 index 0000000..d02c038 --- /dev/null +++ b/opencv/imgproc/canny.cpp @@ -0,0 +1,349 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" + +CV_IMPL void cvCanny( const void* srcarr, void* dstarr, + double low_thresh, double high_thresh, + int aperture_size ) +{ + cv::Ptr dx, dy; + cv::AutoBuffer buffer; + std::vector stack; + uchar **stack_top = 0, **stack_bottom = 0; + + CvMat srcstub, *src = cvGetMat( srcarr, &srcstub ); + CvMat dststub, *dst = cvGetMat( dstarr, &dststub ); + CvSize size; + int flags = aperture_size; + int low, high; + int* mag_buf[3]; + uchar* map; + ptrdiff_t mapstep; + int maxsize; + int i, j; + CvMat mag_row; + + if( CV_MAT_TYPE( src->type ) != CV_8UC1 || + CV_MAT_TYPE( dst->type ) != CV_8UC1 ) + CV_Error( CV_StsUnsupportedFormat, "" ); + + if( !CV_ARE_SIZES_EQ( src, dst )) + CV_Error( CV_StsUnmatchedSizes, "" ); + + if( low_thresh > high_thresh ) + { + double t; + CV_SWAP( low_thresh, high_thresh, t ); + } + + aperture_size &= INT_MAX; + if( (aperture_size & 1) == 0 || aperture_size < 3 || aperture_size > 7 ) + CV_Error( CV_StsBadFlag, "" ); + + size = cvGetMatSize( src ); + + dx = cvCreateMat( size.height, size.width, CV_16SC1 ); + dy = cvCreateMat( size.height, size.width, CV_16SC1 ); + cvSobel( src, dx, 1, 0, aperture_size ); + cvSobel( src, dy, 0, 1, aperture_size ); + + /*if( icvCannyGetSize_p && icvCanny_16s8u_C1R_p && !(flags & CV_CANNY_L2_GRADIENT) ) + { + int buf_size= 0; + IPPI_CALL( icvCannyGetSize_p( size, &buf_size )); + CV_CALL( buffer = cvAlloc( buf_size )); + IPPI_CALL( 
icvCanny_16s8u_C1R_p( (short*)dx->data.ptr, dx->step, + (short*)dy->data.ptr, dy->step, + dst->data.ptr, dst->step, + size, (float)low_thresh, + (float)high_thresh, buffer )); + EXIT; + }*/ + + if( flags & CV_CANNY_L2_GRADIENT ) + { + Cv32suf ul, uh; + ul.f = (float)low_thresh; + uh.f = (float)high_thresh; + + low = ul.i; + high = uh.i; + } + else + { + low = cvFloor( low_thresh ); + high = cvFloor( high_thresh ); + } + + buffer.allocate( (size.width+2)*(size.height+2) + (size.width+2)*3*sizeof(int) ); + + mag_buf[0] = (int*)(char*)buffer; + mag_buf[1] = mag_buf[0] + size.width + 2; + mag_buf[2] = mag_buf[1] + size.width + 2; + map = (uchar*)(mag_buf[2] + size.width + 2); + mapstep = size.width + 2; + + maxsize = MAX( 1 << 10, size.width*size.height/10 ); + stack.resize( maxsize ); + stack_top = stack_bottom = &stack[0]; + + memset( mag_buf[0], 0, (size.width+2)*sizeof(int) ); + memset( map, 1, mapstep ); + memset( map + mapstep*(size.height + 1), 1, mapstep ); + + /* sector numbers + (Top-Left Origin) + + 1 2 3 + * * * + * * * + 0*******0 + * * * + * * * + 3 2 1 + */ + + #define CANNY_PUSH(d) *(d) = (uchar)2, *stack_top++ = (d) + #define CANNY_POP(d) (d) = *--stack_top + + mag_row = cvMat( 1, size.width, CV_32F ); + + // calculate magnitude and angle of gradient, perform non-maxima supression. 
+ // fill the map with one of the following values: + // 0 - the pixel might belong to an edge + // 1 - the pixel can not belong to an edge + // 2 - the pixel does belong to an edge + for( i = 0; i <= size.height; i++ ) + { + int* _mag = mag_buf[(i > 0) + 1] + 1; + float* _magf = (float*)_mag; + const short* _dx = (short*)(dx->data.ptr + dx->step*i); + const short* _dy = (short*)(dy->data.ptr + dy->step*i); + uchar* _map; + int x, y; + ptrdiff_t magstep1, magstep2; + int prev_flag = 0; + + if( i < size.height ) + { + _mag[-1] = _mag[size.width] = 0; + + if( !(flags & CV_CANNY_L2_GRADIENT) ) + for( j = 0; j < size.width; j++ ) + _mag[j] = abs(_dx[j]) + abs(_dy[j]); + /*else if( icvFilterSobelVert_8u16s_C1R_p != 0 ) // check for IPP + { + // use vectorized sqrt + mag_row.data.fl = _magf; + for( j = 0; j < size.width; j++ ) + { + x = _dx[j]; y = _dy[j]; + _magf[j] = (float)((double)x*x + (double)y*y); + } + cvPow( &mag_row, &mag_row, 0.5 ); + }*/ + else + { + for( j = 0; j < size.width; j++ ) + { + x = _dx[j]; y = _dy[j]; + _magf[j] = (float)std::sqrt((double)x*x + (double)y*y); + } + } + } + else + memset( _mag-1, 0, (size.width + 2)*sizeof(int) ); + + // at the very beginning we do not have a complete ring + // buffer of 3 magnitude rows for non-maxima suppression + if( i == 0 ) + continue; + + _map = map + mapstep*i + 1; + _map[-1] = _map[size.width] = 1; + + _mag = mag_buf[1] + 1; // take the central row + _dx = (short*)(dx->data.ptr + dx->step*(i-1)); + _dy = (short*)(dy->data.ptr + dy->step*(i-1)); + + magstep1 = mag_buf[2] - mag_buf[1]; + magstep2 = mag_buf[0] - mag_buf[1]; + + if( (stack_top - stack_bottom) + size.width > maxsize ) + { + int sz = (int)(stack_top - stack_bottom); + maxsize = MAX( maxsize * 3/2, maxsize + 8 ); + stack.resize(maxsize); + stack_bottom = &stack[0]; + stack_top = stack_bottom + sz; + } + + for( j = 0; j < size.width; j++ ) + { + #define CANNY_SHIFT 15 + #define TG22 (int)(0.4142135623730950488016887242097*(1< low ) + { + int tg22x = 
x * TG22; + int tg67x = tg22x + ((x + x) << CANNY_SHIFT); + + y <<= CANNY_SHIFT; + + if( y < tg22x ) + { + if( m > _mag[j-1] && m >= _mag[j+1] ) + { + if( m > high && !prev_flag && _map[j-mapstep] != 2 ) + { + CANNY_PUSH( _map + j ); + prev_flag = 1; + } + else + _map[j] = (uchar)0; + continue; + } + } + else if( y > tg67x ) + { + if( m > _mag[j+magstep2] && m >= _mag[j+magstep1] ) + { + if( m > high && !prev_flag && _map[j-mapstep] != 2 ) + { + CANNY_PUSH( _map + j ); + prev_flag = 1; + } + else + _map[j] = (uchar)0; + continue; + } + } + else + { + s = s < 0 ? -1 : 1; + if( m > _mag[j+magstep2-s] && m > _mag[j+magstep1+s] ) + { + if( m > high && !prev_flag && _map[j-mapstep] != 2 ) + { + CANNY_PUSH( _map + j ); + prev_flag = 1; + } + else + _map[j] = (uchar)0; + continue; + } + } + } + prev_flag = 0; + _map[j] = (uchar)1; + } + + // scroll the ring buffer + _mag = mag_buf[0]; + mag_buf[0] = mag_buf[1]; + mag_buf[1] = mag_buf[2]; + mag_buf[2] = _mag; + } + + // now track the edges (hysteresis thresholding) + while( stack_top > stack_bottom ) + { + uchar* m; + if( (stack_top - stack_bottom) + 8 > maxsize ) + { + int sz = (int)(stack_top - stack_bottom); + maxsize = MAX( maxsize * 3/2, maxsize + 8 ); + stack.resize(maxsize); + stack_bottom = &stack[0]; + stack_top = stack_bottom + sz; + } + + CANNY_POP(m); + + if( !m[-1] ) + CANNY_PUSH( m - 1 ); + if( !m[1] ) + CANNY_PUSH( m + 1 ); + if( !m[-mapstep-1] ) + CANNY_PUSH( m - mapstep - 1 ); + if( !m[-mapstep] ) + CANNY_PUSH( m - mapstep ); + if( !m[-mapstep+1] ) + CANNY_PUSH( m - mapstep + 1 ); + if( !m[mapstep-1] ) + CANNY_PUSH( m + mapstep - 1 ); + if( !m[mapstep] ) + CANNY_PUSH( m + mapstep ); + if( !m[mapstep+1] ) + CANNY_PUSH( m + mapstep + 1 ); + } + + // the final pass, form the final image + for( i = 0; i < size.height; i++ ) + { + const uchar* _map = map + mapstep*(i+1) + 1; + uchar* _dst = dst->data.ptr + dst->step*i; + + for( j = 0; j < size.width; j++ ) + _dst[j] = (uchar)-(_map[j] >> 1); + } +} + +void 
cv::Canny( InputArray image, OutputArray _edges, + double threshold1, double threshold2, + int apertureSize, bool L2gradient ) +{ + Mat src = image.getMat(); + _edges.create(src.size(), CV_8U); + CvMat c_src = src, c_dst = _edges.getMat(); + cvCanny( &c_src, &c_dst, threshold1, threshold2, + apertureSize + (L2gradient ? CV_CANNY_L2_GRADIENT : 0)); +} + +/* End of file. */ diff --git a/opencv/imgproc/color.cpp b/opencv/imgproc/color.cpp new file mode 100644 index 0000000..b7e0360 --- /dev/null +++ b/opencv/imgproc/color.cpp @@ -0,0 +1,3179 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2010, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/********************************* COPYRIGHT NOTICE *******************************\ + The function for RGB to Lab conversion is based on the MATLAB script + RGB2Lab.m translated by Mark Ruzon from C code by Yossi Rubner, 23 September 1997. + See the page [http://vision.stanford.edu/~ruzon/software/rgblab.html] +\**********************************************************************************/ + +/********************************* COPYRIGHT NOTICE *******************************\ + Original code for Bayer->BGR/RGB conversion is provided by Dirk Schaefer + from MD-Mathematische Dienste GmbH. Below is the copyright notice: + + IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. + By downloading, copying, installing or using the software you agree + to this license. If you do not agree to this license, do not download, + install, copy or use the software. 
+ + Contributors License Agreement: + + Copyright (c) 2002, + MD-Mathematische Dienste GmbH + Im Defdahl 5-10 + 44141 Dortmund + Germany + www.md-it.de + + Redistribution and use in source and binary forms, + with or without modification, are permitted provided + that the following conditions are met: + + Redistributions of source code must retain + the above copyright notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + The name of Contributor may not be used to endorse or promote products + derived from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + THE POSSIBILITY OF SUCH DAMAGE. 
+\**********************************************************************************/ + +#include "precomp.hpp" +#include + +namespace cv +{ + +// computes cubic spline coefficients for a function: (xi=i, yi=f[i]), i=0..n +template static void splineBuild(const _Tp* f, int n, _Tp* tab) +{ + _Tp cn = 0; + int i; + tab[0] = tab[1] = (_Tp)0; + + for(i = 1; i < n-1; i++) + { + _Tp t = 3*(f[i+1] - 2*f[i] + f[i-1]); + _Tp l = 1/(4 - tab[(i-1)*4]); + tab[i*4] = l; tab[i*4+1] = (t - tab[(i-1)*4+1])*l; + } + + for(i = n-1; i >= 0; i--) + { + _Tp c = tab[i*4+1] - tab[i*4]*cn; + _Tp b = f[i+1] - f[i] - (cn + c*2)*(_Tp)0.3333333333333333; + _Tp d = (cn - c)*(_Tp)0.3333333333333333; + tab[i*4] = f[i]; tab[i*4+1] = b; + tab[i*4+2] = c; tab[i*4+3] = d; + cn = c; + } +} + +// interpolates value of a function at x, 0 <= x <= n using a cubic spline. +template static inline _Tp splineInterpolate(_Tp x, const _Tp* tab, int n) +{ + int ix = cvFloor(x); + ix = std::min(std::max(ix, 0), n-1); + x -= ix; + tab += ix*4; + return ((tab[3]*x + tab[2])*x + tab[1])*x + tab[0]; +} + + +template struct ColorChannel +{ + typedef float worktype_f; + static _Tp max() { return std::numeric_limits<_Tp>::max(); } + static _Tp half() { return (_Tp)(max()/2 + 1); } +}; + +template<> struct ColorChannel +{ + typedef float worktype_f; + static float max() { return 1.f; } + static float half() { return 0.5f; } +}; + +/*template<> struct ColorChannel +{ + typedef double worktype_f; + static double max() { return 1.; } + static double half() { return 0.5; } +};*/ + + +///////////////////////////// Top-level template function //////////////////////////////// + +template void CvtColorLoop(const Mat& srcmat, Mat& dstmat, const Cvt& cvt) +{ + typedef typename Cvt::channel_type _Tp; + Size sz = srcmat.size(); + const uchar* src = srcmat.data; + uchar* dst = dstmat.data; + size_t srcstep = srcmat.step, dststep = dstmat.step; + + if( srcmat.isContinuous() && dstmat.isContinuous() ) + { + sz.width *= sz.height; + 
sz.height = 1; + } + + for( ; sz.height--; src += srcstep, dst += dststep ) + cvt((const _Tp*)src, (_Tp*)dst, sz.width); +} + + +////////////////// Various 3/4-channel to 3/4-channel RGB transformations ///////////////// + +template struct RGB2RGB +{ + typedef _Tp channel_type; + + RGB2RGB(int _srccn, int _dstcn, int _blueIdx) : srccn(_srccn), dstcn(_dstcn), blueIdx(_blueIdx) {} + void operator()(const _Tp* src, _Tp* dst, int n) const + { + int scn = srccn, dcn = dstcn, bidx = blueIdx; + if( dcn == 3 ) + { + n *= 3; + for( int i = 0; i < n; i += 3, src += scn ) + { + _Tp t0 = src[bidx], t1 = src[1], t2 = src[bidx ^ 2]; + dst[i] = t0; dst[i+1] = t1; dst[i+2] = t2; + } + } + else if( scn == 3 ) + { + n *= 3; + _Tp alpha = ColorChannel<_Tp>::max(); + for( int i = 0; i < n; i += 3, dst += 4 ) + { + _Tp t0 = src[i], t1 = src[i+1], t2 = src[i+2]; + dst[bidx] = t0; dst[1] = t1; dst[bidx^2] = t2; dst[3] = alpha; + } + } + else + { + n *= 4; + for( int i = 0; i < n; i += 4 ) + { + _Tp t0 = src[i], t1 = src[i+1], t2 = src[i+2], t3 = src[i+3]; + dst[i] = t2; dst[i+1] = t1; dst[i+2] = t0; dst[i+3] = t3; + } + } + } + + int srccn, dstcn, blueIdx; +}; + +/////////// Transforming 16-bit (565 or 555) RGB to/from 24/32-bit (888[8]) RGB ////////// + +struct RGB5x52RGB +{ + typedef uchar channel_type; + + RGB5x52RGB(int _dstcn, int _blueIdx, int _greenBits) + : dstcn(_dstcn), blueIdx(_blueIdx), greenBits(_greenBits) {} + + void operator()(const uchar* src, uchar* dst, int n) const + { + int dcn = dstcn, bidx = blueIdx; + if( greenBits == 6 ) + for( int i = 0; i < n; i++, dst += dcn ) + { + unsigned t = ((const ushort*)src)[i]; + dst[bidx] = (uchar)(t << 3); + dst[1] = (uchar)((t >> 3) & ~3); + dst[bidx ^ 2] = (uchar)((t >> 8) & ~7); + if( dcn == 4 ) + dst[3] = 255; + } + else + for( int i = 0; i < n; i++, dst += dcn ) + { + unsigned t = ((const ushort*)src)[i]; + dst[bidx] = (uchar)(t << 3); + dst[1] = (uchar)((t >> 2) & ~7); + dst[bidx ^ 2] = (uchar)((t >> 7) & ~7); + if( dcn == 4 ) 
+ dst[3] = t & 0x8000 ? 255 : 0; + } + } + + int dstcn, blueIdx, greenBits; +}; + + +struct RGB2RGB5x5 +{ + typedef uchar channel_type; + + RGB2RGB5x5(int _srccn, int _blueIdx, int _greenBits) + : srccn(_srccn), blueIdx(_blueIdx), greenBits(_greenBits) {} + + void operator()(const uchar* src, uchar* dst, int n) const + { + int scn = srccn, bidx = blueIdx; + if( greenBits == 6 ) + for( int i = 0; i < n; i++, src += scn ) + { + ((ushort*)dst)[i] = (ushort)((src[bidx] >> 3)|((src[1]&~3) << 3)|((src[bidx^2]&~7) << 8)); + } + else if( scn == 3 ) + for( int i = 0; i < n; i++, src += 3 ) + { + ((ushort*)dst)[i] = (ushort)((src[bidx] >> 3)|((src[1]&~7) << 2)|((src[bidx^2]&~7) << 7)); + } + else + for( int i = 0; i < n; i++, src += 4 ) + { + ((ushort*)dst)[i] = (ushort)((src[bidx] >> 3)|((src[1]&~7) << 2)| + ((src[bidx^2]&~7) << 7)|(src[3] ? 0x8000 : 0)); + } + } + + int srccn, blueIdx, greenBits; +}; + +///////////////////////////////// Color to/from Grayscale //////////////////////////////// + +template +struct Gray2RGB +{ + typedef _Tp channel_type; + + Gray2RGB(int _dstcn) : dstcn(_dstcn) {} + void operator()(const _Tp* src, _Tp* dst, int n) const + { + if( dstcn == 3 ) + for( int i = 0; i < n; i++, dst += 3 ) + { + dst[0] = dst[1] = dst[2] = src[i]; + } + else + { + _Tp alpha = ColorChannel<_Tp>::max(); + for( int i = 0; i < n; i++, dst += 4 ) + { + dst[0] = dst[1] = dst[2] = src[i]; + dst[3] = alpha; + } + } + } + + int dstcn; +}; + + +struct Gray2RGB5x5 +{ + typedef uchar channel_type; + + Gray2RGB5x5(int _greenBits) : greenBits(_greenBits) {} + void operator()(const uchar* src, uchar* dst, int n) const + { + if( greenBits == 6 ) + for( int i = 0; i < n; i++ ) + { + int t = src[i]; + ((ushort*)dst)[i] = (ushort)((t >> 3)|((t & ~3) << 3)|((t & ~7) << 8)); + } + else + for( int i = 0; i < n; i++ ) + { + int t = src[i] >> 3; + ((ushort*)dst)[i] = (ushort)(t|(t << 5)|(t << 10)); + } + } + int greenBits; +}; + + +#undef R2Y +#undef G2Y +#undef B2Y + +enum +{ + yuv_shift = 
14, + xyz_shift = 12, + R2Y = 4899, + G2Y = 9617, + B2Y = 1868, + BLOCK_SIZE = 256 +}; + + +struct RGB5x52Gray +{ + typedef uchar channel_type; + + RGB5x52Gray(int _greenBits) : greenBits(_greenBits) {} + void operator()(const uchar* src, uchar* dst, int n) const + { + if( greenBits == 6 ) + for( int i = 0; i < n; i++ ) + { + int t = ((ushort*)src)[i]; + dst[i] = (uchar)CV_DESCALE(((t << 3) & 0xf8)*B2Y + + ((t >> 3) & 0xfc)*G2Y + + ((t >> 8) & 0xf8)*R2Y, yuv_shift); + } + else + for( int i = 0; i < n; i++ ) + { + int t = ((ushort*)src)[i]; + dst[i] = (uchar)CV_DESCALE(((t << 3) & 0xf8)*B2Y + + ((t >> 2) & 0xf8)*G2Y + + ((t >> 7) & 0xf8)*R2Y, yuv_shift); + } + } + int greenBits; +}; + + +template struct RGB2Gray +{ + typedef _Tp channel_type; + + RGB2Gray(int _srccn, int blueIdx, const float* _coeffs) : srccn(_srccn) + { + static const float coeffs0[] = { 0.299f, 0.587f, 0.114f }; + memcpy( coeffs, _coeffs ? _coeffs : coeffs0, 3*sizeof(coeffs[0]) ); + if(blueIdx == 0) + std::swap(coeffs[0], coeffs[2]); + } + + void operator()(const _Tp* src, _Tp* dst, int n) const + { + int scn = srccn; + float cb = coeffs[0], cg = coeffs[1], cr = coeffs[2]; + for(int i = 0; i < n; i++, src += scn) + dst[i] = saturate_cast<_Tp>(src[0]*cb + src[1]*cg + src[2]*cr); + } + int srccn; + float coeffs[3]; +}; + + +template<> struct RGB2Gray +{ + typedef uchar channel_type; + + RGB2Gray(int _srccn, int blueIdx, const int* coeffs) : srccn(_srccn) + { + const int coeffs0[] = { R2Y, G2Y, B2Y }; + if(!coeffs) coeffs = coeffs0; + + int b = 0, g = 0, r = (1 << (yuv_shift-1)); + int db = coeffs[blueIdx^2], dg = coeffs[1], dr = coeffs[blueIdx]; + + for( int i = 0; i < 256; i++, b += db, g += dg, r += dr ) + { + tab[i] = b; + tab[i+256] = g; + tab[i+512] = r; + } + } + void operator()(const uchar* src, uchar* dst, int n) const + { + int scn = srccn; + const int* _tab = tab; + for(int i = 0; i < n; i++, src += scn) + dst[i] = (uchar)((_tab[src[0]] + _tab[src[1]+256] + _tab[src[2]+512]) >> yuv_shift); 
+ } + int srccn, blueIdx; + int tab[256*3]; +}; + + +template<> struct RGB2Gray +{ + typedef ushort channel_type; + + RGB2Gray(int _srccn, int blueIdx, const int* _coeffs) : srccn(_srccn) + { + static const int coeffs0[] = { R2Y, G2Y, B2Y }; + memcpy(coeffs, _coeffs ? _coeffs : coeffs0, 3*sizeof(coeffs[0])); + if( blueIdx == 0 ) + std::swap(coeffs[0], coeffs[2]); + } + + void operator()(const ushort* src, ushort* dst, int n) const + { + int scn = srccn, cb = coeffs[0], cg = coeffs[1], cr = coeffs[2]; + for(int i = 0; i < n; i++, src += scn) + dst[i] = (ushort)CV_DESCALE((unsigned)(src[0]*cb + src[1]*cg + src[2]*cr), yuv_shift); + } + int srccn; + int coeffs[3]; +}; + + +///////////////////////////////////// RGB <-> YCrCb ////////////////////////////////////// + +template struct RGB2YCrCb_f +{ + typedef _Tp channel_type; + + RGB2YCrCb_f(int _srccn, int _blueIdx, const float* _coeffs) : srccn(_srccn), blueIdx(_blueIdx) + { + static const float coeffs0[] = {0.299f, 0.587f, 0.114f, 0.713f, 0.564f}; + memcpy(coeffs, _coeffs ? _coeffs : coeffs0, 5*sizeof(coeffs[0])); + if(blueIdx==0) std::swap(coeffs[0], coeffs[2]); + } + + void operator()(const _Tp* src, _Tp* dst, int n) const + { + int scn = srccn, bidx = blueIdx; + const _Tp delta = ColorChannel<_Tp>::half(); + float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3], C4 = coeffs[4]; + n *= 3; + for(int i = 0; i < n; i += 3, src += scn) + { + _Tp Y = saturate_cast<_Tp>(src[0]*C0 + src[1]*C1 + src[2]*C2); + _Tp Cr = saturate_cast<_Tp>((src[bidx^2] - Y)*C3 + delta); + _Tp Cb = saturate_cast<_Tp>((src[bidx] - Y)*C4 + delta); + dst[i] = Y; dst[i+1] = Cr; dst[i+2] = Cb; + } + } + int srccn, blueIdx; + float coeffs[5]; +}; + + +template struct RGB2YCrCb_i +{ + typedef _Tp channel_type; + + RGB2YCrCb_i(int _srccn, int _blueIdx, const int* _coeffs) + : srccn(_srccn), blueIdx(_blueIdx) + { + static const int coeffs0[] = {R2Y, G2Y, B2Y, 11682, 9241}; + memcpy(coeffs, _coeffs ? 
_coeffs : coeffs0, 5*sizeof(coeffs[0])); + if(blueIdx==0) std::swap(coeffs[0], coeffs[2]); + } + void operator()(const _Tp* src, _Tp* dst, int n) const + { + int scn = srccn, bidx = blueIdx; + int C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3], C4 = coeffs[4]; + int delta = ColorChannel<_Tp>::half()*(1 << yuv_shift); + n *= 3; + for(int i = 0; i < n; i += 3, src += scn) + { + int Y = CV_DESCALE(src[0]*C0 + src[1]*C1 + src[2]*C2, yuv_shift); + int Cr = CV_DESCALE((src[bidx^2] - Y)*C3 + delta, yuv_shift); + int Cb = CV_DESCALE((src[bidx] - Y)*C4 + delta, yuv_shift); + dst[i] = saturate_cast<_Tp>(Y); + dst[i+1] = saturate_cast<_Tp>(Cr); + dst[i+2] = saturate_cast<_Tp>(Cb); + } + } + int srccn, blueIdx; + int coeffs[5]; +}; + + +template struct YCrCb2RGB_f +{ + typedef _Tp channel_type; + + YCrCb2RGB_f(int _dstcn, int _blueIdx, const float* _coeffs) + : dstcn(_dstcn), blueIdx(_blueIdx) + { + static const float coeffs0[] = {1.403f, -0.714f, -0.344f, 1.773f}; + memcpy(coeffs, _coeffs ? _coeffs : coeffs0, 4*sizeof(coeffs[0])); + } + void operator()(const _Tp* src, _Tp* dst, int n) const + { + int dcn = dstcn, bidx = blueIdx; + const _Tp delta = ColorChannel<_Tp>::half(), alpha = ColorChannel<_Tp>::max(); + float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3]; + n *= 3; + for(int i = 0; i < n; i += 3, dst += dcn) + { + _Tp Y = src[i]; + _Tp Cr = src[i+1]; + _Tp Cb = src[i+2]; + + _Tp b = saturate_cast<_Tp>(Y + (Cb - delta)*C3); + _Tp g = saturate_cast<_Tp>(Y + (Cb - delta)*C2 + (Cr - delta)*C1); + _Tp r = saturate_cast<_Tp>(Y + (Cr - delta)*C0); + + dst[bidx] = b; dst[1] = g; dst[bidx^2] = r; + if( dcn == 4 ) + dst[3] = alpha; + } + } + int dstcn, blueIdx; + float coeffs[4]; +}; + + +template struct YCrCb2RGB_i +{ + typedef _Tp channel_type; + + YCrCb2RGB_i(int _dstcn, int _blueIdx, const int* _coeffs) + : dstcn(_dstcn), blueIdx(_blueIdx) + { + static const int coeffs0[] = {22987, -11698, -5636, 29049}; + memcpy(coeffs, _coeffs ? 
_coeffs : coeffs0, 4*sizeof(coeffs[0])); + } + + void operator()(const _Tp* src, _Tp* dst, int n) const + { + int dcn = dstcn, bidx = blueIdx; + const _Tp delta = ColorChannel<_Tp>::half(), alpha = ColorChannel<_Tp>::max(); + int C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], C3 = coeffs[3]; + n *= 3; + for(int i = 0; i < n; i += 3, dst += dcn) + { + _Tp Y = src[i]; + _Tp Cr = src[i+1]; + _Tp Cb = src[i+2]; + + int b = Y + CV_DESCALE((Cb - delta)*C3, yuv_shift); + int g = Y + CV_DESCALE((Cb - delta)*C2 + (Cr - delta)*C1, yuv_shift); + int r = Y + CV_DESCALE((Cr - delta)*C0, yuv_shift); + + dst[bidx] = saturate_cast<_Tp>(b); + dst[1] = saturate_cast<_Tp>(g); + dst[bidx^2] = saturate_cast<_Tp>(r); + if( dcn == 4 ) + dst[3] = alpha; + } + } + int dstcn, blueIdx; + int coeffs[4]; +}; + + +////////////////////////////////////// RGB <-> XYZ /////////////////////////////////////// + +static const float sRGB2XYZ_D65[] = +{ + 0.412453f, 0.357580f, 0.180423f, + 0.212671f, 0.715160f, 0.072169f, + 0.019334f, 0.119193f, 0.950227f +}; + +static const float XYZ2sRGB_D65[] = +{ + 3.240479f, -1.53715f, -0.498535f, + -0.969256f, 1.875991f, 0.041556f, + 0.055648f, -0.204043f, 1.057311f +}; + +template struct RGB2XYZ_f +{ + typedef _Tp channel_type; + + RGB2XYZ_f(int _srccn, int blueIdx, const float* _coeffs) : srccn(_srccn) + { + memcpy(coeffs, _coeffs ? 
_coeffs : sRGB2XYZ_D65, 9*sizeof(coeffs[0])); + if(blueIdx == 0) + { + std::swap(coeffs[0], coeffs[2]); + std::swap(coeffs[3], coeffs[5]); + std::swap(coeffs[6], coeffs[8]); + } + } + void operator()(const _Tp* src, _Tp* dst, int n) const + { + int scn = srccn; + float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], + C3 = coeffs[3], C4 = coeffs[4], C5 = coeffs[5], + C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8]; + + n *= 3; + for(int i = 0; i < n; i += 3, src += scn) + { + _Tp X = saturate_cast<_Tp>(src[0]*C0 + src[1]*C1 + src[2]*C2); + _Tp Y = saturate_cast<_Tp>(src[0]*C3 + src[1]*C4 + src[2]*C5); + _Tp Z = saturate_cast<_Tp>(src[0]*C6 + src[1]*C7 + src[2]*C8); + dst[i] = X; dst[i+1] = Y; dst[i+2] = Z; + } + } + int srccn; + float coeffs[9]; +}; + + +template struct RGB2XYZ_i +{ + typedef _Tp channel_type; + + RGB2XYZ_i(int _srccn, int blueIdx, const float* _coeffs) : srccn(_srccn) + { + static const int coeffs0[] = + { + 1689, 1465, 739, + 871, 2929, 296, + 79, 488, 3892 + }; + for( int i = 0; i < 9; i++ ) + coeffs[i] = _coeffs ? 
cvRound(_coeffs[i]*(1 << xyz_shift)) : coeffs0[i]; + if(blueIdx == 0) + { + std::swap(coeffs[0], coeffs[2]); + std::swap(coeffs[3], coeffs[5]); + std::swap(coeffs[6], coeffs[8]); + } + } + void operator()(const _Tp* src, _Tp* dst, int n) const + { + int scn = srccn; + int C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], + C3 = coeffs[3], C4 = coeffs[4], C5 = coeffs[5], + C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8]; + n *= 3; + for(int i = 0; i < n; i += 3, src += scn) + { + int X = CV_DESCALE(src[0]*C0 + src[1]*C1 + src[2]*C2, xyz_shift); + int Y = CV_DESCALE(src[0]*C3 + src[1]*C4 + src[2]*C5, xyz_shift); + int Z = CV_DESCALE(src[0]*C6 + src[1]*C7 + src[2]*C8, xyz_shift); + dst[i] = saturate_cast<_Tp>(X); dst[i+1] = saturate_cast<_Tp>(Y); + dst[i+2] = saturate_cast<_Tp>(Z); + } + } + int srccn; + int coeffs[9]; +}; + + +template struct XYZ2RGB_f +{ + typedef _Tp channel_type; + + XYZ2RGB_f(int _dstcn, int _blueIdx, const float* _coeffs) + : dstcn(_dstcn), blueIdx(_blueIdx) + { + memcpy(coeffs, _coeffs ? 
_coeffs : XYZ2sRGB_D65, 9*sizeof(coeffs[0])); + if(blueIdx == 0) + { + std::swap(coeffs[0], coeffs[6]); + std::swap(coeffs[1], coeffs[7]); + std::swap(coeffs[2], coeffs[8]); + } + } + + void operator()(const _Tp* src, _Tp* dst, int n) const + { + int dcn = dstcn; + _Tp alpha = ColorChannel<_Tp>::max(); + float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], + C3 = coeffs[3], C4 = coeffs[4], C5 = coeffs[5], + C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8]; + n *= 3; + for(int i = 0; i < n; i += 3, dst += dcn) + { + _Tp B = saturate_cast<_Tp>(src[i]*C0 + src[i+1]*C1 + src[i+2]*C2); + _Tp G = saturate_cast<_Tp>(src[i]*C3 + src[i+1]*C4 + src[i+2]*C5); + _Tp R = saturate_cast<_Tp>(src[i]*C6 + src[i+1]*C7 + src[i+2]*C8); + dst[0] = B; dst[1] = G; dst[2] = R; + if( dcn == 4 ) + dst[3] = alpha; + } + } + int dstcn, blueIdx; + float coeffs[9]; +}; + + +template struct XYZ2RGB_i +{ + typedef _Tp channel_type; + + XYZ2RGB_i(int _dstcn, int _blueIdx, const int* _coeffs) + : dstcn(_dstcn), blueIdx(_blueIdx) + { + static const int coeffs0[] = + { + 13273, -6296, -2042, + -3970, 7684, 170, + 228, -836, 4331 + }; + for(int i = 0; i < 9; i++) + coeffs[i] = _coeffs ? 
cvRound(_coeffs[i]*(1 << xyz_shift)) : coeffs0[i]; + + if(blueIdx == 0) + { + std::swap(coeffs[0], coeffs[6]); + std::swap(coeffs[1], coeffs[7]); + std::swap(coeffs[2], coeffs[8]); + } + } + void operator()(const _Tp* src, _Tp* dst, int n) const + { + int dcn = dstcn; + _Tp alpha = ColorChannel<_Tp>::max(); + int C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], + C3 = coeffs[3], C4 = coeffs[4], C5 = coeffs[5], + C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8]; + n *= 3; + for(int i = 0; i < n; i += 3, dst += dcn) + { + int B = CV_DESCALE(src[i]*C0 + src[i+1]*C1 + src[i+2]*C2, xyz_shift); + int G = CV_DESCALE(src[i]*C3 + src[i+1]*C4 + src[i+2]*C5, xyz_shift); + int R = CV_DESCALE(src[i]*C6 + src[i+1]*C7 + src[i+2]*C8, xyz_shift); + dst[0] = saturate_cast<_Tp>(B); dst[1] = saturate_cast<_Tp>(G); + dst[2] = saturate_cast<_Tp>(R); + if( dcn == 4 ) + dst[3] = alpha; + } + } + int dstcn, blueIdx; + int coeffs[9]; +}; + + +////////////////////////////////////// RGB <-> HSV /////////////////////////////////////// + + +struct RGB2HSV_b +{ + typedef uchar channel_type; + + RGB2HSV_b(int _srccn, int _blueIdx, int _hrange) + : srccn(_srccn), blueIdx(_blueIdx), hrange(_hrange) + { + CV_Assert( hrange == 180 || hrange == 256 ); + } + + void operator()(const uchar* src, uchar* dst, int n) const + { + int i, bidx = blueIdx, scn = srccn; + const int hsv_shift = 12; + + static int sdiv_table[256]; + static int hdiv_table180[256]; + static int hdiv_table256[256]; + static volatile bool initialized = false; + + int hr = hrange; + const int* hdiv_table = hr == 180 ? 
hdiv_table180 : hdiv_table256; + n *= 3; + + if( !initialized ) + { + sdiv_table[0] = hdiv_table180[0] = hdiv_table256[0] = 0; + for( i = 1; i < 256; i++ ) + { + sdiv_table[i] = saturate_cast((255 << hsv_shift)/(1.*i)); + hdiv_table180[i] = saturate_cast((180 << hsv_shift)/(6.*i)); + hdiv_table256[i] = saturate_cast((256 << hsv_shift)/(6.*i)); + } + initialized = true; + } + + for( i = 0; i < n; i += 3, src += scn ) + { + int b = src[bidx], g = src[1], r = src[bidx^2]; + int h, s, v = b; + int vmin = b, diff; + int vr, vg; + + CV_CALC_MAX_8U( v, g ); + CV_CALC_MAX_8U( v, r ); + CV_CALC_MIN_8U( vmin, g ); + CV_CALC_MIN_8U( vmin, r ); + + diff = v - vmin; + vr = v == r ? -1 : 0; + vg = v == g ? -1 : 0; + + s = (diff * sdiv_table[v] + (1 << (hsv_shift-1))) >> hsv_shift; + h = (vr & (g - b)) + + (~vr & ((vg & (b - r + 2 * diff)) + ((~vg) & (r - g + 4 * diff)))); + h = (h * hdiv_table[diff] + (1 << (hsv_shift-1))) >> hsv_shift; + h += h < 0 ? hr : 0; + + dst[i] = saturate_cast(h); + dst[i+1] = (uchar)s; + dst[i+2] = (uchar)v; + } + } + + int srccn, blueIdx, hrange; +}; + + +struct RGB2HSV_f +{ + typedef float channel_type; + + RGB2HSV_f(int _srccn, int _blueIdx, float _hrange) + : srccn(_srccn), blueIdx(_blueIdx), hrange(_hrange) {} + + void operator()(const float* src, float* dst, int n) const + { + int i, bidx = blueIdx, scn = srccn; + float hscale = hrange*(1.f/360.f); + n *= 3; + + for( i = 0; i < n; i += 3, src += scn ) + { + float b = src[bidx], g = src[1], r = src[bidx^2]; + float h, s, v; + + float vmin, diff; + + v = vmin = r; + if( v < g ) v = g; + if( v < b ) v = b; + if( vmin > g ) vmin = g; + if( vmin > b ) vmin = b; + + diff = v - vmin; + s = diff/(float)(fabs(v) + FLT_EPSILON); + diff = (float)(60./(diff + FLT_EPSILON)); + if( v == r ) + h = (g - b)*diff; + else if( v == g ) + h = (b - r)*diff + 120.f; + else + h = (r - g)*diff + 240.f; + + if( h < 0 ) h += 360.f; + + dst[i] = h*hscale; + dst[i+1] = s; + dst[i+2] = v; + } + } + + int srccn, blueIdx; + 
float hrange; +}; + + +struct HSV2RGB_f +{ + typedef float channel_type; + + HSV2RGB_f(int _dstcn, int _blueIdx, float _hrange) + : dstcn(_dstcn), blueIdx(_blueIdx), hscale(6.f/_hrange) {} + + void operator()(const float* src, float* dst, int n) const + { + int i, bidx = blueIdx, dcn = dstcn; + float _hscale = hscale; + float alpha = ColorChannel::max(); + n *= 3; + + for( i = 0; i < n; i += 3, dst += dcn ) + { + float h = src[i], s = src[i+1], v = src[i+2]; + float b, g, r; + + if( s == 0 ) + b = g = r = v; + else + { + static const int sector_data[][3]= + {{1,3,0}, {1,0,2}, {3,0,1}, {0,2,1}, {0,1,3}, {2,1,0}}; + float tab[4]; + int sector; + h *= _hscale; + if( h < 0 ) + do h += 6; while( h < 0 ); + else if( h >= 6 ) + do h -= 6; while( h >= 6 ); + sector = cvFloor(h); + h -= sector; + + tab[0] = v; + tab[1] = v*(1.f - s); + tab[2] = v*(1.f - s*h); + tab[3] = v*(1.f - s*(1.f - h)); + + b = tab[sector_data[sector][0]]; + g = tab[sector_data[sector][1]]; + r = tab[sector_data[sector][2]]; + } + + dst[bidx] = b; + dst[1] = g; + dst[bidx^2] = r; + if( dcn == 4 ) + dst[3] = alpha; + } + } + + int dstcn, blueIdx; + float hscale; +}; + + +struct HSV2RGB_b +{ + typedef uchar channel_type; + + HSV2RGB_b(int _dstcn, int _blueIdx, int _hrange) + : dstcn(_dstcn), cvt(3, _blueIdx, (float)_hrange) + {} + + void operator()(const uchar* src, uchar* dst, int n) const + { + int i, j, dcn = dstcn; + uchar alpha = ColorChannel::max(); + float buf[3*BLOCK_SIZE]; + + for( i = 0; i < n; i += BLOCK_SIZE, src += BLOCK_SIZE*3 ) + { + int dn = std::min(n - i, (int)BLOCK_SIZE); + + for( j = 0; j < dn*3; j += 3 ) + { + buf[j] = src[j]; + buf[j+1] = src[j+1]*(1.f/255.f); + buf[j+2] = src[j+2]*(1.f/255.f); + } + cvt(buf, buf, dn); + + for( j = 0; j < dn*3; j += 3, dst += dcn ) + { + dst[0] = saturate_cast(buf[j]*255.f); + dst[1] = saturate_cast(buf[j+1]*255.f); + dst[2] = saturate_cast(buf[j+2]*255.f); + if( dcn == 4 ) + dst[3] = alpha; + } + } + } + + int dstcn; + HSV2RGB_f cvt; +}; + + 
+///////////////////////////////////// RGB <-> HLS //////////////////////////////////////// + +struct RGB2HLS_f +{ + typedef float channel_type; + + RGB2HLS_f(int _srccn, int _blueIdx, float _hrange) + : srccn(_srccn), blueIdx(_blueIdx), hrange(_hrange) {} + + void operator()(const float* src, float* dst, int n) const + { + int i, bidx = blueIdx, scn = srccn; + float hscale = hrange*(1.f/360.f); + n *= 3; + + for( i = 0; i < n; i += 3, src += scn ) + { + float b = src[bidx], g = src[1], r = src[bidx^2]; + float h = 0.f, s = 0.f, l; + float vmin, vmax, diff; + + vmax = vmin = r; + if( vmax < g ) vmax = g; + if( vmax < b ) vmax = b; + if( vmin > g ) vmin = g; + if( vmin > b ) vmin = b; + + diff = vmax - vmin; + l = (vmax + vmin)*0.5f; + + if( diff > FLT_EPSILON ) + { + s = l < 0.5f ? diff/(vmax + vmin) : diff/(2 - vmax - vmin); + diff = 60.f/diff; + + if( vmax == r ) + h = (g - b)*diff; + else if( vmax == g ) + h = (b - r)*diff + 120.f; + else + h = (r - g)*diff + 240.f; + + if( h < 0.f ) h += 360.f; + } + + dst[i] = h*hscale; + dst[i+1] = l; + dst[i+2] = s; + } + } + + int srccn, blueIdx; + float hrange; +}; + + +struct RGB2HLS_b +{ + typedef uchar channel_type; + + RGB2HLS_b(int _srccn, int _blueIdx, int _hrange) + : srccn(_srccn), cvt(3, _blueIdx, (float)_hrange) {} + + void operator()(const uchar* src, uchar* dst, int n) const + { + int i, j, scn = srccn; + float buf[3*BLOCK_SIZE]; + + for( i = 0; i < n; i += BLOCK_SIZE, dst += BLOCK_SIZE*3 ) + { + int dn = std::min(n - i, (int)BLOCK_SIZE); + + for( j = 0; j < dn*3; j += 3, src += scn ) + { + buf[j] = src[0]*(1.f/255.f); + buf[j+1] = src[1]*(1.f/255.f); + buf[j+2] = src[2]*(1.f/255.f); + } + cvt(buf, buf, dn); + + for( j = 0; j < dn*3; j += 3 ) + { + dst[j] = saturate_cast(buf[j]); + dst[j+1] = saturate_cast(buf[j+1]*255.f); + dst[j+2] = saturate_cast(buf[j+2]*255.f); + } + } + } + + int srccn; + RGB2HLS_f cvt; +}; + + +struct HLS2RGB_f +{ + typedef float channel_type; + + HLS2RGB_f(int _dstcn, int _blueIdx, 
float _hrange) + : dstcn(_dstcn), blueIdx(_blueIdx), hscale(6.f/_hrange) {} + + void operator()(const float* src, float* dst, int n) const + { + int i, bidx = blueIdx, dcn = dstcn; + float _hscale = hscale; + float alpha = ColorChannel::max(); + n *= 3; + + for( i = 0; i < n; i += 3, dst += dcn ) + { + float h = src[i], l = src[i+1], s = src[i+2]; + float b, g, r; + + if( s == 0 ) + b = g = r = l; + else + { + static const int sector_data[][3]= + {{1,3,0}, {1,0,2}, {3,0,1}, {0,2,1}, {0,1,3}, {2,1,0}}; + float tab[4]; + int sector; + + float p2 = l <= 0.5f ? l*(1 + s) : l + s - l*s; + float p1 = 2*l - p2; + + h *= _hscale; + if( h < 0 ) + do h += 6; while( h < 0 ); + else if( h >= 6 ) + do h -= 6; while( h >= 6 ); + + assert( 0 <= h && h < 6 ); + sector = cvFloor(h); + h -= sector; + + tab[0] = p2; + tab[1] = p1; + tab[2] = p1 + (p2 - p1)*(1-h); + tab[3] = p1 + (p2 - p1)*h; + + b = tab[sector_data[sector][0]]; + g = tab[sector_data[sector][1]]; + r = tab[sector_data[sector][2]]; + } + + dst[bidx] = b; + dst[1] = g; + dst[bidx^2] = r; + if( dcn == 4 ) + dst[3] = alpha; + } + } + + int dstcn, blueIdx; + float hscale; +}; + + +struct HLS2RGB_b +{ + typedef uchar channel_type; + + HLS2RGB_b(int _dstcn, int _blueIdx, int _hrange) + : dstcn(_dstcn), cvt(3, _blueIdx, (float)_hrange) + {} + + void operator()(const uchar* src, uchar* dst, int n) const + { + int i, j, dcn = dstcn; + uchar alpha = ColorChannel::max(); + float buf[3*BLOCK_SIZE]; + + for( i = 0; i < n; i += BLOCK_SIZE, src += BLOCK_SIZE*3 ) + { + int dn = std::min(n - i, (int)BLOCK_SIZE); + + for( j = 0; j < dn*3; j += 3 ) + { + buf[j] = src[j]; + buf[j+1] = src[j+1]*(1.f/255.f); + buf[j+2] = src[j+2]*(1.f/255.f); + } + cvt(buf, buf, dn); + + for( j = 0; j < dn*3; j += 3, dst += dcn ) + { + dst[0] = saturate_cast(buf[j]*255.f); + dst[1] = saturate_cast(buf[j+1]*255.f); + dst[2] = saturate_cast(buf[j+2]*255.f); + if( dcn == 4 ) + dst[3] = alpha; + } + } + } + + int dstcn; + HLS2RGB_f cvt; +}; + + 
+///////////////////////////////////// RGB <-> L*a*b* ///////////////////////////////////// + +static const float D65[] = { 0.950456f, 1.f, 1.088754f }; + +enum { LAB_CBRT_TAB_SIZE = 1024, GAMMA_TAB_SIZE = 1024 }; +static float LabCbrtTab[LAB_CBRT_TAB_SIZE*4]; +static const float LabCbrtTabScale = LAB_CBRT_TAB_SIZE/1.5f; + +static float sRGBGammaTab[GAMMA_TAB_SIZE*4], sRGBInvGammaTab[GAMMA_TAB_SIZE*4]; +static const float GammaTabScale = (float)GAMMA_TAB_SIZE; + +static ushort sRGBGammaTab_b[256], linearGammaTab_b[256]; +#undef lab_shift +#define lab_shift xyz_shift +#define gamma_shift 3 +#define lab_shift2 (lab_shift + gamma_shift) +#define LAB_CBRT_TAB_SIZE_B (256*3/2*(1<(255.f*(1 << gamma_shift)*(x <= 0.04045f ? x*(1.f/12.92f) : (float)pow((double)(x + 0.055)*(1./1.055), 2.4))); + linearGammaTab_b[i] = (ushort)(i*(1 << gamma_shift)); + } + + for(i = 0; i < LAB_CBRT_TAB_SIZE_B; i++) + { + float x = i*(1.f/(255.f*(1 << gamma_shift))); + LabCbrtTab_b[i] = saturate_cast((1 << lab_shift2)*(x < 0.008856f ? 
x*7.787f + 0.13793103448275862f : cvCbrt(x))); + } + initialized = true; + } +} + +struct RGB2Lab_b +{ + typedef uchar channel_type; + + RGB2Lab_b(int _srccn, int blueIdx, const float* _coeffs, + const float* _whitept, bool _srgb) + : srccn(_srccn), srgb(_srgb) + { + static volatile int _3 = 3; + initLabTabs(); + + if(!_coeffs) _coeffs = sRGB2XYZ_D65; + if(!_whitept) _whitept = D65; + float scale[] = + { + (1 << lab_shift)/_whitept[0], + (float)(1 << lab_shift), + (1 << lab_shift)/_whitept[2] + }; + + for( int i = 0; i < _3; i++ ) + { + coeffs[i*3+(blueIdx^2)] = cvRound(_coeffs[i*3]*scale[i]); + coeffs[i*3+1] = cvRound(_coeffs[i*3+1]*scale[i]); + coeffs[i*3+blueIdx] = cvRound(_coeffs[i*3+2]*scale[i]); + + CV_Assert( coeffs[i] >= 0 && coeffs[i*3+1] >= 0 && coeffs[i*3+2] >= 0 && + coeffs[i*3] + coeffs[i*3+1] + coeffs[i*3+2] < 2*(1 << lab_shift) ); + } + } + + void operator()(const uchar* src, uchar* dst, int n) const + { + const int Lscale = (116*255+50)/100; + const int Lshift = -((16*255*(1 << lab_shift2) + 50)/100); + const ushort* tab = srgb ? 
sRGBGammaTab_b : linearGammaTab_b; + int i, scn = srccn; + int C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], + C3 = coeffs[3], C4 = coeffs[4], C5 = coeffs[5], + C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8]; + n *= 3; + + for( i = 0; i < n; i += 3, src += scn ) + { + int R = tab[src[0]], G = tab[src[1]], B = tab[src[2]]; + int fX = LabCbrtTab_b[CV_DESCALE(R*C0 + G*C1 + B*C2, lab_shift)]; + int fY = LabCbrtTab_b[CV_DESCALE(R*C3 + G*C4 + B*C5, lab_shift)]; + int fZ = LabCbrtTab_b[CV_DESCALE(R*C6 + G*C7 + B*C8, lab_shift)]; + + int L = CV_DESCALE( Lscale*fY + Lshift, lab_shift2 ); + int a = CV_DESCALE( 500*(fX - fY) + 128*(1 << lab_shift2), lab_shift2 ); + int b = CV_DESCALE( 200*(fY - fZ) + 128*(1 << lab_shift2), lab_shift2 ); + + dst[i] = saturate_cast(L); + dst[i+1] = saturate_cast(a); + dst[i+2] = saturate_cast(b); + } + } + + int srccn; + int coeffs[9]; + bool srgb; +}; + + +struct RGB2Lab_f +{ + typedef float channel_type; + + RGB2Lab_f(int _srccn, int blueIdx, const float* _coeffs, + const float* _whitept, bool _srgb) + : srccn(_srccn), srgb(_srgb) + { + volatile int _3 = 3; + initLabTabs(); + + if(!_coeffs) _coeffs = sRGB2XYZ_D65; + if(!_whitept) _whitept = D65; + float scale[] = { LabCbrtTabScale/_whitept[0], LabCbrtTabScale, LabCbrtTabScale/_whitept[2] }; + + for( int i = 0; i < _3; i++ ) + { + coeffs[i*3+(blueIdx^2)] = _coeffs[i*3]*scale[i]; + coeffs[i*3+1] = _coeffs[i*3+1]*scale[i]; + coeffs[i*3+blueIdx] = _coeffs[i*3+2]*scale[i]; + CV_Assert( coeffs[i*3] >= 0 && coeffs[i*3+1] >= 0 && coeffs[i*3+2] >= 0 && + coeffs[i*3] + coeffs[i*3+1] + coeffs[i*3+2] < 1.5f*LabCbrtTabScale ); + } + } + + void operator()(const float* src, float* dst, int n) const + { + int i, scn = srccn; + float gscale = GammaTabScale; + const float* gammaTab = srgb ? 
sRGBGammaTab : 0; + float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], + C3 = coeffs[3], C4 = coeffs[4], C5 = coeffs[5], + C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8]; + n *= 3; + + for( i = 0; i < n; i += 3, src += scn ) + { + float R = src[0], G = src[1], B = src[2]; + if( gammaTab ) + { + R = splineInterpolate(R*gscale, gammaTab, GAMMA_TAB_SIZE); + G = splineInterpolate(G*gscale, gammaTab, GAMMA_TAB_SIZE); + B = splineInterpolate(B*gscale, gammaTab, GAMMA_TAB_SIZE); + } + float fX = splineInterpolate(R*C0 + G*C1 + B*C2, LabCbrtTab, LAB_CBRT_TAB_SIZE); + float fY = splineInterpolate(R*C3 + G*C4 + B*C5, LabCbrtTab, LAB_CBRT_TAB_SIZE); + float fZ = splineInterpolate(R*C6 + G*C7 + B*C8, LabCbrtTab, LAB_CBRT_TAB_SIZE); + + float L = 116.f*fY - 16.f; + float a = 500.f*(fX - fY); + float b = 200.f*(fY - fZ); + + dst[i] = L; dst[i+1] = a; dst[i+2] = b; + } + } + + int srccn; + float coeffs[9]; + bool srgb; +}; + + +struct Lab2RGB_f +{ + typedef float channel_type; + + Lab2RGB_f( int _dstcn, int blueIdx, const float* _coeffs, + const float* _whitept, bool _srgb ) + : dstcn(_dstcn), srgb(_srgb) + { + initLabTabs(); + + if(!_coeffs) _coeffs = XYZ2sRGB_D65; + if(!_whitept) _whitept = D65; + + for( int i = 0; i < 3; i++ ) + { + coeffs[i+(blueIdx^2)*3] = _coeffs[i]*_whitept[i]; + coeffs[i+3] = _coeffs[i+3]*_whitept[i]; + coeffs[i+blueIdx*3] = _coeffs[i+6]*_whitept[i]; + } + } + + void operator()(const float* src, float* dst, int n) const + { + int i, dcn = dstcn; + const float* gammaTab = srgb ? 
sRGBInvGammaTab : 0; + float gscale = GammaTabScale; + float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], + C3 = coeffs[3], C4 = coeffs[4], C5 = coeffs[5], + C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8]; + float alpha = ColorChannel::max(); + n *= 3; + + for( i = 0; i < n; i += 3, dst += dcn ) + { + float L = src[i], a = src[i+1], b = src[i+2]; + float Y = (L + 16.f)*(1.f/116.f); + float X = (Y + a*0.002f); + float Z = (Y - b*0.005f); + Y = Y*Y*Y; + X = X*X*X; + Z = Z*Z*Z; + + float R = X*C0 + Y*C1 + Z*C2; + float G = X*C3 + Y*C4 + Z*C5; + float B = X*C6 + Y*C7 + Z*C8; + + if( gammaTab ) + { + R = splineInterpolate(R*gscale, gammaTab, GAMMA_TAB_SIZE); + G = splineInterpolate(G*gscale, gammaTab, GAMMA_TAB_SIZE); + B = splineInterpolate(B*gscale, gammaTab, GAMMA_TAB_SIZE); + } + + dst[0] = R; dst[1] = G; dst[2] = B; + if( dcn == 4 ) + dst[3] = alpha; + } + } + + int dstcn; + float coeffs[9]; + bool srgb; +}; + + +struct Lab2RGB_b +{ + typedef uchar channel_type; + + Lab2RGB_b( int _dstcn, int blueIdx, const float* _coeffs, + const float* _whitept, bool _srgb ) + : dstcn(_dstcn), cvt(3, blueIdx, _coeffs, _whitept, _srgb ) {} + + void operator()(const uchar* src, uchar* dst, int n) const + { + int i, j, dcn = dstcn; + uchar alpha = ColorChannel::max(); + float buf[3*BLOCK_SIZE]; + + for( i = 0; i < n; i += BLOCK_SIZE, src += BLOCK_SIZE*3 ) + { + int dn = std::min(n - i, (int)BLOCK_SIZE); + + for( j = 0; j < dn*3; j += 3 ) + { + buf[j] = src[j]*(100.f/255.f); + buf[j+1] = (float)(src[j+1] - 128); + buf[j+2] = (float)(src[j+2] - 128); + } + cvt(buf, buf, dn); + + for( j = 0; j < dn*3; j += 3, dst += dcn ) + { + dst[0] = saturate_cast(buf[j]*255.f); + dst[1] = saturate_cast(buf[j+1]*255.f); + dst[2] = saturate_cast(buf[j+2]*255.f); + if( dcn == 4 ) + dst[3] = alpha; + } + } + } + + int dstcn; + Lab2RGB_f cvt; +}; + + +///////////////////////////////////// RGB <-> L*u*v* ///////////////////////////////////// + +struct RGB2Luv_f +{ + typedef float channel_type; + + 
RGB2Luv_f( int _srccn, int blueIdx, const float* _coeffs, + const float* whitept, bool _srgb ) + : srccn(_srccn), srgb(_srgb) + { + volatile int i; + initLabTabs(); + + if(!_coeffs) _coeffs = sRGB2XYZ_D65; + if(!whitept) whitept = D65; + + for( i = 0; i < 3; i++ ) + { + coeffs[i*3] = _coeffs[i*3]; + coeffs[i*3+1] = _coeffs[i*3+1]; + coeffs[i*3+2] = _coeffs[i*3+2]; + if( blueIdx == 0 ) + std::swap(coeffs[i*3], coeffs[i*3+2]); + CV_Assert( coeffs[i*3] >= 0 && coeffs[i*3+1] >= 0 && coeffs[i*3+2] >= 0 && + coeffs[i*3] + coeffs[i*3+1] + coeffs[i*3+2] < 1.5f ); + } + + float d = 1.f/(whitept[0] + whitept[1]*15 + whitept[2]*3); + un = 4*whitept[0]*d; + vn = 9*whitept[1]*d; + + CV_Assert(whitept[1] == 1.f); + } + + void operator()(const float* src, float* dst, int n) const + { + int i, scn = srccn; + float gscale = GammaTabScale; + const float* gammaTab = srgb ? sRGBGammaTab : 0; + float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], + C3 = coeffs[3], C4 = coeffs[4], C5 = coeffs[5], + C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8]; + float _un = 13*un, _vn = 13*vn; + n *= 3; + + for( i = 0; i < n; i += 3, src += scn ) + { + float R = src[0], G = src[1], B = src[2]; + if( gammaTab ) + { + R = splineInterpolate(R*gscale, gammaTab, GAMMA_TAB_SIZE); + G = splineInterpolate(G*gscale, gammaTab, GAMMA_TAB_SIZE); + B = splineInterpolate(B*gscale, gammaTab, GAMMA_TAB_SIZE); + } + + float X = R*C0 + G*C1 + B*C2; + float Y = R*C3 + G*C4 + B*C5; + float Z = R*C6 + G*C7 + B*C8; + + float L = splineInterpolate(Y*LabCbrtTabScale, LabCbrtTab, LAB_CBRT_TAB_SIZE); + L = 116.f*L - 16.f; + + float d = (4*13) / std::max(X + 15 * Y + 3 * Z, FLT_EPSILON); + float u = L*(X*d - _un); + float v = L*((9*0.25f)*Y*d - _vn); + + dst[i] = L; dst[i+1] = u; dst[i+2] = v; + } + } + + int srccn; + float coeffs[9], un, vn; + bool srgb; +}; + + +struct Luv2RGB_f +{ + typedef float channel_type; + + Luv2RGB_f( int _dstcn, int blueIdx, const float* _coeffs, + const float* whitept, bool _srgb ) + : 
dstcn(_dstcn), srgb(_srgb) + { + initLabTabs(); + + if(!_coeffs) _coeffs = XYZ2sRGB_D65; + if(!whitept) whitept = D65; + + for( int i = 0; i < 3; i++ ) + { + coeffs[i+(blueIdx^2)*3] = _coeffs[i]; + coeffs[i+3] = _coeffs[i+3]; + coeffs[i+blueIdx*3] = _coeffs[i+6]; + } + + float d = 1.f/(whitept[0] + whitept[1]*15 + whitept[2]*3); + un = 4*whitept[0]*d; + vn = 9*whitept[1]*d; + + CV_Assert(whitept[1] == 1.f); + } + + void operator()(const float* src, float* dst, int n) const + { + int i, dcn = dstcn; + const float* gammaTab = srgb ? sRGBInvGammaTab : 0; + float gscale = GammaTabScale; + float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2], + C3 = coeffs[3], C4 = coeffs[4], C5 = coeffs[5], + C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8]; + float alpha = ColorChannel::max(); + float _un = un, _vn = vn; + n *= 3; + + for( i = 0; i < n; i += 3, dst += dcn ) + { + float L = src[i], u = src[i+1], v = src[i+2], d, X, Y, Z; + Y = (L + 16.f) * (1.f/116.f); + Y = Y*Y*Y; + d = (1.f/13.f)/L; + u = u*d + _un; + v = v*d + _vn; + float iv = 1.f/v; + X = 2.25f * u * Y * iv ; + Z = (12 - 3 * u - 20 * v) * Y * 0.25f * iv; + + float R = X*C0 + Y*C1 + Z*C2; + float G = X*C3 + Y*C4 + Z*C5; + float B = X*C6 + Y*C7 + Z*C8; + + if( gammaTab ) + { + R = splineInterpolate(R*gscale, gammaTab, GAMMA_TAB_SIZE); + G = splineInterpolate(G*gscale, gammaTab, GAMMA_TAB_SIZE); + B = splineInterpolate(B*gscale, gammaTab, GAMMA_TAB_SIZE); + } + + dst[0] = R; dst[1] = G; dst[2] = B; + if( dcn == 4 ) + dst[3] = alpha; + } + } + + int dstcn; + float coeffs[9], un, vn; + bool srgb; +}; + + +struct RGB2Luv_b +{ + typedef uchar channel_type; + + RGB2Luv_b( int _srccn, int blueIdx, const float* _coeffs, + const float* _whitept, bool _srgb ) + : srccn(_srccn), cvt(3, blueIdx, _coeffs, _whitept, _srgb) {} + + void operator()(const uchar* src, uchar* dst, int n) const + { + int i, j, scn = srccn; + float buf[3*BLOCK_SIZE]; + + for( i = 0; i < n; i += BLOCK_SIZE, dst += BLOCK_SIZE*3 ) + { + int dn = std::min(n - 
i, (int)BLOCK_SIZE); + + for( j = 0; j < dn*3; j += 3, src += scn ) + { + buf[j] = src[0]*(1.f/255.f); + buf[j+1] = (float)(src[1]*(1.f/255.f)); + buf[j+2] = (float)(src[2]*(1.f/255.f)); + } + cvt(buf, buf, dn); + + for( j = 0; j < dn*3; j += 3 ) + { + dst[j] = saturate_cast(buf[j]*2.55f); + dst[j+1] = saturate_cast(buf[j+1]*0.72033898305084743f + 96.525423728813564f); + dst[j+2] = saturate_cast(buf[j+2]*0.99609375f + 139.453125f); + } + } + } + + int srccn; + RGB2Luv_f cvt; +}; + + +struct Luv2RGB_b +{ + typedef uchar channel_type; + + Luv2RGB_b( int _dstcn, int blueIdx, const float* _coeffs, + const float* _whitept, bool _srgb ) + : dstcn(_dstcn), cvt(3, blueIdx, _coeffs, _whitept, _srgb ) {} + + void operator()(const uchar* src, uchar* dst, int n) const + { + int i, j, dcn = dstcn; + uchar alpha = ColorChannel::max(); + float buf[3*BLOCK_SIZE]; + + for( i = 0; i < n; i += BLOCK_SIZE, src += BLOCK_SIZE*3 ) + { + int dn = std::min(n - i, (int)BLOCK_SIZE); + + for( j = 0; j < dn*3; j += 3 ) + { + buf[j] = src[j]*(100.f/255.f); + buf[j+1] = (float)(src[j+1]*1.388235294117647f - 134.f); + buf[j+2] = (float)(src[j+2]*1.003921568627451f - 140.f); + } + cvt(buf, buf, dn); + + for( j = 0; j < dn*3; j += 3, dst += dcn ) + { + dst[0] = saturate_cast(buf[j]*255.f); + dst[1] = saturate_cast(buf[j+1]*255.f); + dst[2] = saturate_cast(buf[j+2]*255.f); + if( dcn == 4 ) + dst[3] = alpha; + } + } + } + + int dstcn; + Luv2RGB_f cvt; +}; + + +//////////////////////////// Bayer Pattern -> RGB conversion ///////////////////////////// + +template +class SIMDBayerStubInterpolator_ +{ +public: + int bayer2Gray(const T*, int, T*, int, int, int, int) const + { + return 0; + } + + int bayer2RGB(const T*, int, T*, int, int) const + { + return 0; + } +}; + +#if CV_SSE2 +class SIMDBayerInterpolator_8u +{ +public: + SIMDBayerInterpolator_8u() + { + use_simd = checkHardwareSupport(CV_CPU_SSE2); + } + + int bayer2Gray(const uchar* bayer, int bayer_step, uchar* dst, + int width, int bcoeff, int 
gcoeff, int rcoeff) const + { + if( !use_simd ) + return 0; + + __m128i _b2y = _mm_set1_epi16((short)(rcoeff*2)); + __m128i _g2y = _mm_set1_epi16((short)(gcoeff*2)); + __m128i _r2y = _mm_set1_epi16((short)(bcoeff*2)); + const uchar* bayer_end = bayer + width; + + for( ; bayer <= bayer_end - 18; bayer += 14, dst += 14 ) + { + __m128i r0 = _mm_loadu_si128((const __m128i*)bayer); + __m128i r1 = _mm_loadu_si128((const __m128i*)(bayer+bayer_step)); + __m128i r2 = _mm_loadu_si128((const __m128i*)(bayer+bayer_step*2)); + + __m128i b1 = _mm_add_epi16(_mm_srli_epi16(_mm_slli_epi16(r0, 8), 8), + _mm_srli_epi16(_mm_slli_epi16(r2, 8), 8)); + __m128i b0 = _mm_add_epi16(b1, _mm_srli_si128(b1, 2)); + b1 = _mm_slli_epi16(_mm_srli_si128(b1, 2), 1); + + __m128i g0 = _mm_add_epi16(_mm_srli_epi16(r0, 8), _mm_srli_epi16(r2, 8)); + __m128i g1 = _mm_srli_epi16(_mm_slli_epi16(r1, 8), 8); + g0 = _mm_add_epi16(g0, _mm_add_epi16(g1, _mm_srli_si128(g1, 2))); + g1 = _mm_slli_epi16(_mm_srli_si128(g1, 2), 2); + + r0 = _mm_srli_epi16(r1, 8); + r1 = _mm_slli_epi16(_mm_add_epi16(r0, _mm_srli_si128(r0, 2)), 1); + r0 = _mm_slli_epi16(r0, 2); + + g0 = _mm_add_epi16(_mm_mulhi_epi16(b0, _b2y), _mm_mulhi_epi16(g0, _g2y)); + g1 = _mm_add_epi16(_mm_mulhi_epi16(b1, _b2y), _mm_mulhi_epi16(g1, _g2y)); + g0 = _mm_add_epi16(g0, _mm_mulhi_epi16(r0, _r2y)); + g1 = _mm_add_epi16(g1, _mm_mulhi_epi16(r1, _r2y)); + g0 = _mm_srli_epi16(g0, 1); + g1 = _mm_srli_epi16(g1, 1); + g0 = _mm_packus_epi16(g0, g0); + g1 = _mm_packus_epi16(g1, g1); + g0 = _mm_unpacklo_epi8(g0, g1); + _mm_storeu_si128((__m128i*)dst, g0); + } + + return (int)(bayer - (bayer_end - width)); + } + + int bayer2RGB(const uchar* bayer, int bayer_step, uchar* dst, int width, int blue) const + { + if( !use_simd ) + return 0; + /* + B G B G | B G B G | B G B G | B G B G + G R G R | G R G R | G R G R | G R G R + B G B G | B G B G | B G B G | B G B G + */ + __m128i delta1 = _mm_set1_epi16(1), delta2 = _mm_set1_epi16(2); + __m128i mask = _mm_set1_epi16(blue < 
0 ? -1 : 0), z = _mm_setzero_si128(); + __m128i masklo = _mm_set1_epi16(0x00ff); + const uchar* bayer_end = bayer + width; + + for( ; bayer <= bayer_end - 18; bayer += 14, dst += 42 ) + { + __m128i r0 = _mm_loadu_si128((const __m128i*)bayer); + __m128i r1 = _mm_loadu_si128((const __m128i*)(bayer+bayer_step)); + __m128i r2 = _mm_loadu_si128((const __m128i*)(bayer+bayer_step*2)); + + __m128i b1 = _mm_add_epi16(_mm_and_si128(r0, masklo), _mm_and_si128(r2, masklo)); + __m128i b0 = _mm_add_epi16(b1, _mm_srli_si128(b1, 2)); + b1 = _mm_srli_si128(b1, 2); + b1 = _mm_srli_epi16(_mm_add_epi16(b1, delta1), 1); + b0 = _mm_srli_epi16(_mm_add_epi16(b0, delta2), 2); + b0 = _mm_packus_epi16(b0, b1); + + __m128i g0 = _mm_add_epi16(_mm_srli_epi16(r0, 8), _mm_srli_epi16(r2, 8)); + __m128i g1 = _mm_and_si128(r1, masklo); + g0 = _mm_add_epi16(g0, _mm_add_epi16(g1, _mm_srli_si128(g1, 2))); + g1 = _mm_srli_si128(g1, 2); + g0 = _mm_srli_epi16(_mm_add_epi16(g0, delta2), 2); + g0 = _mm_packus_epi16(g0, g1); + + r0 = _mm_srli_epi16(r1, 8); + r1 = _mm_add_epi16(r0, _mm_srli_si128(r0, 2)); + r1 = _mm_srli_epi16(_mm_add_epi16(r1, delta1), 1); + r0 = _mm_packus_epi16(r0, r1); + + b1 = _mm_and_si128(_mm_xor_si128(b0, r0), mask); + b0 = _mm_xor_si128(b0, b1); + r0 = _mm_xor_si128(r0, b1); + + // b1 g1 b1 g1 ... + b1 = _mm_unpackhi_epi8(b0, g0); + // b0 g0 b2 g2 b4 g4 .... + b0 = _mm_unpacklo_epi8(b0, g0); + + // r1 0 r3 0 ... + r1 = _mm_unpackhi_epi8(r0, z); + // r0 0 r2 0 r4 0 ... + r0 = _mm_unpacklo_epi8(r0, z); + + // 0 b0 g0 r0 0 b2 g2 r2 0 ... + g0 = _mm_slli_si128(_mm_unpacklo_epi16(b0, r0), 1); + // 0 b8 g8 r8 0 b10 g10 r10 0 ... + g1 = _mm_slli_si128(_mm_unpackhi_epi16(b0, r0), 1); + + // b1 g1 r1 0 b3 g3 r3 .... + r0 = _mm_unpacklo_epi16(b1, r1); + // b9 g9 r9 0 ... 
+ r1 = _mm_unpackhi_epi16(b1, r1); + + b0 = _mm_srli_si128(_mm_unpacklo_epi32(g0, r0), 1); + b1 = _mm_srli_si128(_mm_unpackhi_epi32(g0, r0), 1); + + _mm_storel_epi64((__m128i*)(dst-1+0), b0); + _mm_storel_epi64((__m128i*)(dst-1+6*1), _mm_srli_si128(b0, 8)); + _mm_storel_epi64((__m128i*)(dst-1+6*2), b1); + _mm_storel_epi64((__m128i*)(dst-1+6*3), _mm_srli_si128(b1, 8)); + + g0 = _mm_srli_si128(_mm_unpacklo_epi32(g1, r1), 1); + g1 = _mm_srli_si128(_mm_unpackhi_epi32(g1, r1), 1); + + _mm_storel_epi64((__m128i*)(dst-1+6*4), g0); + _mm_storel_epi64((__m128i*)(dst-1+6*5), _mm_srli_si128(g0, 8)); + + _mm_storel_epi64((__m128i*)(dst-1+6*6), g1); + } + + return (int)(bayer - (bayer_end - width)); + } + + bool use_simd; +}; +#else +typedef SIMDBayerStubInterpolator_ SIMDBayerInterpolator_8u; +#endif + +template +static void Bayer2Gray_( const Mat& srcmat, Mat& dstmat, int code ) +{ + SIMDInterpolator vecOp; + const int R2Y = 4899; + const int G2Y = 9617; + const int B2Y = 1868; + const int SHIFT = 14; + + const T* bayer0 = (const T*)srcmat.data; + int bayer_step = (int)(srcmat.step/sizeof(T)); + T* dst0 = (T*)dstmat.data; + int dst_step = (int)(dstmat.step/sizeof(T)); + Size size = srcmat.size(); + int bcoeff = B2Y, rcoeff = R2Y; + int start_with_green = code == CV_BayerGB2GRAY || code == CV_BayerGR2GRAY; + bool brow = true; + + if( code != CV_BayerBG2GRAY && code != CV_BayerGB2GRAY ) + { + brow = false; + std::swap(bcoeff, rcoeff); + } + + dst0 += dst_step + 1; + size.height -= 2; + size.width -= 2; + + for( ; size.height-- > 0; bayer0 += bayer_step, dst0 += dst_step ) + { + unsigned t0, t1, t2; + const T* bayer = bayer0; + T* dst = dst0; + const T* bayer_end = bayer + size.width; + + if( size.width <= 0 ) + { + dst[-1] = dst[size.width] = 0; + continue; + } + + if( start_with_green ) + { + t0 = (bayer[1] + bayer[bayer_step*2+1])*rcoeff; + t1 = (bayer[bayer_step] + bayer[bayer_step+2])*bcoeff; + t2 = bayer[bayer_step+1]*(2*G2Y); + + dst[0] = (T)CV_DESCALE(t0 + t1 + t2, 
SHIFT+1); + bayer++; + dst++; + } + + int delta = vecOp.bayer2Gray(bayer, bayer_step, dst, size.width, bcoeff, G2Y, rcoeff); + bayer += delta; + dst += delta; + + for( ; bayer <= bayer_end - 2; bayer += 2, dst += 2 ) + { + t0 = (bayer[0] + bayer[2] + bayer[bayer_step*2] + bayer[bayer_step*2+2])*rcoeff; + t1 = (bayer[1] + bayer[bayer_step] + bayer[bayer_step+2] + bayer[bayer_step*2+1])*G2Y; + t2 = bayer[bayer_step+1]*(4*bcoeff); + dst[0] = (T)CV_DESCALE(t0 + t1 + t2, SHIFT+2); + + t0 = (bayer[2] + bayer[bayer_step*2+2])*rcoeff; + t1 = (bayer[bayer_step+1] + bayer[bayer_step+3])*bcoeff; + t2 = bayer[bayer_step+2]*(2*G2Y); + dst[1] = (T)CV_DESCALE(t0 + t1 + t2, SHIFT+1); + } + + if( bayer < bayer_end ) + { + t0 = (bayer[0] + bayer[2] + bayer[bayer_step*2] + bayer[bayer_step*2+2])*rcoeff; + t1 = (bayer[1] + bayer[bayer_step] + bayer[bayer_step+2] + bayer[bayer_step*2+1])*G2Y; + t2 = bayer[bayer_step+1]*(4*bcoeff); + dst[0] = (T)CV_DESCALE(t0 + t1 + t2, SHIFT+2); + bayer++; + dst++; + } + + dst0[-1] = dst0[0]; + dst0[size.width] = dst0[size.width-1]; + + brow = !brow; + std::swap(bcoeff, rcoeff); + start_with_green = !start_with_green; + } + + size = dstmat.size(); + dst0 = (T*)dstmat.data; + if( size.height > 2 ) + for( int i = 0; i < size.width; i++ ) + { + dst0[i] = dst0[i + dst_step]; + dst0[i + (size.height-1)*dst_step] = dst0[i + (size.height-2)*dst_step]; + } + else + for( int i = 0; i < size.width; i++ ) + { + dst0[i] = dst0[i + (size.height-1)*dst_step] = 0; + } +} + +template +static void Bayer2RGB_( const Mat& srcmat, Mat& dstmat, int code ) +{ + SIMDInterpolator vecOp; + const T* bayer0 = (const T*)srcmat.data; + int bayer_step = (int)(srcmat.step/sizeof(T)); + T* dst0 = (T*)dstmat.data; + int dst_step = (int)(dstmat.step/sizeof(T)); + Size size = srcmat.size(); + int blue = code == CV_BayerBG2BGR || code == CV_BayerGB2BGR ? 
-1 : 1; + int start_with_green = code == CV_BayerGB2BGR || code == CV_BayerGR2BGR; + + dst0 += dst_step + 3 + 1; + size.height -= 2; + size.width -= 2; + + for( ; size.height-- > 0; bayer0 += bayer_step, dst0 += dst_step ) + { + int t0, t1; + const T* bayer = bayer0; + T* dst = dst0; + const T* bayer_end = bayer + size.width; + + if( size.width <= 0 ) + { + dst[-4] = dst[-3] = dst[-2] = dst[size.width*3-1] = + dst[size.width*3] = dst[size.width*3+1] = 0; + continue; + } + + if( start_with_green ) + { + t0 = (bayer[1] + bayer[bayer_step*2+1] + 1) >> 1; + t1 = (bayer[bayer_step] + bayer[bayer_step+2] + 1) >> 1; + dst[-blue] = (T)t0; + dst[0] = bayer[bayer_step+1]; + dst[blue] = (T)t1; + bayer++; + dst += 3; + } + + int delta = vecOp.bayer2RGB(bayer, bayer_step, dst, size.width, blue); + bayer += delta; + dst += delta*3; + + if( blue > 0 ) + { + for( ; bayer <= bayer_end - 2; bayer += 2, dst += 6 ) + { + t0 = (bayer[0] + bayer[2] + bayer[bayer_step*2] + + bayer[bayer_step*2+2] + 2) >> 2; + t1 = (bayer[1] + bayer[bayer_step] + + bayer[bayer_step+2] + bayer[bayer_step*2+1]+2) >> 2; + dst[-1] = (T)t0; + dst[0] = (T)t1; + dst[1] = bayer[bayer_step+1]; + + t0 = (bayer[2] + bayer[bayer_step*2+2] + 1) >> 1; + t1 = (bayer[bayer_step+1] + bayer[bayer_step+3] + 1) >> 1; + dst[2] = (T)t0; + dst[3] = bayer[bayer_step+2]; + dst[4] = (T)t1; + } + } + else + { + for( ; bayer <= bayer_end - 2; bayer += 2, dst += 6 ) + { + t0 = (bayer[0] + bayer[2] + bayer[bayer_step*2] + + bayer[bayer_step*2+2] + 2) >> 2; + t1 = (bayer[1] + bayer[bayer_step] + + bayer[bayer_step+2] + bayer[bayer_step*2+1]+2) >> 2; + dst[1] = (T)t0; + dst[0] = (T)t1; + dst[-1] = bayer[bayer_step+1]; + + t0 = (bayer[2] + bayer[bayer_step*2+2] + 1) >> 1; + t1 = (bayer[bayer_step+1] + bayer[bayer_step+3] + 1) >> 1; + dst[4] = (T)t0; + dst[3] = bayer[bayer_step+2]; + dst[2] = (T)t1; + } + } + + if( bayer < bayer_end ) + { + t0 = (bayer[0] + bayer[2] + bayer[bayer_step*2] + + bayer[bayer_step*2+2] + 2) >> 2; + t1 = 
(bayer[1] + bayer[bayer_step] + + bayer[bayer_step+2] + bayer[bayer_step*2+1]+2) >> 2; + dst[-blue] = (T)t0; + dst[0] = (T)t1; + dst[blue] = bayer[bayer_step+1]; + bayer++; + dst += 3; + } + + dst0[-4] = dst0[-1]; + dst0[-3] = dst0[0]; + dst0[-2] = dst0[1]; + dst0[size.width*3-1] = dst0[size.width*3-4]; + dst0[size.width*3] = dst0[size.width*3-3]; + dst0[size.width*3+1] = dst0[size.width*3-2]; + + blue = -blue; + start_with_green = !start_with_green; + } + + size = dstmat.size(); + dst0 = (T*)dstmat.data; + if( size.height > 2 ) + for( int i = 0; i < size.width*3; i++ ) + { + dst0[i] = dst0[i + dst_step]; + dst0[i + (size.height-1)*dst_step] = dst0[i + (size.height-2)*dst_step]; + } + else + for( int i = 0; i < size.width*3; i++ ) + { + dst0[i] = dst0[i + (size.height-1)*dst_step] = 0; + } +} + + +/////////////////// Demosaicing using Variable Number of Gradients /////////////////////// + +static void Bayer2RGB_VNG_8u( const Mat& srcmat, Mat& dstmat, int code ) +{ + const uchar* bayer = srcmat.data; + int bstep = (int)srcmat.step; + uchar* dst = dstmat.data; + int dststep = (int)dstmat.step; + Size size = srcmat.size(); + + int blueIdx = code == CV_BayerBG2BGR_VNG || code == CV_BayerGB2BGR_VNG ? 
0 : 2; + bool greenCell0 = code != CV_BayerBG2BGR_VNG && code != CV_BayerRG2BGR_VNG; + + // for too small images use the simple interpolation algorithm + if( MIN(size.width, size.height) < 8 ) + { + Bayer2RGB_( srcmat, dstmat, code ); + return; + } + + const int brows = 3, bcn = 7; + int N = size.width, N2 = N*2, N3 = N*3, N4 = N*4, N5 = N*5, N6 = N*6, N7 = N*7; + int i, bufstep = N7*bcn; + cv::AutoBuffer _buf(bufstep*brows); + ushort* buf = (ushort*)_buf; + + bayer += bstep*2; + +#if CV_SSE2 + bool haveSSE = cv::checkHardwareSupport(CV_CPU_SSE2); + #define _mm_absdiff_epu16(a,b) _mm_adds_epu16(_mm_subs_epu16(a, b), _mm_subs_epu16(b, a)) +#endif + + for( int y = 2; y < size.height - 4; y++ ) + { + uchar* dstrow = dst + dststep*y + 6; + const uchar* srow; + + for( int dy = (y == 2 ? -1 : 1); dy <= 1; dy++ ) + { + ushort* brow = buf + ((y + dy - 1)%brows)*bufstep + 1; + srow = bayer + (y+dy)*bstep + 1; + + for( i = 0; i < bcn; i++ ) + brow[N*i-1] = brow[(N-2) + N*i] = 0; + + i = 1; + +#if CV_SSE2 + if( haveSSE ) + { + __m128i z = _mm_setzero_si128(); + for( ; i <= N-9; i += 8, srow += 8, brow += 8 ) + { + __m128i s1, s2, s3, s4, s6, s7, s8, s9; + + s1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow-1-bstep)),z); + s2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow-bstep)),z); + s3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow+1-bstep)),z); + + s4 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow-1)),z); + s6 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow+1)),z); + + s7 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow-1+bstep)),z); + s8 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow+bstep)),z); + s9 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow+1+bstep)),z); + + __m128i b0, b1, b2, b3, b4, b5, b6; + + b0 = _mm_adds_epu16(_mm_slli_epi16(_mm_absdiff_epu16(s2,s8),1), + _mm_adds_epu16(_mm_absdiff_epu16(s1, s7), + _mm_absdiff_epu16(s3, s9))); + b1 = _mm_adds_epu16(_mm_slli_epi16(_mm_absdiff_epu16(s4,s6),1), + 
_mm_adds_epu16(_mm_absdiff_epu16(s1, s3), + _mm_absdiff_epu16(s7, s9))); + b2 = _mm_slli_epi16(_mm_absdiff_epu16(s3,s7),1); + b3 = _mm_slli_epi16(_mm_absdiff_epu16(s1,s9),1); + + _mm_storeu_si128((__m128i*)brow, b0); + _mm_storeu_si128((__m128i*)(brow + N), b1); + _mm_storeu_si128((__m128i*)(brow + N2), b2); + _mm_storeu_si128((__m128i*)(brow + N3), b3); + + b4 = _mm_adds_epu16(b2,_mm_adds_epu16(_mm_absdiff_epu16(s2, s4), + _mm_absdiff_epu16(s6, s8))); + b5 = _mm_adds_epu16(b3,_mm_adds_epu16(_mm_absdiff_epu16(s2, s6), + _mm_absdiff_epu16(s4, s8))); + b6 = _mm_adds_epu16(_mm_adds_epu16(s2, s4), _mm_adds_epu16(s6, s8)); + b6 = _mm_srli_epi16(b6, 1); + + _mm_storeu_si128((__m128i*)(brow + N4), b4); + _mm_storeu_si128((__m128i*)(brow + N5), b5); + _mm_storeu_si128((__m128i*)(brow + N6), b6); + } + } +#endif + + for( ; i < N-1; i++, srow++, brow++ ) + { + brow[0] = (ushort)(std::abs(srow[-1-bstep] - srow[-1+bstep]) + + std::abs(srow[-bstep] - srow[+bstep])*2 + + std::abs(srow[1-bstep] - srow[1+bstep])); + brow[N] = (ushort)(std::abs(srow[-1-bstep] - srow[1-bstep]) + + std::abs(srow[-1] - srow[1])*2 + + std::abs(srow[-1+bstep] - srow[1+bstep])); + brow[N2] = (ushort)(std::abs(srow[+1-bstep] - srow[-1+bstep])*2); + brow[N3] = (ushort)(std::abs(srow[-1-bstep] - srow[1+bstep])*2); + brow[N4] = (ushort)(brow[N2] + std::abs(srow[-bstep] - srow[-1]) + + std::abs(srow[+bstep] - srow[1])); + brow[N5] = (ushort)(brow[N3] + std::abs(srow[-bstep] - srow[1]) + + std::abs(srow[+bstep] - srow[-1])); + brow[N6] = (ushort)((srow[-bstep] + srow[-1] + srow[1] + srow[+bstep])>>1); + } + } + + const ushort* brow0 = buf + ((y - 2) % brows)*bufstep + 2; + const ushort* brow1 = buf + ((y - 1) % brows)*bufstep + 2; + const ushort* brow2 = buf + (y % brows)*bufstep + 2; + static const float scale[] = { 0.f, 0.5f, 0.25f, 0.1666666666667f, 0.125f, 0.1f, 0.08333333333f, 0.0714286f, 0.0625f }; + srow = bayer + y*bstep + 2; + bool greenCell = greenCell0; + + i = 2; +#if CV_SSE2 + int limit = !haveSSE 
? N-2 : greenCell ? std::min(3, N-2) : 2; +#else + int limit = N - 2; +#endif + + do + { + for( ; i < limit; i++, srow++, brow0++, brow1++, brow2++, dstrow += 3 ) + { + int gradN = brow0[0] + brow1[0]; + int gradS = brow1[0] + brow2[0]; + int gradW = brow1[N-1] + brow1[N]; + int gradE = brow1[N] + brow1[N+1]; + int minGrad = std::min(std::min(std::min(gradN, gradS), gradW), gradE); + int maxGrad = std::max(std::max(std::max(gradN, gradS), gradW), gradE); + int R, G, B; + + if( !greenCell ) + { + int gradNE = brow0[N4+1] + brow1[N4]; + int gradSW = brow1[N4] + brow2[N4-1]; + int gradNW = brow0[N5-1] + brow1[N5]; + int gradSE = brow1[N5] + brow2[N5+1]; + + minGrad = std::min(std::min(std::min(std::min(minGrad, gradNE), gradSW), gradNW), gradSE); + maxGrad = std::max(std::max(std::max(std::max(maxGrad, gradNE), gradSW), gradNW), gradSE); + int T = minGrad + maxGrad/2; + + int Rs = 0, Gs = 0, Bs = 0, ng = 0; + if( gradN < T ) + { + Rs += srow[-bstep*2] + srow[0]; + Gs += srow[-bstep]*2; + Bs += srow[-bstep-1] + srow[-bstep+1]; + ng++; + } + if( gradS < T ) + { + Rs += srow[bstep*2] + srow[0]; + Gs += srow[bstep]*2; + Bs += srow[bstep-1] + srow[bstep+1]; + ng++; + } + if( gradW < T ) + { + Rs += srow[-2] + srow[0]; + Gs += srow[-1]*2; + Bs += srow[-bstep-1] + srow[bstep-1]; + ng++; + } + if( gradE < T ) + { + Rs += srow[2] + srow[0]; + Gs += srow[1]*2; + Bs += srow[-bstep+1] + srow[bstep+1]; + ng++; + } + if( gradNE < T ) + { + Rs += srow[-bstep*2+2] + srow[0]; + Gs += brow0[N6+1]; + Bs += srow[-bstep+1]*2; + ng++; + } + if( gradSW < T ) + { + Rs += srow[bstep*2-2] + srow[0]; + Gs += brow2[N6-1]; + Bs += srow[bstep-1]*2; + ng++; + } + if( gradNW < T ) + { + Rs += srow[-bstep*2-2] + srow[0]; + Gs += brow0[N6-1]; + Bs += srow[-bstep+1]*2; + ng++; + } + if( gradSE < T ) + { + Rs += srow[bstep*2+2] + srow[0]; + Gs += brow2[N6+1]; + Bs += srow[-bstep+1]*2; + ng++; + } + R = srow[0]; + G = R + cvRound((Gs - Rs)*scale[ng]); + B = R + cvRound((Bs - Rs)*scale[ng]); + } + else + 
{ + int gradNE = brow0[N2] + brow0[N2+1] + brow1[N2] + brow1[N2+1]; + int gradSW = brow1[N2] + brow1[N2-1] + brow2[N2] + brow2[N2-1]; + int gradNW = brow0[N3] + brow0[N3-1] + brow1[N3] + brow1[N3-1]; + int gradSE = brow1[N3] + brow1[N3+1] + brow2[N3] + brow2[N3+1]; + + minGrad = std::min(std::min(std::min(std::min(minGrad, gradNE), gradSW), gradNW), gradSE); + maxGrad = std::max(std::max(std::max(std::max(maxGrad, gradNE), gradSW), gradNW), gradSE); + int T = minGrad + maxGrad/2; + + int Rs = 0, Gs = 0, Bs = 0, ng = 0; + if( gradN < T ) + { + Rs += srow[-bstep*2-1] + srow[-bstep*2+1]; + Gs += srow[-bstep*2] + srow[0]; + Bs += srow[-bstep]*2; + ng++; + } + if( gradS < T ) + { + Rs += srow[bstep*2-1] + srow[bstep*2+1]; + Gs += srow[bstep*2] + srow[0]; + Bs += srow[bstep]*2; + ng++; + } + if( gradW < T ) + { + Rs += srow[-1]*2; + Gs += srow[-2] + srow[0]; + Bs += srow[-bstep-2]+srow[bstep-2]; + ng++; + } + if( gradE < T ) + { + Rs += srow[1]*2; + Gs += srow[2] + srow[0]; + Bs += srow[-bstep+2]+srow[bstep+2]; + ng++; + } + if( gradNE < T ) + { + Rs += srow[-bstep*2+1] + srow[1]; + Gs += srow[-bstep+1]*2; + Bs += srow[-bstep] + srow[-bstep+2]; + ng++; + } + if( gradSW < T ) + { + Rs += srow[bstep*2-1] + srow[-1]; + Gs += srow[bstep-1]*2; + Bs += srow[bstep] + srow[bstep-2]; + ng++; + } + if( gradNW < T ) + { + Rs += srow[-bstep*2-1] + srow[-1]; + Gs += srow[-bstep-1]*2; + Bs += srow[-bstep-2]+srow[-bstep]; + ng++; + } + if( gradSE < T ) + { + Rs += srow[bstep*2+1] + srow[1]; + Gs += srow[bstep+1]*2; + Bs += srow[bstep+2]+srow[bstep]; + ng++; + } + G = srow[0]; + R = G + cvRound((Rs - Gs)*scale[ng]); + B = G + cvRound((Bs - Gs)*scale[ng]); + } + dstrow[blueIdx] = CV_CAST_8U(B); + dstrow[1] = CV_CAST_8U(G); + dstrow[blueIdx^2] = CV_CAST_8U(R); + greenCell = !greenCell; + } + +#if CV_SSE2 + if( !haveSSE ) + break; + + __m128i emask = _mm_set1_epi32(0x0000ffff), + omask = _mm_set1_epi32(0xffff0000), + all_ones = _mm_set1_epi16(1), + z = _mm_setzero_si128(); + __m128 _0_5 = 
_mm_set1_ps(0.5f); + + #define _mm_merge_epi16(a, b) \ + _mm_or_si128(_mm_and_si128(a, emask), _mm_and_si128(b, omask)) + #define _mm_cvtloepi16_ps(a) _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpacklo_epi16(a,a), 16)) + #define _mm_cvthiepi16_ps(a) _mm_cvtepi32_ps(_mm_srai_epi32(_mm_unpackhi_epi16(a,a), 16)) + + // process 8 pixels at once + for( ; i <= N - 10; i += 8, srow += 8, brow0 += 8, brow1 += 8, brow2 += 8 ) + { + __m128i gradN, gradS, gradW, gradE, gradNE, gradSW, gradNW, gradSE; + gradN = _mm_adds_epu16(_mm_loadu_si128((__m128i*)brow0), + _mm_loadu_si128((__m128i*)brow1)); + gradS = _mm_adds_epu16(_mm_loadu_si128((__m128i*)brow1), + _mm_loadu_si128((__m128i*)brow2)); + gradW = _mm_adds_epu16(_mm_loadu_si128((__m128i*)(brow1+N-1)), + _mm_loadu_si128((__m128i*)(brow1+N))); + gradE = _mm_adds_epu16(_mm_loadu_si128((__m128i*)(brow1+N+1)), + _mm_loadu_si128((__m128i*)(brow1+N))); + + __m128i minGrad, maxGrad, T; + minGrad = _mm_min_epi16(_mm_min_epi16(_mm_min_epi16(gradN, gradS), gradW), gradE); + maxGrad = _mm_max_epi16(_mm_max_epi16(_mm_max_epi16(gradN, gradS), gradW), gradE); + + __m128i grad0, grad1; + + grad0 = _mm_adds_epu16(_mm_loadu_si128((__m128i*)(brow0+N4+1)), + _mm_loadu_si128((__m128i*)(brow1+N4))); + grad1 = _mm_adds_epu16(_mm_adds_epu16(_mm_loadu_si128((__m128i*)(brow0+N2)), + _mm_loadu_si128((__m128i*)(brow0+N2+1))), + _mm_adds_epu16(_mm_loadu_si128((__m128i*)(brow1+N2)), + _mm_loadu_si128((__m128i*)(brow1+N2+1)))); + gradNE = _mm_srli_epi16(_mm_merge_epi16(grad0, grad1), 1); + + grad0 = _mm_adds_epu16(_mm_loadu_si128((__m128i*)(brow2+N4-1)), + _mm_loadu_si128((__m128i*)(brow1+N4))); + grad1 = _mm_adds_epu16(_mm_adds_epu16(_mm_loadu_si128((__m128i*)(brow2+N2)), + _mm_loadu_si128((__m128i*)(brow2+N2-1))), + _mm_adds_epu16(_mm_loadu_si128((__m128i*)(brow1+N2)), + _mm_loadu_si128((__m128i*)(brow1+N2-1)))); + gradSW = _mm_srli_epi16(_mm_merge_epi16(grad0, grad1), 1); + + minGrad = _mm_min_epi16(_mm_min_epi16(minGrad, gradNE), gradSW); + maxGrad = 
_mm_max_epi16(_mm_max_epi16(maxGrad, gradNE), gradSW); + + grad0 = _mm_adds_epu16(_mm_loadu_si128((__m128i*)(brow0+N5-1)), + _mm_loadu_si128((__m128i*)(brow1+N5))); + grad1 = _mm_adds_epu16(_mm_adds_epu16(_mm_loadu_si128((__m128i*)(brow0+N3)), + _mm_loadu_si128((__m128i*)(brow0+N3-1))), + _mm_adds_epu16(_mm_loadu_si128((__m128i*)(brow1+N3)), + _mm_loadu_si128((__m128i*)(brow1+N3-1)))); + gradNW = _mm_srli_epi16(_mm_merge_epi16(grad0, grad1), 1); + + grad0 = _mm_adds_epu16(_mm_loadu_si128((__m128i*)(brow2+N5+1)), + _mm_loadu_si128((__m128i*)(brow1+N5))); + grad1 = _mm_adds_epu16(_mm_adds_epu16(_mm_loadu_si128((__m128i*)(brow2+N3)), + _mm_loadu_si128((__m128i*)(brow2+N3+1))), + _mm_adds_epu16(_mm_loadu_si128((__m128i*)(brow1+N3)), + _mm_loadu_si128((__m128i*)(brow1+N3+1)))); + gradSE = _mm_srli_epi16(_mm_merge_epi16(grad0, grad1), 1); + + minGrad = _mm_min_epi16(_mm_min_epi16(minGrad, gradNW), gradSE); + maxGrad = _mm_max_epi16(_mm_max_epi16(maxGrad, gradNW), gradSE); + + T = _mm_add_epi16(_mm_srli_epi16(maxGrad, 1), minGrad); + __m128i RGs = z, GRs = z, Bs = z, ng = z, mask; + + __m128i t0, t1, x0, x1, x2, x3, x4, x5, x6, x7, x8, + x9, x10, x11, x12, x13, x14, x15, x16; + + x0 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)srow), z); + + x1 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow-bstep-1)), z); + x2 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow-bstep*2-1)), z); + x3 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow-bstep)), z); + x4 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow-bstep*2+1)), z); + x5 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow-bstep+1)), z); + x6 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow-bstep+2)), z); + x7 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow+1)), z); + x8 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow+bstep+2)), z); + x9 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow+bstep+1)), z); + x10 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow+bstep*2+1)), z); + x11 = 
_mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow+bstep)), z); + x12 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow+bstep*2-1)), z); + x13 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow+bstep-1)), z); + x14 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow+bstep-2)), z); + x15 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow-1)), z); + x16 = _mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow-bstep-2)), z); + + // gradN + mask = _mm_cmpgt_epi16(T, gradN); + ng = _mm_sub_epi16(ng, mask); + + t0 = _mm_slli_epi16(x3, 1); + t1 = _mm_adds_epu16(_mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow-bstep*2)), z), x0); + + RGs = _mm_adds_epu16(RGs, _mm_and_si128(t1, mask)); + GRs = _mm_adds_epu16(GRs, _mm_and_si128(_mm_merge_epi16(t0, _mm_adds_epu16(x2,x4)), mask)); + Bs = _mm_adds_epu16(Bs, _mm_and_si128(_mm_merge_epi16(_mm_adds_epu16(x1,x5), t0), mask)); + + // gradNE + mask = _mm_cmpgt_epi16(T, gradNE); + ng = _mm_sub_epi16(ng, mask); + + t0 = _mm_slli_epi16(x5, 1); + t1 = _mm_adds_epu16(_mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow-bstep*2+2)), z), x0); + + RGs = _mm_adds_epu16(RGs, _mm_and_si128(_mm_merge_epi16(t1, t0), mask)); + GRs = _mm_adds_epu16(GRs, _mm_and_si128(_mm_merge_epi16(_mm_loadu_si128((__m128i*)(brow0+N6+1)), + _mm_adds_epu16(x4,x7)), mask)); + Bs = _mm_adds_epu16(Bs, _mm_and_si128(_mm_merge_epi16(t0,_mm_adds_epu16(x3,x6)), mask)); + + // gradE + mask = _mm_cmpgt_epi16(T, gradE); + ng = _mm_sub_epi16(ng, mask); + + t0 = _mm_slli_epi16(x7, 1); + t1 = _mm_adds_epu16(_mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow+2)), z), x0); + + RGs = _mm_adds_epu16(RGs, _mm_and_si128(t1, mask)); + GRs = _mm_adds_epu16(GRs, _mm_and_si128(t0, mask)); + Bs = _mm_adds_epu16(Bs, _mm_and_si128(_mm_merge_epi16(_mm_adds_epu16(x5,x9), + _mm_adds_epu16(x6,x8)), mask)); + + // gradSE + mask = _mm_cmpgt_epi16(T, gradSE); + ng = _mm_sub_epi16(ng, mask); + + t0 = _mm_slli_epi16(x9, 1); + t1 = 
_mm_adds_epu16(_mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow+bstep*2+2)), z), x0); + + RGs = _mm_adds_epu16(RGs, _mm_and_si128(_mm_merge_epi16(t1, t0), mask)); + GRs = _mm_adds_epu16(GRs, _mm_and_si128(_mm_merge_epi16(_mm_loadu_si128((__m128i*)(brow2+N6+1)), + _mm_adds_epu16(x7,x10)), mask)); + Bs = _mm_adds_epu16(Bs, _mm_and_si128(_mm_merge_epi16(t0, _mm_adds_epu16(x8,x11)), mask)); + + // gradS + mask = _mm_cmpgt_epi16(T, gradS); + ng = _mm_sub_epi16(ng, mask); + + t0 = _mm_slli_epi16(x11, 1); + t1 = _mm_adds_epu16(_mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow+bstep*2)), z), x0); + + RGs = _mm_adds_epu16(RGs, _mm_and_si128(t1, mask)); + GRs = _mm_adds_epu16(GRs, _mm_and_si128(_mm_merge_epi16(t0, _mm_adds_epu16(x10,x12)), mask)); + Bs = _mm_adds_epu16(Bs, _mm_and_si128(_mm_merge_epi16(_mm_adds_epu16(x9,x13), t0), mask)); + + // gradSW + mask = _mm_cmpgt_epi16(T, gradSW); + ng = _mm_sub_epi16(ng, mask); + + t0 = _mm_slli_epi16(x13, 1); + t1 = _mm_adds_epu16(_mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow+bstep*2-2)), z), x0); + + RGs = _mm_adds_epu16(RGs, _mm_and_si128(_mm_merge_epi16(t1, t0), mask)); + GRs = _mm_adds_epu16(GRs, _mm_and_si128(_mm_merge_epi16(_mm_loadu_si128((__m128i*)(brow2+N6-1)), + _mm_adds_epu16(x12,x15)), mask)); + Bs = _mm_adds_epu16(Bs, _mm_and_si128(_mm_merge_epi16(t0,_mm_adds_epu16(x11,x14)), mask)); + + // gradW + mask = _mm_cmpgt_epi16(T, gradW); + ng = _mm_sub_epi16(ng, mask); + + t0 = _mm_slli_epi16(x15, 1); + t1 = _mm_adds_epu16(_mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow-2)), z), x0); + + RGs = _mm_adds_epu16(RGs, _mm_and_si128(t1, mask)); + GRs = _mm_adds_epu16(GRs, _mm_and_si128(t0, mask)); + Bs = _mm_adds_epu16(Bs, _mm_and_si128(_mm_merge_epi16(_mm_adds_epu16(x1,x13), + _mm_adds_epu16(x14,x16)), mask)); + + // gradNW + mask = _mm_cmpgt_epi16(T, gradNW); + ng = _mm_max_epi16(_mm_sub_epi16(ng, mask), all_ones); + + __m128 ngf0, ngf1; + ngf0 = _mm_div_ps(_0_5, _mm_cvtloepi16_ps(ng)); + ngf1 = _mm_div_ps(_0_5, 
_mm_cvthiepi16_ps(ng)); + + t0 = _mm_slli_epi16(x1, 1); + t1 = _mm_adds_epu16(_mm_unpacklo_epi8(_mm_loadl_epi64((__m128i*)(srow-bstep*2-2)), z), x0); + + RGs = _mm_adds_epu16(RGs, _mm_and_si128(_mm_merge_epi16(t1, t0), mask)); + GRs = _mm_adds_epu16(GRs, _mm_and_si128(_mm_merge_epi16(_mm_loadu_si128((__m128i*)(brow0+N6-1)), + _mm_adds_epu16(x2,x15)), mask)); + Bs = _mm_adds_epu16(Bs, _mm_and_si128(_mm_merge_epi16(t0,_mm_adds_epu16(x3,x16)), mask)); + + // now interpolate r, g & b + t0 = _mm_sub_epi16(GRs, RGs); + t1 = _mm_sub_epi16(Bs, RGs); + + t0 = _mm_add_epi16(x0, _mm_packs_epi32( + _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtloepi16_ps(t0), ngf0)), + _mm_cvtps_epi32(_mm_mul_ps(_mm_cvthiepi16_ps(t0), ngf1)))); + + t1 = _mm_add_epi16(x0, _mm_packs_epi32( + _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtloepi16_ps(t1), ngf0)), + _mm_cvtps_epi32(_mm_mul_ps(_mm_cvthiepi16_ps(t1), ngf1)))); + + x1 = _mm_merge_epi16(x0, t0); + x2 = _mm_merge_epi16(t0, x0); + + uchar R[8], G[8], B[8]; + + _mm_storel_epi64(blueIdx ? (__m128i*)B : (__m128i*)R, _mm_packus_epi16(x1, z)); + _mm_storel_epi64((__m128i*)G, _mm_packus_epi16(x2, z)); + _mm_storel_epi64(blueIdx ? 
(__m128i*)R : (__m128i*)B, _mm_packus_epi16(t1, z)); + + for( int j = 0; j < 8; j++, dstrow += 3 ) + { + dstrow[0] = B[j]; dstrow[1] = G[j]; dstrow[2] = R[j]; + } + } +#endif + + limit = N - 2; + } + while( i < N - 2 ); + + for( i = 0; i < 6; i++ ) + { + dst[dststep*y + 5 - i] = dst[dststep*y + 8 - i]; + dst[dststep*y + (N - 2)*3 + i] = dst[dststep*y + (N - 3)*3 + i]; + } + + greenCell0 = !greenCell0; + blueIdx ^= 2; + } + + for( i = 0; i < size.width*3; i++ ) + { + dst[i] = dst[i + dststep] = dst[i + dststep*2]; + dst[i + dststep*(size.height-4)] = + dst[i + dststep*(size.height-3)] = + dst[i + dststep*(size.height-2)] = + dst[i + dststep*(size.height-1)] = dst[i + dststep*(size.height-5)]; + } +} + +///////////////////////////////////// YUV420 -> RGB ///////////////////////////////////// + +template +struct YUV4202BGR888Invoker +{ + Mat* dst; + const uchar* my1, *muv; + int width; + + YUV4202BGR888Invoker(Mat& _dst, int _width, const uchar* _y1, const uchar* _uv) + : dst(&_dst), my1(_y1), muv(_uv), width(_width) {} + + void operator()(const BlockedRange& range) const + { + //B = 1.164(Y - 16) + 2.018(U - 128) + //G = 1.164(Y - 16) - 0.813(V - 128) - 0.391(U - 128) + //R = 1.164(Y - 16) + 1.596(V - 128) + + const uchar* y1 = my1 + range.begin() * width, *uv = muv + range.begin() * width / 2; + + for (int j = range.begin(); j < range.end(); j+=2, y1+=width*2, uv+=width) + { + uchar* row1 = dst->ptr(j); + uchar* row2 = dst->ptr(j+1); + const uchar* y2 = y1 + width; + + for(int i = 0; i < width; i+=2,row1+=6,row2+=6) + { + int cr = uv[i + SPorI + 0] - 128; + int cb = uv[i - SPorI + 1] - 128; + + int ruv = 409 * cr + 128; + int guv = 128 - 100 * cb - 208 * cr; + int buv = 516 * cb + 128; + + int y00 = (y1[i] - 16) * 298; + row1[0+R] = saturate_cast((y00 + buv) >> 8); + row1[1] = saturate_cast((y00 + guv) >> 8); + row1[2-R] = saturate_cast((y00 + ruv) >> 8); + + int y01 = (y1[i+1] - 16) * 298; + row1[3+R] = saturate_cast((y01 + buv) >> 8); + row1[4] = 
saturate_cast((y01 + guv) >> 8); + row1[5-R] = saturate_cast((y01 + ruv) >> 8); + + int y10 = (y2[i] - 16) * 298; + row2[0+R] = saturate_cast((y10 + buv) >> 8); + row2[1] = saturate_cast((y10 + guv) >> 8); + row2[2-R] = saturate_cast((y10 + ruv) >> 8); + + int y11 = (y2[i+1] - 16) * 298; + row2[3+R] = saturate_cast((y11 + buv) >> 8); + row2[4] = saturate_cast((y11 + guv) >> 8); + row2[5-R] = saturate_cast((y11 + ruv) >> 8); + } + } + } +}; + +template +struct YUV4202BGRA8888Invoker +{ + Mat* dst; + const uchar* my1, *muv; + int width; + + YUV4202BGRA8888Invoker(Mat& _dst, int _width, const uchar* _y1, const uchar* _uv) + : dst(&_dst), my1(_y1), muv(_uv), width(_width) {} + + void operator()(const BlockedRange& range) const + { + //B = 1.164(Y - 16) + 2.018(U - 128) + //G = 1.164(Y - 16) - 0.813(V - 128) - 0.391(U - 128) + //R = 1.164(Y - 16) + 1.596(V - 128) + + const uchar* y1 = my1 + range.begin() * width, *uv = muv + range.begin() * width / 2; + + for (int j = range.begin(); j < range.end(); j+=2, y1+=width*2, uv+=width) + { + uchar* row1 = dst->ptr(j); + uchar* row2 = dst->ptr(j+1); + const uchar* y2 = y1 + width; + + for(int i = 0; i < width; i+=2,row1+=8,row2+=8) + { + int cr = uv[i + SPorI + 0] - 128; + int cb = uv[i - SPorI + 1] - 128; + + int ruv = 409 * cr + 128; + int guv = 128 - 100 * cb - 208 * cr; + int buv = 516 * cb + 128; + + int y00 = (y1[i] - 16) * 298; + row1[0+R] = saturate_cast((y00 + buv) >> 8); + row1[1] = saturate_cast((y00 + guv) >> 8); + row1[2-R] = saturate_cast((y00 + ruv) >> 8); + row1[3] = (uchar)0xff; + + int y01 = (y1[i+1] - 16) * 298; + row1[4+R] = saturate_cast((y01 + buv) >> 8); + row1[5] = saturate_cast((y01 + guv) >> 8); + row1[6-R] = saturate_cast((y01 + ruv) >> 8); + row1[7] = (uchar)0xff; + + int y10 = (y2[i] - 16) * 298; + row2[0+R] = saturate_cast((y10 + buv) >> 8); + row2[1] = saturate_cast((y10 + guv) >> 8); + row2[2-R] = saturate_cast((y10 + ruv) >> 8); + row2[3] = (uchar)0xff; + + int y11 = (y2[i+1] - 16) * 298; + 
row2[4+R] = saturate_cast((y11 + buv) >> 8); + row2[5] = saturate_cast((y11 + guv) >> 8); + row2[6-R] = saturate_cast((y11 + ruv) >> 8); + row2[7] = (uchar)0xff; + } + } + } +}; + +}//namespace cv + +////////////////////////////////////////////////////////////////////////////////////////// +// The main function // +////////////////////////////////////////////////////////////////////////////////////////// + +void cv::cvtColor( InputArray _src, OutputArray _dst, int code, int dcn ) +{ + Mat src = _src.getMat(), dst; + Size sz = src.size(); + int scn = src.channels(), depth = src.depth(), bidx; + + CV_Assert( depth == CV_8U || depth == CV_16U || depth == CV_32F ); + + switch( code ) + { + case CV_BGR2BGRA: case CV_RGB2BGRA: case CV_BGRA2BGR: + case CV_RGBA2BGR: case CV_RGB2BGR: case CV_BGRA2RGBA: + CV_Assert( scn == 3 || scn == 4 ); + dcn = code == CV_BGR2BGRA || code == CV_RGB2BGRA || code == CV_BGRA2RGBA ? 4 : 3; + bidx = code == CV_BGR2BGRA || code == CV_BGRA2BGR ? 0 : 2; + + _dst.create( sz, CV_MAKETYPE(depth, dcn)); + dst = _dst.getMat(); + + if( depth == CV_8U ) + CvtColorLoop(src, dst, RGB2RGB(scn, dcn, bidx)); + else if( depth == CV_16U ) + CvtColorLoop(src, dst, RGB2RGB(scn, dcn, bidx)); + else + CvtColorLoop(src, dst, RGB2RGB(scn, dcn, bidx)); + break; + + case CV_BGR2BGR565: case CV_BGR2BGR555: case CV_RGB2BGR565: case CV_RGB2BGR555: + case CV_BGRA2BGR565: case CV_BGRA2BGR555: case CV_RGBA2BGR565: case CV_RGBA2BGR555: + CV_Assert( (scn == 3 || scn == 4) && depth == CV_8U ); + _dst.create(sz, CV_8UC2); + dst = _dst.getMat(); + + CvtColorLoop(src, dst, RGB2RGB5x5(scn, + code == CV_BGR2BGR565 || code == CV_BGR2BGR555 || + code == CV_BGRA2BGR565 || code == CV_BGRA2BGR555 ? 0 : 2, + code == CV_BGR2BGR565 || code == CV_RGB2BGR565 || + code == CV_BGRA2BGR565 || code == CV_RGBA2BGR565 ? 
6 : 5 // green bits + )); + break; + + case CV_BGR5652BGR: case CV_BGR5552BGR: case CV_BGR5652RGB: case CV_BGR5552RGB: + case CV_BGR5652BGRA: case CV_BGR5552BGRA: case CV_BGR5652RGBA: case CV_BGR5552RGBA: + if(dcn <= 0) dcn = 3; + CV_Assert( (dcn == 3 || dcn == 4) && scn == 2 && depth == CV_8U ); + _dst.create(sz, CV_MAKETYPE(depth, dcn)); + dst = _dst.getMat(); + + CvtColorLoop(src, dst, RGB5x52RGB(dcn, + code == CV_BGR5652BGR || code == CV_BGR5552BGR || + code == CV_BGR5652BGRA || code == CV_BGR5552BGRA ? 0 : 2, // blue idx + code == CV_BGR5652BGR || code == CV_BGR5652RGB || + code == CV_BGR5652BGRA || code == CV_BGR5652RGBA ? 6 : 5 // green bits + )); + break; + + case CV_BGR2GRAY: case CV_BGRA2GRAY: case CV_RGB2GRAY: case CV_RGBA2GRAY: + CV_Assert( scn == 3 || scn == 4 ); + _dst.create(sz, CV_MAKETYPE(depth, 1)); + dst = _dst.getMat(); + + bidx = code == CV_BGR2GRAY || code == CV_BGRA2GRAY ? 0 : 2; + + if( depth == CV_8U ) + CvtColorLoop(src, dst, RGB2Gray(scn, bidx, 0)); + else if( depth == CV_16U ) + CvtColorLoop(src, dst, RGB2Gray(scn, bidx, 0)); + else + CvtColorLoop(src, dst, RGB2Gray(scn, bidx, 0)); + break; + + case CV_BGR5652GRAY: case CV_BGR5552GRAY: + CV_Assert( scn == 2 && depth == CV_8U ); + _dst.create(sz, CV_8UC1); + dst = _dst.getMat(); + + CvtColorLoop(src, dst, RGB5x52Gray(code == CV_BGR5652GRAY ? 6 : 5)); + break; + + case CV_GRAY2BGR: case CV_GRAY2BGRA: + if( dcn <= 0 ) dcn = 3; + CV_Assert( scn == 1 && (dcn == 3 || dcn == 4)); + _dst.create(sz, CV_MAKETYPE(depth, dcn)); + dst = _dst.getMat(); + + if( depth == CV_8U ) + CvtColorLoop(src, dst, Gray2RGB(dcn)); + else if( depth == CV_16U ) + CvtColorLoop(src, dst, Gray2RGB(dcn)); + else + CvtColorLoop(src, dst, Gray2RGB(dcn)); + break; + + case CV_GRAY2BGR565: case CV_GRAY2BGR555: + CV_Assert( scn == 1 && depth == CV_8U ); + _dst.create(sz, CV_8UC2); + dst = _dst.getMat(); + + CvtColorLoop(src, dst, Gray2RGB5x5(code == CV_GRAY2BGR565 ? 
6 : 5)); + break; + + case CV_BGR2YCrCb: case CV_RGB2YCrCb: + case CV_BGR2YUV: case CV_RGB2YUV: + { + CV_Assert( scn == 3 || scn == 4 ); + bidx = code == CV_BGR2YCrCb || code == CV_RGB2YUV ? 0 : 2; + static const float yuv_f[] = { 0.114f, 0.587f, 0.299f, 0.492f, 0.877f }; + static const int yuv_i[] = { B2Y, G2Y, R2Y, 8061, 14369 }; + const float* coeffs_f = code == CV_BGR2YCrCb || code == CV_RGB2YCrCb ? 0 : yuv_f; + const int* coeffs_i = code == CV_BGR2YCrCb || code == CV_RGB2YCrCb ? 0 : yuv_i; + + _dst.create(sz, CV_MAKETYPE(depth, 3)); + dst = _dst.getMat(); + + if( depth == CV_8U ) + CvtColorLoop(src, dst, RGB2YCrCb_i(scn, bidx, coeffs_i)); + else if( depth == CV_16U ) + CvtColorLoop(src, dst, RGB2YCrCb_i(scn, bidx, coeffs_i)); + else + CvtColorLoop(src, dst, RGB2YCrCb_f(scn, bidx, coeffs_f)); + } + break; + + case CV_YCrCb2BGR: case CV_YCrCb2RGB: + case CV_YUV2BGR: case CV_YUV2RGB: + { + if( dcn <= 0 ) dcn = 3; + CV_Assert( scn == 3 && (dcn == 3 || dcn == 4) ); + bidx = code == CV_YCrCb2BGR || code == CV_YUV2RGB ? 0 : 2; + static const float yuv_f[] = { 2.032f, -0.395f, -0.581f, 1.140f }; + static const int yuv_i[] = { 33292, -6472, -9519, 18678 }; + const float* coeffs_f = code == CV_YCrCb2BGR || code == CV_YCrCb2RGB ? 0 : yuv_f; + const int* coeffs_i = code == CV_YCrCb2BGR || code == CV_YCrCb2RGB ? 0 : yuv_i; + + _dst.create(sz, CV_MAKETYPE(depth, dcn)); + dst = _dst.getMat(); + + if( depth == CV_8U ) + CvtColorLoop(src, dst, YCrCb2RGB_i(dcn, bidx, coeffs_i)); + else if( depth == CV_16U ) + CvtColorLoop(src, dst, YCrCb2RGB_i(dcn, bidx, coeffs_i)); + else + CvtColorLoop(src, dst, YCrCb2RGB_f(dcn, bidx, coeffs_f)); + } + break; + + case CV_BGR2XYZ: case CV_RGB2XYZ: + CV_Assert( scn == 3 || scn == 4 ); + bidx = code == CV_BGR2XYZ ? 
0 : 2; + + _dst.create(sz, CV_MAKETYPE(depth, 3)); + dst = _dst.getMat(); + + if( depth == CV_8U ) + CvtColorLoop(src, dst, RGB2XYZ_i(scn, bidx, 0)); + else if( depth == CV_16U ) + CvtColorLoop(src, dst, RGB2XYZ_i(scn, bidx, 0)); + else + CvtColorLoop(src, dst, RGB2XYZ_f(scn, bidx, 0)); + break; + + case CV_XYZ2BGR: case CV_XYZ2RGB: + if( dcn <= 0 ) dcn = 3; + CV_Assert( scn == 3 && (dcn == 3 || dcn == 4) ); + bidx = code == CV_XYZ2BGR ? 0 : 2; + + _dst.create(sz, CV_MAKETYPE(depth, dcn)); + dst = _dst.getMat(); + + if( depth == CV_8U ) + CvtColorLoop(src, dst, XYZ2RGB_i(dcn, bidx, 0)); + else if( depth == CV_16U ) + CvtColorLoop(src, dst, XYZ2RGB_i(dcn, bidx, 0)); + else + CvtColorLoop(src, dst, XYZ2RGB_f(dcn, bidx, 0)); + break; + + case CV_BGR2HSV: case CV_RGB2HSV: case CV_BGR2HSV_FULL: case CV_RGB2HSV_FULL: + case CV_BGR2HLS: case CV_RGB2HLS: case CV_BGR2HLS_FULL: case CV_RGB2HLS_FULL: + { + CV_Assert( (scn == 3 || scn == 4) && (depth == CV_8U || depth == CV_32F) ); + bidx = code == CV_BGR2HSV || code == CV_BGR2HLS || + code == CV_BGR2HSV_FULL || code == CV_BGR2HLS_FULL ? 0 : 2; + int hrange = depth == CV_32F ? 360 : code == CV_BGR2HSV || code == CV_RGB2HSV || + code == CV_BGR2HLS || code == CV_RGB2HLS ? 
180 : 256; + + _dst.create(sz, CV_MAKETYPE(depth, 3)); + dst = _dst.getMat(); + + if( code == CV_BGR2HSV || code == CV_RGB2HSV || + code == CV_BGR2HSV_FULL || code == CV_RGB2HSV_FULL ) + { + if( depth == CV_8U ) + CvtColorLoop(src, dst, RGB2HSV_b(scn, bidx, hrange)); + else + CvtColorLoop(src, dst, RGB2HSV_f(scn, bidx, (float)hrange)); + } + else + { + if( depth == CV_8U ) + CvtColorLoop(src, dst, RGB2HLS_b(scn, bidx, hrange)); + else + CvtColorLoop(src, dst, RGB2HLS_f(scn, bidx, (float)hrange)); + } + } + break; + + case CV_HSV2BGR: case CV_HSV2RGB: case CV_HSV2BGR_FULL: case CV_HSV2RGB_FULL: + case CV_HLS2BGR: case CV_HLS2RGB: case CV_HLS2BGR_FULL: case CV_HLS2RGB_FULL: + { + if( dcn <= 0 ) dcn = 3; + CV_Assert( scn == 3 && (dcn == 3 || dcn == 4) && (depth == CV_8U || depth == CV_32F) ); + bidx = code == CV_HSV2BGR || code == CV_HLS2BGR || + code == CV_HSV2BGR_FULL || code == CV_HLS2BGR_FULL ? 0 : 2; + int hrange = depth == CV_32F ? 360 : code == CV_HSV2BGR || code == CV_HSV2RGB || + code == CV_HLS2BGR || code == CV_HLS2RGB ? 180 : 255; + + _dst.create(sz, CV_MAKETYPE(depth, dcn)); + dst = _dst.getMat(); + + if( code == CV_HSV2BGR || code == CV_HSV2RGB || + code == CV_HSV2BGR_FULL || code == CV_HSV2RGB_FULL ) + { + if( depth == CV_8U ) + CvtColorLoop(src, dst, HSV2RGB_b(dcn, bidx, hrange)); + else + CvtColorLoop(src, dst, HSV2RGB_f(dcn, bidx, (float)hrange)); + } + else + { + if( depth == CV_8U ) + CvtColorLoop(src, dst, HLS2RGB_b(dcn, bidx, hrange)); + else + CvtColorLoop(src, dst, HLS2RGB_f(dcn, bidx, (float)hrange)); + } + } + break; + + case CV_BGR2Lab: case CV_RGB2Lab: case CV_LBGR2Lab: case CV_LRGB2Lab: + case CV_BGR2Luv: case CV_RGB2Luv: case CV_LBGR2Luv: case CV_LRGB2Luv: + { + CV_Assert( (scn == 3 || scn == 4) && (depth == CV_8U || depth == CV_32F) ); + bidx = code == CV_BGR2Lab || code == CV_BGR2Luv || + code == CV_LBGR2Lab || code == CV_LBGR2Luv ? 
0 : 2; + bool srgb = code == CV_BGR2Lab || code == CV_RGB2Lab || + code == CV_BGR2Luv || code == CV_RGB2Luv; + + _dst.create(sz, CV_MAKETYPE(depth, 3)); + dst = _dst.getMat(); + + if( code == CV_BGR2Lab || code == CV_RGB2Lab || + code == CV_LBGR2Lab || code == CV_LRGB2Lab ) + { + if( depth == CV_8U ) + CvtColorLoop(src, dst, RGB2Lab_b(scn, bidx, 0, 0, srgb)); + else + CvtColorLoop(src, dst, RGB2Lab_f(scn, bidx, 0, 0, srgb)); + } + else + { + if( depth == CV_8U ) + CvtColorLoop(src, dst, RGB2Luv_b(scn, bidx, 0, 0, srgb)); + else + CvtColorLoop(src, dst, RGB2Luv_f(scn, bidx, 0, 0, srgb)); + } + } + break; + + case CV_Lab2BGR: case CV_Lab2RGB: case CV_Lab2LBGR: case CV_Lab2LRGB: + case CV_Luv2BGR: case CV_Luv2RGB: case CV_Luv2LBGR: case CV_Luv2LRGB: + { + if( dcn <= 0 ) dcn = 3; + CV_Assert( scn == 3 && (dcn == 3 || dcn == 4) && (depth == CV_8U || depth == CV_32F) ); + bidx = code == CV_Lab2BGR || code == CV_Luv2BGR || + code == CV_Lab2LBGR || code == CV_Luv2LBGR ? 0 : 2; + bool srgb = code == CV_Lab2BGR || code == CV_Lab2RGB || + code == CV_Luv2BGR || code == CV_Luv2RGB; + + _dst.create(sz, CV_MAKETYPE(depth, dcn)); + dst = _dst.getMat(); + + if( code == CV_Lab2BGR || code == CV_Lab2RGB || + code == CV_Lab2LBGR || code == CV_Lab2LRGB ) + { + if( depth == CV_8U ) + CvtColorLoop(src, dst, Lab2RGB_b(dcn, bidx, 0, 0, srgb)); + else + CvtColorLoop(src, dst, Lab2RGB_f(dcn, bidx, 0, 0, srgb)); + } + else + { + if( depth == CV_8U ) + CvtColorLoop(src, dst, Luv2RGB_b(dcn, bidx, 0, 0, srgb)); + else + CvtColorLoop(src, dst, Luv2RGB_f(dcn, bidx, 0, 0, srgb)); + } + } + break; + + case CV_BayerBG2GRAY: case CV_BayerGB2GRAY: case CV_BayerRG2GRAY: case CV_BayerGR2GRAY: + if(dcn <= 0) dcn = 1; + CV_Assert( scn == 1 && dcn == 1 ); + + _dst.create(sz, depth); + dst = _dst.getMat(); + + if( depth == CV_8U ) + Bayer2Gray_(src, dst, code); + else if( depth == CV_16U ) + Bayer2Gray_ >(src, dst, code); + else + CV_Error(CV_StsUnsupportedFormat, "Bayer->Gray demosaicing only supports 8u 
and 16u types"); + break; + + case CV_BayerBG2BGR: case CV_BayerGB2BGR: case CV_BayerRG2BGR: case CV_BayerGR2BGR: + case CV_BayerBG2BGR_VNG: case CV_BayerGB2BGR_VNG: case CV_BayerRG2BGR_VNG: case CV_BayerGR2BGR_VNG: + if(dcn <= 0) dcn = 3; + CV_Assert( scn == 1 && dcn == 3 ); + + _dst.create(sz, CV_MAKETYPE(depth, dcn)); + dst = _dst.getMat(); + + if( code == CV_BayerBG2BGR || code == CV_BayerGB2BGR || + code == CV_BayerRG2BGR || code == CV_BayerGR2BGR ) + { + if( depth == CV_8U ) + Bayer2RGB_(src, dst, code); + else if( depth == CV_16U ) + Bayer2RGB_ >(src, dst, code); + else + CV_Error(CV_StsUnsupportedFormat, "Bayer->RGB demosaicing only supports 8u and 16u types"); + } + else + { + CV_Assert( depth == CV_8U ); + Bayer2RGB_VNG_8u(src, dst, code); + } + break; + case CV_YUV420sp2BGR: case CV_YUV420sp2RGB: case CV_YUV420i2BGR: case CV_YUV420i2RGB: + { + if(dcn <= 0) dcn = 3; + CV_Assert( dcn == 3 || dcn == 4 ); + CV_Assert( sz.width % 2 == 0 && sz.height % 3 == 0 && depth == CV_8U && src.isContinuous() ); + + Size dstSz(sz.width, sz.height * 2 / 3); + _dst.create( dstSz, CV_MAKETYPE(depth, dcn)); + dst = _dst.getMat(); + + const uchar* y = src.ptr(); + const uchar* uv = y + dstSz.area(); + +#ifdef HAVE_TEGRA_OPTIMIZATION + if (!tegra::YUV420i2BGR(y, uv, dst, CV_YUV420sp2RGB == code)) +#endif + { + if (CV_YUV420sp2RGB == code) + { + if (dcn == 3) + parallel_for(BlockedRange(0, dstSz.height, 2), YUV4202BGR888Invoker<2,0>(dst, dstSz.width, y, uv)); + else + parallel_for(BlockedRange(0, dstSz.height, 2), YUV4202BGRA8888Invoker<2,0>(dst, dstSz.width, y, uv)); + } + else if (CV_YUV420sp2BGR == code) + { + if (dcn == 3) + parallel_for(BlockedRange(0, dstSz.height, 2), YUV4202BGR888Invoker<0,0>(dst, dstSz.width, y, uv)); + else + parallel_for(BlockedRange(0, dstSz.height, 2), YUV4202BGRA8888Invoker<0,0>(dst, dstSz.width, y, uv)); + } + else if (CV_YUV420i2RGB == code) + { + if (dcn == 3) + parallel_for(BlockedRange(0, dstSz.height, 2), YUV4202BGR888Invoker<2,1>(dst, 
dstSz.width, y, uv)); + else + parallel_for(BlockedRange(0, dstSz.height, 2), YUV4202BGRA8888Invoker<2,1>(dst, dstSz.width, y, uv)); + } + else if (CV_YUV420i2BGR == code) + { + if (dcn == 3) + parallel_for(BlockedRange(0, dstSz.height, 2), YUV4202BGR888Invoker<0,1>(dst, dstSz.width, y, uv)); + else + parallel_for(BlockedRange(0, dstSz.height, 2), YUV4202BGRA8888Invoker<0,1>(dst, dstSz.width, y, uv)); + } + } + } + break; + default: + CV_Error( CV_StsBadFlag, "Unknown/unsupported color conversion code" ); + } +} + +CV_IMPL void +cvCvtColor( const CvArr* srcarr, CvArr* dstarr, int code ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst0 = cv::cvarrToMat(dstarr), dst = dst0; + CV_Assert( src.depth() == dst.depth() ); + + cv::cvtColor(src, dst, code, dst.channels()); + CV_Assert( dst.data == dst0.data ); +} + + +/* End of file. */ diff --git a/opencv/imgproc/contours.cpp b/opencv/imgproc/contours.cpp new file mode 100644 index 0000000..d369f15 --- /dev/null +++ b/opencv/imgproc/contours.cpp @@ -0,0 +1,1780 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ +#include "precomp.hpp" + +/* initializes 8-element array for fast access to 3x3 neighborhood of a pixel */ +#define CV_INIT_3X3_DELTAS( deltas, step, nch ) \ + ((deltas)[0] = (nch), (deltas)[1] = -(step) + (nch), \ + (deltas)[2] = -(step), (deltas)[3] = -(step) - (nch), \ + (deltas)[4] = -(nch), (deltas)[5] = (step) - (nch), \ + (deltas)[6] = (step), (deltas)[7] = (step) + (nch)) + +static const CvPoint icvCodeDeltas[8] = + { {1, 0}, {1, -1}, {0, -1}, {-1, -1}, {-1, 0}, {-1, 1}, {0, 1}, {1, 1} }; + +CV_IMPL void +cvStartReadChainPoints( CvChain * chain, CvChainPtReader * reader ) +{ + int i; + + if( !chain || !reader ) + CV_Error( CV_StsNullPtr, "" ); + + if( chain->elem_size != 1 || chain->header_size < (int)sizeof(CvChain)) + CV_Error( CV_StsBadSize, "" ); + + cvStartReadSeq( (CvSeq *) chain, (CvSeqReader *) reader, 0 ); + + reader->pt = chain->origin; + for( i = 0; i < 8; i++ ) + { + reader->deltas[i][0] = (schar) icvCodeDeltas[i].x; + reader->deltas[i][1] = (schar) icvCodeDeltas[i].y; + } +} + + +/* retrieves next point of the chain curve and updates reader */ +CV_IMPL CvPoint +cvReadChainPoint( CvChainPtReader * reader ) +{ + schar *ptr; + int code; + CvPoint pt = { 0, 0 }; + + if( !reader ) + CV_Error( CV_StsNullPtr, "" ); + + pt = reader->pt; + + ptr = reader->ptr; + if( ptr ) + { + code = *ptr++; + + if( ptr >= reader->block_max ) + { + cvChangeSeqBlock( (CvSeqReader *) reader, 1 ); + ptr = reader->ptr; + } + + reader->ptr = ptr; + reader->code = (schar)code; + assert( (code & ~7) == 0 ); + reader->pt.x = pt.x + icvCodeDeltas[code].x; + reader->pt.y = pt.y + icvCodeDeltas[code].y; + } + + return pt; +} + + +/****************************************************************************************\ +* Raster->Chain Tree (Suzuki algorithms) * +\****************************************************************************************/ + +typedef struct _CvContourInfo +{ + int flags; + struct _CvContourInfo *next; /* next contour with the same mark 
value */ + struct _CvContourInfo *parent; /* information about parent contour */ + CvSeq *contour; /* corresponding contour (may be 0, if rejected) */ + CvRect rect; /* bounding rectangle */ + CvPoint origin; /* origin point (where the contour was traced from) */ + int is_hole; /* hole flag */ +} +_CvContourInfo; + + +/* + Structure that is used for sequental retrieving contours from the image. + It supports both hierarchical and plane variants of Suzuki algorithm. +*/ +typedef struct _CvContourScanner +{ + CvMemStorage *storage1; /* contains fetched contours */ + CvMemStorage *storage2; /* contains approximated contours + (!=storage1 if approx_method2 != approx_method1) */ + CvMemStorage *cinfo_storage; /* contains _CvContourInfo nodes */ + CvSet *cinfo_set; /* set of _CvContourInfo nodes */ + CvMemStoragePos initial_pos; /* starting storage pos */ + CvMemStoragePos backup_pos; /* beginning of the latest approx. contour */ + CvMemStoragePos backup_pos2; /* ending of the latest approx. contour */ + schar *img0; /* image origin */ + schar *img; /* current image row */ + int img_step; /* image step */ + CvSize img_size; /* ROI size */ + CvPoint offset; /* ROI offset: coordinates, added to each contour point */ + CvPoint pt; /* current scanner position */ + CvPoint lnbd; /* position of the last met contour */ + int nbd; /* current mark val */ + _CvContourInfo *l_cinfo; /* information about latest approx. contour */ + _CvContourInfo cinfo_temp; /* temporary var which is used in simple modes */ + _CvContourInfo frame_info; /* information about frame */ + CvSeq frame; /* frame itself */ + int approx_method1; /* approx method when tracing */ + int approx_method2; /* final approx method */ + int mode; /* contour scanning mode: + 0 - external only + 1 - all the contours w/o any hierarchy + 2 - connected components (i.e. 
two-level structure - + external contours and holes) */ + int subst_flag; + int seq_type1; /* type of fetched contours */ + int header_size1; /* hdr size of fetched contours */ + int elem_size1; /* elem size of fetched contours */ + int seq_type2; /* */ + int header_size2; /* the same for approx. contours */ + int elem_size2; /* */ + _CvContourInfo *cinfo_table[126]; +} +_CvContourScanner; + +#define _CV_FIND_CONTOURS_FLAGS_EXTERNAL_ONLY 1 +#define _CV_FIND_CONTOURS_FLAGS_HIERARCHIC 2 + +/* + Initializes scanner structure. + Prepare image for scanning ( clear borders and convert all pixels to 0-1. +*/ +CV_IMPL CvContourScanner +cvStartFindContours( void* _img, CvMemStorage* storage, + int header_size, int mode, + int method, CvPoint offset ) +{ + int y; + int step; + CvSize size; + uchar *img = 0; + CvContourScanner scanner = 0; + + if( !storage ) + CV_Error( CV_StsNullPtr, "" ); + + CvMat stub, *mat = cvGetMat( _img, &stub ); + + if( !CV_IS_MASK_ARR( mat )) + CV_Error( CV_StsUnsupportedFormat, "[Start]FindContours support only 8uC1 images" ); + + size = cvSize( mat->width, mat->height ); + step = mat->step; + img = (uchar*)(mat->data.ptr); + + if( method < 0 || method > CV_CHAIN_APPROX_TC89_KCOS ) + CV_Error( CV_StsOutOfRange, "" ); + + if( header_size < (int) (method == CV_CHAIN_CODE ? 
sizeof( CvChain ) : sizeof( CvContour ))) + CV_Error( CV_StsBadSize, "" ); + + scanner = (CvContourScanner)cvAlloc( sizeof( *scanner )); + memset( scanner, 0, sizeof( *scanner )); + + scanner->storage1 = scanner->storage2 = storage; + scanner->img0 = (schar *) img; + scanner->img = (schar *) (img + step); + scanner->img_step = step; + scanner->img_size.width = size.width - 1; /* exclude rightest column */ + scanner->img_size.height = size.height - 1; /* exclude bottomost row */ + scanner->mode = mode; + scanner->offset = offset; + scanner->pt.x = scanner->pt.y = 1; + scanner->lnbd.x = 0; + scanner->lnbd.y = 1; + scanner->nbd = 2; + scanner->mode = (int) mode; + scanner->frame_info.contour = &(scanner->frame); + scanner->frame_info.is_hole = 1; + scanner->frame_info.next = 0; + scanner->frame_info.parent = 0; + scanner->frame_info.rect = cvRect( 0, 0, size.width, size.height ); + scanner->l_cinfo = 0; + scanner->subst_flag = 0; + + scanner->frame.flags = CV_SEQ_FLAG_HOLE; + + scanner->approx_method2 = scanner->approx_method1 = method; + + if( method == CV_CHAIN_APPROX_TC89_L1 || method == CV_CHAIN_APPROX_TC89_KCOS ) + scanner->approx_method1 = CV_CHAIN_CODE; + + if( scanner->approx_method1 == CV_CHAIN_CODE ) + { + scanner->seq_type1 = CV_SEQ_CHAIN_CONTOUR; + scanner->header_size1 = scanner->approx_method1 == scanner->approx_method2 ? + header_size : sizeof( CvChain ); + scanner->elem_size1 = sizeof( char ); + } + else + { + scanner->seq_type1 = CV_SEQ_POLYGON; + scanner->header_size1 = scanner->approx_method1 == scanner->approx_method2 ? + header_size : sizeof( CvContour ); + scanner->elem_size1 = sizeof( CvPoint ); + } + + scanner->header_size2 = header_size; + + if( scanner->approx_method2 == CV_CHAIN_CODE ) + { + scanner->seq_type2 = scanner->seq_type1; + scanner->elem_size2 = scanner->elem_size1; + } + else + { + scanner->seq_type2 = CV_SEQ_POLYGON; + scanner->elem_size2 = sizeof( CvPoint ); + } + + scanner->seq_type1 = scanner->approx_method1 == CV_CHAIN_CODE ? 
+ CV_SEQ_CHAIN_CONTOUR : CV_SEQ_POLYGON; + + scanner->seq_type2 = scanner->approx_method2 == CV_CHAIN_CODE ? + CV_SEQ_CHAIN_CONTOUR : CV_SEQ_POLYGON; + + cvSaveMemStoragePos( storage, &(scanner->initial_pos) ); + + if( method > CV_CHAIN_APPROX_SIMPLE ) + { + scanner->storage1 = cvCreateChildMemStorage( scanner->storage2 ); + } + + if( mode > CV_RETR_LIST ) + { + scanner->cinfo_storage = cvCreateChildMemStorage( scanner->storage2 ); + scanner->cinfo_set = cvCreateSet( 0, sizeof( CvSet ), sizeof( _CvContourInfo ), + scanner->cinfo_storage ); + } + + /* make zero borders */ + memset( img, 0, size.width ); + memset( img + step * (size.height - 1), 0, size.width ); + + for( y = 1, img += step; y < size.height - 1; y++, img += step ) + { + img[0] = img[size.width - 1] = 0; + } + + /* converts all pixels to 0 or 1 */ + cvThreshold( mat, mat, 0, 1, CV_THRESH_BINARY ); + + return scanner; +} + +/* + Final stage of contour processing. + Three variants possible: + 1. Contour, which was retrieved using border following, is added to + the contour tree. It is the case when the icvSubstituteContour function + was not called after retrieving the contour. + + 2. New contour, assigned by icvSubstituteContour function, is added to the + tree. The retrieved contour itself is removed from the storage. + Here two cases are possible: + 2a. If one deals with plane variant of algorithm + (hierarchical strucutre is not reconstructed), + the contour is removed completely. + 2b. In hierarchical case, the header of the contour is not removed. + It's marked as "link to contour" and h_next pointer of it is set to + new, substituting contour. + + 3. The similar to 2, but when NULL pointer was assigned by + icvSubstituteContour function. In this case, the function removes + retrieved contour completely if plane case and + leaves header if hierarchical (but doesn't mark header as "link"). 
+ ------------------------------------------------------------------------ + The 1st variant can be used to retrieve and store all the contours from the image + (with optional convertion from chains to contours using some approximation from + restriced set of methods). Some characteristics of contour can be computed in the + same pass. + + The usage scheme can look like: + + icvContourScanner scanner; + CvMemStorage* contour_storage; + CvSeq* first_contour; + CvStatus result; + + ... + + icvCreateMemStorage( &contour_storage, block_size/0 ); + + ... + + cvStartFindContours + ( img, contour_storage, + header_size, approx_method, + [external_only,] + &scanner ); + + for(;;) + { + [CvSeq* contour;] + result = icvFindNextContour( &scanner, &contour/0 ); + + if( result != CV_OK ) break; + + // calculate some characteristics + ... + } + + if( result < 0 ) goto error_processing; + + cvEndFindContours( &scanner, &first_contour ); + ... + + ----------------------------------------------------------------- + + Second variant is more complex and can be used when someone wants store not + the retrieved contours but transformed ones. (e.g. approximated with some + non-default algorithm ). + + The scheme can be the as following: + + icvContourScanner scanner; + CvMemStorage* contour_storage; + CvMemStorage* temp_storage; + CvSeq* first_contour; + CvStatus result; + + ... + + icvCreateMemStorage( &contour_storage, block_size/0 ); + icvCreateMemStorage( &temp_storage, block_size/0 ); + + ... + + icvStartFindContours8uC1R + ( , temp_storage, + header_size, approx_method, + [retrival_mode], + &scanner ); + + for(;;) + { + CvSeq* temp_contour; + CvSeq* new_contour; + result = icvFindNextContour( scanner, &temp_contour ); + + if( result != CV_OK ) break; + + ( temp_contour, contour_storage, + &new_contour, ); + + icvSubstituteContour( scanner, new_contour ); + ... + } + + if( result < 0 ) goto error_processing; + + cvEndFindContours( &scanner, &first_contour ); + ... 
+ + ---------------------------------------------------------------------------- + Third method to retrieve contours may be applied if contours are irrelevant + themselves but some characteristics of them are used only. + The usage is similar to second except slightly different internal loop + + for(;;) + { + CvSeq* temp_contour; + result = icvFindNextContour( &scanner, &temp_contour ); + + if( result != CV_OK ) break; + + // calculate some characteristics of temp_contour + + icvSubstituteContour( scanner, 0 ); + ... + } + + new_storage variable is not needed here. + + Note, that the second and the third methods can interleave. I.e. it is possible to + retain contours that satisfy with some criteria and reject others. + In hierarchic case the resulting tree is the part of original tree with + some nodes absent. But in the resulting tree the contour1 is a child + (may be indirect) of contour2 iff in the original tree the contour1 + is a child (may be indirect) of contour2. +*/ +static void +icvEndProcessContour( CvContourScanner scanner ) +{ + _CvContourInfo *l_cinfo = scanner->l_cinfo; + + if( l_cinfo ) + { + if( scanner->subst_flag ) + { + CvMemStoragePos temp; + + cvSaveMemStoragePos( scanner->storage2, &temp ); + + if( temp.top == scanner->backup_pos2.top && + temp.free_space == scanner->backup_pos2.free_space ) + { + cvRestoreMemStoragePos( scanner->storage2, &scanner->backup_pos ); + } + scanner->subst_flag = 0; + } + + if( l_cinfo->contour ) + { + cvInsertNodeIntoTree( l_cinfo->contour, l_cinfo->parent->contour, + &(scanner->frame) ); + } + scanner->l_cinfo = 0; + } +} + +/* replaces one contour with another */ +CV_IMPL void +cvSubstituteContour( CvContourScanner scanner, CvSeq * new_contour ) +{ + _CvContourInfo *l_cinfo; + + if( !scanner ) + CV_Error( CV_StsNullPtr, "" ); + + l_cinfo = scanner->l_cinfo; + if( l_cinfo && l_cinfo->contour && l_cinfo->contour != new_contour ) + { + l_cinfo->contour = new_contour; + scanner->subst_flag = 1; + } +} + + +/* + 
marks domain border with +/- and stores the contour into CvSeq. + method: + <0 - chain + ==0 - direct + >0 - simple approximation +*/ +static void +icvFetchContour( schar *ptr, + int step, + CvPoint pt, + CvSeq* contour, + int _method ) +{ + const schar nbd = 2; + int deltas[16]; + CvSeqWriter writer; + schar *i0 = ptr, *i1, *i3, *i4 = 0; + int prev_s = -1, s, s_end; + int method = _method - 1; + + assert( (unsigned) _method <= CV_CHAIN_APPROX_SIMPLE ); + + /* initialize local state */ + CV_INIT_3X3_DELTAS( deltas, step, 1 ); + memcpy( deltas + 8, deltas, 8 * sizeof( deltas[0] )); + + /* initialize writer */ + cvStartAppendToSeq( contour, &writer ); + + if( method < 0 ) + ((CvChain *) contour)->origin = pt; + + s_end = s = CV_IS_SEQ_HOLE( contour ) ? 0 : 4; + + do + { + s = (s - 1) & 7; + i1 = i0 + deltas[s]; + if( *i1 != 0 ) + break; + } + while( s != s_end ); + + if( s == s_end ) /* single pixel domain */ + { + *i0 = (schar) (nbd | -128); + if( method >= 0 ) + { + CV_WRITE_SEQ_ELEM( pt, writer ); + } + } + else + { + i3 = i0; + prev_s = s ^ 4; + + /* follow border */ + for( ;; ) + { + s_end = s; + + for( ;; ) + { + i4 = i3 + deltas[++s]; + if( *i4 != 0 ) + break; + } + s &= 7; + + /* check "right" bound */ + if( (unsigned) (s - 1) < (unsigned) s_end ) + { + *i3 = (schar) (nbd | -128); + } + else if( *i3 == 1 ) + { + *i3 = nbd; + } + + if( method < 0 ) + { + schar _s = (schar) s; + + CV_WRITE_SEQ_ELEM( _s, writer ); + } + else + { + if( s != prev_s || method == 0 ) + { + CV_WRITE_SEQ_ELEM( pt, writer ); + prev_s = s; + } + + pt.x += icvCodeDeltas[s].x; + pt.y += icvCodeDeltas[s].y; + + } + + if( i4 == i0 && i3 == i1 ) + break; + + i3 = i4; + s = (s + 4) & 7; + } /* end of border following loop */ + } + + cvEndWriteSeq( &writer ); + + if( _method != CV_CHAIN_CODE ) + cvBoundingRect( contour, 1 ); + + assert( (writer.seq->total == 0 && writer.seq->first == 0) || + writer.seq->total > writer.seq->first->count || + (writer.seq->first->prev == writer.seq->first && + 
writer.seq->first->next == writer.seq->first) ); +} + + + +/* + trace contour until certain point is met. + returns 1 if met, 0 else. +*/ +static int +icvTraceContour( schar *ptr, int step, schar *stop_ptr, int is_hole ) +{ + int deltas[16]; + schar *i0 = ptr, *i1, *i3, *i4; + int s, s_end; + + /* initialize local state */ + CV_INIT_3X3_DELTAS( deltas, step, 1 ); + memcpy( deltas + 8, deltas, 8 * sizeof( deltas[0] )); + + assert( (*i0 & -2) != 0 ); + + s_end = s = is_hole ? 0 : 4; + + do + { + s = (s - 1) & 7; + i1 = i0 + deltas[s]; + if( *i1 != 0 ) + break; + } + while( s != s_end ); + + i3 = i0; + + /* check single pixel domain */ + if( s != s_end ) + { + /* follow border */ + for( ;; ) + { + s_end = s; + + for( ;; ) + { + i4 = i3 + deltas[++s]; + if( *i4 != 0 ) + break; + } + + if( i3 == stop_ptr || (i4 == i0 && i3 == i1) ) + break; + + i3 = i4; + s = (s + 4) & 7; + } /* end of border following loop */ + } + return i3 == stop_ptr; +} + + +static void +icvFetchContourEx( schar* ptr, + int step, + CvPoint pt, + CvSeq* contour, + int _method, + int nbd, + CvRect* _rect ) +{ + int deltas[16]; + CvSeqWriter writer; + schar *i0 = ptr, *i1, *i3, *i4; + CvRect rect; + int prev_s = -1, s, s_end; + int method = _method - 1; + + assert( (unsigned) _method <= CV_CHAIN_APPROX_SIMPLE ); + assert( 1 < nbd && nbd < 128 ); + + /* initialize local state */ + CV_INIT_3X3_DELTAS( deltas, step, 1 ); + memcpy( deltas + 8, deltas, 8 * sizeof( deltas[0] )); + + /* initialize writer */ + cvStartAppendToSeq( contour, &writer ); + + if( method < 0 ) + ((CvChain *)contour)->origin = pt; + + rect.x = rect.width = pt.x; + rect.y = rect.height = pt.y; + + s_end = s = CV_IS_SEQ_HOLE( contour ) ? 
0 : 4; + + do + { + s = (s - 1) & 7; + i1 = i0 + deltas[s]; + if( *i1 != 0 ) + break; + } + while( s != s_end ); + + if( s == s_end ) /* single pixel domain */ + { + *i0 = (schar) (nbd | 0x80); + if( method >= 0 ) + { + CV_WRITE_SEQ_ELEM( pt, writer ); + } + } + else + { + i3 = i0; + + prev_s = s ^ 4; + + /* follow border */ + for( ;; ) + { + s_end = s; + + for( ;; ) + { + i4 = i3 + deltas[++s]; + if( *i4 != 0 ) + break; + } + s &= 7; + + /* check "right" bound */ + if( (unsigned) (s - 1) < (unsigned) s_end ) + { + *i3 = (schar) (nbd | 0x80); + } + else if( *i3 == 1 ) + { + *i3 = (schar) nbd; + } + + if( method < 0 ) + { + schar _s = (schar) s; + CV_WRITE_SEQ_ELEM( _s, writer ); + } + else if( s != prev_s || method == 0 ) + { + CV_WRITE_SEQ_ELEM( pt, writer ); + } + + if( s != prev_s ) + { + /* update bounds */ + if( pt.x < rect.x ) + rect.x = pt.x; + else if( pt.x > rect.width ) + rect.width = pt.x; + + if( pt.y < rect.y ) + rect.y = pt.y; + else if( pt.y > rect.height ) + rect.height = pt.y; + } + + prev_s = s; + pt.x += icvCodeDeltas[s].x; + pt.y += icvCodeDeltas[s].y; + + if( i4 == i0 && i3 == i1 ) break; + + i3 = i4; + s = (s + 4) & 7; + } /* end of border following loop */ + } + + rect.width -= rect.x - 1; + rect.height -= rect.y - 1; + + cvEndWriteSeq( &writer ); + + if( _method != CV_CHAIN_CODE ) + ((CvContour*)contour)->rect = rect; + + assert( (writer.seq->total == 0 && writer.seq->first == 0) || + writer.seq->total > writer.seq->first->count || + (writer.seq->first->prev == writer.seq->first && + writer.seq->first->next == writer.seq->first) ); + + if( _rect ) *_rect = rect; +} + + +CvSeq * +cvFindNextContour( CvContourScanner scanner ) +{ + schar *img0; + schar *img; + int step; + int width, height; + int x, y; + int prev; + CvPoint lnbd; + int nbd; + int mode; + + if( !scanner ) + CV_Error( CV_StsNullPtr, "" ); + icvEndProcessContour( scanner ); + + /* initialize local state */ + img0 = scanner->img0; + img = scanner->img; + step = scanner->img_step; + 
x = scanner->pt.x; + y = scanner->pt.y; + width = scanner->img_size.width; + height = scanner->img_size.height; + mode = scanner->mode; + lnbd = scanner->lnbd; + nbd = scanner->nbd; + + prev = img[x - 1]; + + for( ; y < height; y++, img += step ) + { + for( ; x < width; x++ ) + { + int p = img[x]; + + if( p != prev ) + { + _CvContourInfo *par_info = 0; + _CvContourInfo *l_cinfo = 0; + CvSeq *seq = 0; + int is_hole = 0; + CvPoint origin; + + if( !(prev == 0 && p == 1) ) /* if not external contour */ + { + /* check hole */ + if( p != 0 || prev < 1 ) + goto resume_scan; + + if( prev & -2 ) + { + lnbd.x = x - 1; + } + is_hole = 1; + } + + if( mode == 0 && (is_hole || img0[lnbd.y * step + lnbd.x] > 0) ) + goto resume_scan; + + origin.y = y; + origin.x = x - is_hole; + + /* find contour parent */ + if( mode <= 1 || (!is_hole && mode == 2) || lnbd.x <= 0 ) + { + par_info = &(scanner->frame_info); + } + else + { + int lval = img0[lnbd.y * step + lnbd.x] & 0x7f; + _CvContourInfo *cur = scanner->cinfo_table[lval - 2]; + + assert( lval >= 2 ); + + /* find the first bounding contour */ + while( cur ) + { + if( (unsigned) (lnbd.x - cur->rect.x) < (unsigned) cur->rect.width && + (unsigned) (lnbd.y - cur->rect.y) < (unsigned) cur->rect.height ) + { + if( par_info ) + { + if( icvTraceContour( scanner->img0 + + par_info->origin.y * step + + par_info->origin.x, step, img + lnbd.x, + par_info->is_hole ) > 0 ) + break; + } + par_info = cur; + } + cur = cur->next; + } + + assert( par_info != 0 ); + + /* if current contour is a hole and previous contour is a hole or + current contour is external and previous contour is external then + the parent of the contour is the parent of the previous contour else + the parent is the previous contour itself. 
*/ + if( par_info->is_hole == is_hole ) + { + par_info = par_info->parent; + /* every contour must have a parent + (at least, the frame of the image) */ + if( !par_info ) + par_info = &(scanner->frame_info); + } + + /* hole flag of the parent must differ from the flag of the contour */ + assert( par_info->is_hole != is_hole ); + if( par_info->contour == 0 ) /* removed contour */ + goto resume_scan; + } + + lnbd.x = x - is_hole; + + cvSaveMemStoragePos( scanner->storage2, &(scanner->backup_pos) ); + + seq = cvCreateSeq( scanner->seq_type1, scanner->header_size1, + scanner->elem_size1, scanner->storage1 ); + seq->flags |= is_hole ? CV_SEQ_FLAG_HOLE : 0; + + /* initialize header */ + if( mode <= 1 ) + { + l_cinfo = &(scanner->cinfo_temp); + icvFetchContour( img + x - is_hole, step, + cvPoint( origin.x + scanner->offset.x, + origin.y + scanner->offset.y), + seq, scanner->approx_method1 ); + } + else + { + union { _CvContourInfo* ci; CvSetElem* se; } v; + v.ci = l_cinfo; + cvSetAdd( scanner->cinfo_set, 0, &v.se ); + l_cinfo = v.ci; + + icvFetchContourEx( img + x - is_hole, step, + cvPoint( origin.x + scanner->offset.x, + origin.y + scanner->offset.y), + seq, scanner->approx_method1, + nbd, &(l_cinfo->rect) ); + l_cinfo->rect.x -= scanner->offset.x; + l_cinfo->rect.y -= scanner->offset.y; + + l_cinfo->next = scanner->cinfo_table[nbd - 2]; + scanner->cinfo_table[nbd - 2] = l_cinfo; + + /* change nbd */ + nbd = (nbd + 1) & 127; + nbd += nbd == 0 ? 
3 : 0; + } + + l_cinfo->is_hole = is_hole; + l_cinfo->contour = seq; + l_cinfo->origin = origin; + l_cinfo->parent = par_info; + + if( scanner->approx_method1 != scanner->approx_method2 ) + { + l_cinfo->contour = icvApproximateChainTC89( (CvChain *) seq, + scanner->header_size2, + scanner->storage2, + scanner->approx_method2 ); + cvClearMemStorage( scanner->storage1 ); + } + + l_cinfo->contour->v_prev = l_cinfo->parent->contour; + + if( par_info->contour == 0 ) + { + l_cinfo->contour = 0; + if( scanner->storage1 == scanner->storage2 ) + { + cvRestoreMemStoragePos( scanner->storage1, &(scanner->backup_pos) ); + } + else + { + cvClearMemStorage( scanner->storage1 ); + } + p = img[x]; + goto resume_scan; + } + + cvSaveMemStoragePos( scanner->storage2, &(scanner->backup_pos2) ); + scanner->l_cinfo = l_cinfo; + scanner->pt.x = x + 1; + scanner->pt.y = y; + scanner->lnbd = lnbd; + scanner->img = (schar *) img; + scanner->nbd = nbd; + return l_cinfo->contour; + + resume_scan: + + prev = p; + /* update lnbd */ + if( prev & -2 ) + { + lnbd.x = x; + } + } /* end of prev != p */ + } /* end of loop on x */ + + lnbd.x = 0; + lnbd.y = y + 1; + x = 1; + prev = 0; + + } /* end of loop on y */ + + return 0; +} + + +/* + The function add to tree the last retrieved/substituted contour, + releases temp_storage, restores state of dst_storage (if needed), and + returns pointer to root of the contour tree */ +CV_IMPL CvSeq * +cvEndFindContours( CvContourScanner * _scanner ) +{ + CvContourScanner scanner; + CvSeq *first = 0; + + if( !_scanner ) + CV_Error( CV_StsNullPtr, "" ); + scanner = *_scanner; + + if( scanner ) + { + icvEndProcessContour( scanner ); + + if( scanner->storage1 != scanner->storage2 ) + cvReleaseMemStorage( &(scanner->storage1) ); + + if( scanner->cinfo_storage ) + cvReleaseMemStorage( &(scanner->cinfo_storage) ); + + first = scanner->frame.v_next; + cvFree( _scanner ); + } + + return first; +} + + +#define ICV_SINGLE 0 +#define ICV_CONNECTING_ABOVE 1 +#define 
ICV_CONNECTING_BELOW -1 +#define ICV_IS_COMPONENT_POINT(val) ((val) != 0) + +#define CV_GET_WRITTEN_ELEM( writer ) ((writer).ptr - (writer).seq->elem_size) + +typedef struct CvLinkedRunPoint +{ + struct CvLinkedRunPoint* link; + struct CvLinkedRunPoint* next; + CvPoint pt; +} +CvLinkedRunPoint; + + +static int +icvFindContoursInInterval( const CvArr* src, + /*int minValue, int maxValue,*/ + CvMemStorage* storage, + CvSeq** result, + int contourHeaderSize ) +{ + int count = 0; + cv::Ptr storage00; + cv::Ptr storage01; + CvSeq* first = 0; + + int i, j, k, n; + + uchar* src_data = 0; + int img_step = 0; + CvSize img_size; + + int connect_flag; + int lower_total; + int upper_total; + int all_total; + + CvSeq* runs; + CvLinkedRunPoint tmp; + CvLinkedRunPoint* tmp_prev; + CvLinkedRunPoint* upper_line = 0; + CvLinkedRunPoint* lower_line = 0; + CvLinkedRunPoint* last_elem; + + CvLinkedRunPoint* upper_run = 0; + CvLinkedRunPoint* lower_run = 0; + CvLinkedRunPoint* prev_point = 0; + + CvSeqWriter writer_ext; + CvSeqWriter writer_int; + CvSeqWriter writer; + CvSeqReader reader; + + CvSeq* external_contours; + CvSeq* internal_contours; + CvSeq* prev = 0; + + if( !storage ) + CV_Error( CV_StsNullPtr, "NULL storage pointer" ); + + if( !result ) + CV_Error( CV_StsNullPtr, "NULL double CvSeq pointer" ); + + if( contourHeaderSize < (int)sizeof(CvContour)) + CV_Error( CV_StsBadSize, "Contour header size must be >= sizeof(CvContour)" ); + + storage00 = cvCreateChildMemStorage(storage); + storage01 = cvCreateChildMemStorage(storage); + + CvMat stub, *mat; + + mat = cvGetMat( src, &stub ); + if( !CV_IS_MASK_ARR(mat)) + CV_Error( CV_StsBadArg, "Input array must be 8uC1 or 8sC1" ); + src_data = mat->data.ptr; + img_step = mat->step; + img_size = cvGetMatSize( mat ); + + // Create temporary sequences + runs = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvLinkedRunPoint), storage00 ); + cvStartAppendToSeq( runs, &writer ); + + cvStartWriteSeq( 0, sizeof(CvSeq), sizeof(CvLinkedRunPoint*), 
storage01, &writer_ext ); + cvStartWriteSeq( 0, sizeof(CvSeq), sizeof(CvLinkedRunPoint*), storage01, &writer_int ); + + tmp_prev = &(tmp); + tmp_prev->next = 0; + tmp_prev->link = 0; + + // First line. None of runs is binded + tmp.pt.y = 0; + i = 0; + CV_WRITE_SEQ_ELEM( tmp, writer ); + upper_line = (CvLinkedRunPoint*)CV_GET_WRITTEN_ELEM( writer ); + + tmp_prev = upper_line; + for( j = 0; j < img_size.width; ) + { + for( ; j < img_size.width && !ICV_IS_COMPONENT_POINT(src_data[j]); j++ ) + ; + if( j == img_size.width ) + break; + + tmp.pt.x = j; + CV_WRITE_SEQ_ELEM( tmp, writer ); + tmp_prev->next = (CvLinkedRunPoint*)CV_GET_WRITTEN_ELEM( writer ); + tmp_prev = tmp_prev->next; + + for( ; j < img_size.width && ICV_IS_COMPONENT_POINT(src_data[j]); j++ ) + ; + + tmp.pt.x = j-1; + CV_WRITE_SEQ_ELEM( tmp, writer ); + tmp_prev->next = (CvLinkedRunPoint*)CV_GET_WRITTEN_ELEM( writer ); + tmp_prev->link = tmp_prev->next; + // First point of contour + CV_WRITE_SEQ_ELEM( tmp_prev, writer_ext ); + tmp_prev = tmp_prev->next; + } + cvFlushSeqWriter( &writer ); + upper_line = upper_line->next; + upper_total = runs->total - 1; + last_elem = tmp_prev; + tmp_prev->next = 0; + + for( i = 1; i < img_size.height; i++ ) + { +//------// Find runs in next line + src_data += img_step; + tmp.pt.y = i; + all_total = runs->total; + for( j = 0; j < img_size.width; ) + { + for( ; j < img_size.width && !ICV_IS_COMPONENT_POINT(src_data[j]); j++ ) + ; + if( j == img_size.width ) break; + + tmp.pt.x = j; + CV_WRITE_SEQ_ELEM( tmp, writer ); + tmp_prev->next = (CvLinkedRunPoint*)CV_GET_WRITTEN_ELEM( writer ); + tmp_prev = tmp_prev->next; + + for( ; j < img_size.width && ICV_IS_COMPONENT_POINT(src_data[j]); j++ ) + ; + + tmp.pt.x = j-1; + CV_WRITE_SEQ_ELEM( tmp, writer ); + tmp_prev = tmp_prev->next = (CvLinkedRunPoint*)CV_GET_WRITTEN_ELEM( writer ); + }//j + cvFlushSeqWriter( &writer ); + lower_line = last_elem->next; + lower_total = runs->total - all_total; + last_elem = tmp_prev; + tmp_prev->next = 
0; +//------// +//------// Find links between runs of lower_line and upper_line + upper_run = upper_line; + lower_run = lower_line; + connect_flag = ICV_SINGLE; + + for( k = 0, n = 0; k < upper_total/2 && n < lower_total/2; ) + { + switch( connect_flag ) + { + case ICV_SINGLE: + if( upper_run->next->pt.x < lower_run->next->pt.x ) + { + if( upper_run->next->pt.x >= lower_run->pt.x -1 ) + { + lower_run->link = upper_run; + connect_flag = ICV_CONNECTING_ABOVE; + prev_point = upper_run->next; + } + else + upper_run->next->link = upper_run; + k++; + upper_run = upper_run->next->next; + } + else + { + if( upper_run->pt.x <= lower_run->next->pt.x +1 ) + { + lower_run->link = upper_run; + connect_flag = ICV_CONNECTING_BELOW; + prev_point = lower_run->next; + } + else + { + lower_run->link = lower_run->next; + // First point of contour + CV_WRITE_SEQ_ELEM( lower_run, writer_ext ); + } + n++; + lower_run = lower_run->next->next; + } + break; + case ICV_CONNECTING_ABOVE: + if( upper_run->pt.x > lower_run->next->pt.x +1 ) + { + prev_point->link = lower_run->next; + connect_flag = ICV_SINGLE; + n++; + lower_run = lower_run->next->next; + } + else + { + prev_point->link = upper_run; + if( upper_run->next->pt.x < lower_run->next->pt.x ) + { + k++; + prev_point = upper_run->next; + upper_run = upper_run->next->next; + } + else + { + connect_flag = ICV_CONNECTING_BELOW; + prev_point = lower_run->next; + n++; + lower_run = lower_run->next->next; + } + } + break; + case ICV_CONNECTING_BELOW: + if( lower_run->pt.x > upper_run->next->pt.x +1 ) + { + upper_run->next->link = prev_point; + connect_flag = ICV_SINGLE; + k++; + upper_run = upper_run->next->next; + } + else + { + // First point of contour + CV_WRITE_SEQ_ELEM( lower_run, writer_int ); + + lower_run->link = prev_point; + if( lower_run->next->pt.x < upper_run->next->pt.x ) + { + n++; + prev_point = lower_run->next; + lower_run = lower_run->next->next; + } + else + { + connect_flag = ICV_CONNECTING_ABOVE; + k++; + prev_point = 
upper_run->next; + upper_run = upper_run->next->next; + } + } + break; + } + }// k, n + + for( ; n < lower_total/2; n++ ) + { + if( connect_flag != ICV_SINGLE ) + { + prev_point->link = lower_run->next; + connect_flag = ICV_SINGLE; + lower_run = lower_run->next->next; + continue; + } + lower_run->link = lower_run->next; + + //First point of contour + CV_WRITE_SEQ_ELEM( lower_run, writer_ext ); + + lower_run = lower_run->next->next; + } + + for( ; k < upper_total/2; k++ ) + { + if( connect_flag != ICV_SINGLE ) + { + upper_run->next->link = prev_point; + connect_flag = ICV_SINGLE; + upper_run = upper_run->next->next; + continue; + } + upper_run->next->link = upper_run; + upper_run = upper_run->next->next; + } + upper_line = lower_line; + upper_total = lower_total; + }//i + + upper_run = upper_line; + + //the last line of image + for( k = 0; k < upper_total/2; k++ ) + { + upper_run->next->link = upper_run; + upper_run = upper_run->next->next; + } + +//------// +//------//Find end read contours + external_contours = cvEndWriteSeq( &writer_ext ); + internal_contours = cvEndWriteSeq( &writer_int ); + + for( k = 0; k < 2; k++ ) + { + CvSeq* contours = k == 0 ? 
external_contours : internal_contours; + + cvStartReadSeq( contours, &reader ); + + for( j = 0; j < contours->total; j++, count++ ) + { + CvLinkedRunPoint* p_temp; + CvLinkedRunPoint* p00; + CvLinkedRunPoint* p01; + CvSeq* contour; + + CV_READ_SEQ_ELEM( p00, reader ); + p01 = p00; + + if( !p00->link ) + continue; + + cvStartWriteSeq( CV_SEQ_ELTYPE_POINT | CV_SEQ_POLYLINE | CV_SEQ_FLAG_CLOSED, + contourHeaderSize, sizeof(CvPoint), storage, &writer ); + do + { + CV_WRITE_SEQ_ELEM( p00->pt, writer ); + p_temp = p00; + p00 = p00->link; + p_temp->link = 0; + } + while( p00 != p01 ); + + contour = cvEndWriteSeq( &writer ); + cvBoundingRect( contour, 1 ); + + if( k != 0 ) + contour->flags |= CV_SEQ_FLAG_HOLE; + + if( !first ) + prev = first = contour; + else + { + contour->h_prev = prev; + prev = prev->h_next = contour; + } + } + } + + if( !first ) + count = -1; + + if( result ) + *result = first; + + return count; +} + + + +/*F/////////////////////////////////////////////////////////////////////////////////////// +// Name: cvFindContours +// Purpose: +// Finds all the contours on the bi-level image. +// Context: +// Parameters: +// img - source image. +// Non-zero pixels are considered as 1-pixels +// and zero pixels as 0-pixels. +// step - full width of source image in bytes. +// size - width and height of the image in pixels +// storage - pointer to storage where will the output contours be placed. +// header_size - header size of resulting contours +// mode - mode of contour retrieval. 
+// method - method of approximation that is applied to contours +// first_contour - pointer to first contour pointer +// Returns: +// CV_OK or error code +// Notes: +//F*/ +CV_IMPL int +cvFindContours( void* img, CvMemStorage* storage, + CvSeq** firstContour, int cntHeaderSize, + int mode, + int method, CvPoint offset ) +{ + CvContourScanner scanner = 0; + CvSeq *contour = 0; + int count = -1; + + if( !firstContour ) + CV_Error( CV_StsNullPtr, "NULL double CvSeq pointer" ); + + *firstContour = 0; + + if( method == CV_LINK_RUNS ) + { + if( offset.x != 0 || offset.y != 0 ) + CV_Error( CV_StsOutOfRange, + "Nonzero offset is not supported in CV_LINK_RUNS yet" ); + + count = icvFindContoursInInterval( img, storage, firstContour, cntHeaderSize ); + } + else + { + try + { + scanner = cvStartFindContours( img, storage, cntHeaderSize, mode, method, offset ); + + do + { + count++; + contour = cvFindNextContour( scanner ); + } + while( contour != 0 ); + } + catch(...) + { + if( scanner ) + cvEndFindContours(&scanner); + throw; + } + + *firstContour = cvEndFindContours( &scanner ); + } + + return count; +} + +void cv::findContours( InputOutputArray _image, OutputArrayOfArrays _contours, + OutputArray _hierarchy, int mode, int method, Point offset ) +{ + Mat image = _image.getMat(); + MemStorage storage(cvCreateMemStorage()); + CvMat _cimage = image; + CvSeq* _ccontours = 0; + if( _hierarchy.needed() ) + _hierarchy.clear(); + cvFindContours(&_cimage, storage, &_ccontours, sizeof(CvContour), mode, method, offset); + if( !_ccontours ) + { + _contours.clear(); + return; + } + Seq all_contours(cvTreeToNodeSeq( _ccontours, sizeof(CvSeq), storage )); + int i, total = (int)all_contours.size(); + _contours.create(total, 1, 0, -1, true); + SeqIterator it = all_contours.begin(); + for( i = 0; i < total; i++, ++it ) + { + CvSeq* c = *it; + ((CvContour*)c)->color = (int)i; + _contours.create((int)c->total, 1, CV_32SC2, i, true); + Mat ci = _contours.getMat(i); + CV_Assert( 
ci.isContinuous() ); + cvCvtSeqToArray(c, ci.data); + } + + if( _hierarchy.needed() ) + { + _hierarchy.create(1, total, CV_32SC4, -1, true); + Vec4i* hierarchy = _hierarchy.getMat().ptr(); + + it = all_contours.begin(); + for( i = 0; i < total; i++, ++it ) + { + CvSeq* c = *it; + int h_next = c->h_next ? ((CvContour*)c->h_next)->color : -1; + int h_prev = c->h_prev ? ((CvContour*)c->h_prev)->color : -1; + int v_next = c->v_next ? ((CvContour*)c->v_next)->color : -1; + int v_prev = c->v_prev ? ((CvContour*)c->v_prev)->color : -1; + hierarchy[i] = Vec4i(h_next, h_prev, v_next, v_prev); + } + } +} + +void cv::findContours( InputOutputArray _image, OutputArrayOfArrays _contours, + int mode, int method, Point offset) +{ + findContours(_image, _contours, noArray(), mode, method, offset); +} + +namespace cv +{ + +static void addChildContour(InputArrayOfArrays contours, + size_t ncontours, + const Vec4i* hierarchy, + int i, vector& seq, + vector& block) +{ + for( ; i >= 0; i = hierarchy[i][0] ) + { + Mat ci = contours.getMat(i); + cvMakeSeqHeaderForArray(CV_SEQ_POLYGON, sizeof(CvSeq), sizeof(Point), + !ci.empty() ? (void*)ci.data : 0, (int)ci.total(), + &seq[i], &block[i] ); + + int h_next = hierarchy[i][0], h_prev = hierarchy[i][1], + v_next = hierarchy[i][2], v_prev = hierarchy[i][3]; + seq[i].h_next = (size_t)h_next < ncontours ? &seq[h_next] : 0; + seq[i].h_prev = (size_t)h_prev < ncontours ? &seq[h_prev] : 0; + seq[i].v_next = (size_t)v_next < ncontours ? &seq[v_next] : 0; + seq[i].v_prev = (size_t)v_prev < ncontours ? 
&seq[v_prev] : 0; + + if( v_next >= 0 ) + addChildContour(contours, ncontours, hierarchy, v_next, seq, block); + } +} + +} + +void cv::drawContours( InputOutputArray _image, InputArrayOfArrays _contours, + int contourIdx, const Scalar& color, int thickness, + int lineType, InputArray _hierarchy, + int maxLevel, Point offset ) +{ + Mat image = _image.getMat(), hierarchy = _hierarchy.getMat(); + CvMat _cimage = image; + + size_t ncontours = _contours.total(); + size_t i = 0, first = 0, last = ncontours; + vector seq; + vector block; + + if( !last ) + return; + + seq.resize(last); + block.resize(last); + + for( i = first; i < last; i++ ) + seq[i].first = 0; + + if( contourIdx >= 0 ) + { + CV_Assert( 0 <= contourIdx && contourIdx < (int)last ); + first = contourIdx; + last = contourIdx + 1; + } + + for( i = first; i < last; i++ ) + { + Mat ci = _contours.getMat((int)i); + if( ci.empty() ) + continue; + int npoints = ci.checkVector(2, CV_32S); + CV_Assert( npoints > 0 ); + cvMakeSeqHeaderForArray( CV_SEQ_POLYGON, sizeof(CvSeq), sizeof(Point), + ci.data, npoints, &seq[i], &block[i] ); + } + + if( hierarchy.empty() || maxLevel == 0 ) + for( i = first; i < last; i++ ) + { + seq[i].h_next = i < last-1 ? &seq[i+1] : 0; + seq[i].h_prev = i > first ? &seq[i-1] : 0; + } + else + { + size_t count = last - first; + CV_Assert(hierarchy.total() == ncontours && hierarchy.type() == CV_32SC4 ); + const Vec4i* h = hierarchy.ptr(); + + if( count == ncontours ) + { + for( i = first; i < last; i++ ) + { + int h_next = h[i][0], h_prev = h[i][1], + v_next = h[i][2], v_prev = h[i][3]; + seq[i].h_next = (size_t)h_next < count ? &seq[h_next] : 0; + seq[i].h_prev = (size_t)h_prev < count ? &seq[h_prev] : 0; + seq[i].v_next = (size_t)v_next < count ? &seq[v_next] : 0; + seq[i].v_prev = (size_t)v_prev < count ? 
&seq[v_prev] : 0; + } + } + else + { + int child = h[first][2]; + if( child >= 0 ) + { + addChildContour(_contours, ncontours, h, child, seq, block); + seq[first].v_next = &seq[child]; + } + } + } + + cvDrawContours( &_cimage, &seq[first], color, color, contourIdx >= 0 ? + -maxLevel : maxLevel, thickness, lineType, offset ); +} + + +void cv::approxPolyDP( InputArray _curve, OutputArray _approxCurve, + double epsilon, bool closed ) +{ + Mat curve = _curve.getMat(); + int npoints = curve.checkVector(2), depth = curve.depth(); + CV_Assert( npoints >= 0 && (depth == CV_32S || depth == CV_32F)); + CvMat _ccurve = curve; + MemStorage storage(cvCreateMemStorage()); + CvSeq* result = cvApproxPoly(&_ccurve, sizeof(CvContour), storage, CV_POLY_APPROX_DP, epsilon, closed); + if( result->total > 0 ) + { + _approxCurve.create(result->total, 1, CV_MAKETYPE(curve.depth(), 2), -1, true); + cvCvtSeqToArray(result, _approxCurve.getMat().data ); + } +} + + +double cv::arcLength( InputArray _curve, bool closed ) +{ + Mat curve = _curve.getMat(); + CV_Assert(curve.checkVector(2) >= 0 && (curve.depth() == CV_32F || curve.depth() == CV_32S)); + CvMat _ccurve = curve; + return cvArcLength(&_ccurve, CV_WHOLE_SEQ, closed); +} + + +cv::Rect cv::boundingRect( InputArray _points ) +{ + Mat points = _points.getMat(); + CV_Assert(points.checkVector(2) >= 0 && (points.depth() == CV_32F || points.depth() == CV_32S)); + CvMat _cpoints = points; + return cvBoundingRect(&_cpoints, 0); +} + + +double cv::contourArea( InputArray _contour, bool oriented ) +{ + Mat contour = _contour.getMat(); + CV_Assert(contour.checkVector(2) >= 0 && (contour.depth() == CV_32F || contour.depth() == CV_32S)); + CvMat _ccontour = contour; + return cvContourArea(&_ccontour, CV_WHOLE_SEQ, oriented); +} + + +cv::RotatedRect cv::minAreaRect( InputArray _points ) +{ + Mat points = _points.getMat(); + CV_Assert(points.checkVector(2) >= 0 && (points.depth() == CV_32F || points.depth() == CV_32S)); + CvMat _cpoints = points; + 
return cvMinAreaRect2(&_cpoints, 0); +} + + +void cv::minEnclosingCircle( InputArray _points, + Point2f& center, float& radius ) +{ + Mat points = _points.getMat(); + CV_Assert(points.checkVector(2) >= 0 && (points.depth() == CV_32F || points.depth() == CV_32S)); + CvMat _cpoints = points; + cvMinEnclosingCircle( &_cpoints, (CvPoint2D32f*)¢er, &radius ); +} + + +double cv::matchShapes( InputArray _contour1, + InputArray _contour2, + int method, double parameter ) +{ + Mat contour1 = _contour1.getMat(), contour2 = _contour2.getMat(); + CV_Assert(contour1.checkVector(2) >= 0 && contour2.checkVector(2) >= 0 && + (contour1.depth() == CV_32F || contour1.depth() == CV_32S) && + contour1.depth() == contour2.depth()); + + CvMat c1 = Mat(contour1), c2 = Mat(contour2); + return cvMatchShapes(&c1, &c2, method, parameter); +} + + +void cv::convexHull( InputArray _points, OutputArray _hull, bool clockwise, bool returnPoints ) +{ + Mat points = _points.getMat(); + int nelems = points.checkVector(2), depth = points.depth(); + CV_Assert(nelems >= 0 && (depth == CV_32F || depth == CV_32S)); + + if( nelems == 0 ) + { + _hull.release(); + return; + } + + returnPoints = !_hull.fixedType() ? returnPoints : _hull.type() != CV_32S; + Mat hull(nelems, 1, returnPoints ? CV_MAKETYPE(depth, 2) : CV_32S); + CvMat _cpoints = points, _chull = hull; + cvConvexHull2(&_cpoints, &_chull, clockwise ? 
CV_CLOCKWISE : CV_COUNTER_CLOCKWISE, returnPoints); + _hull.create(_chull.rows, 1, hull.type(), -1, true); + Mat dhull = _hull.getMat(), shull(dhull.size(), dhull.type(), hull.data); + shull.copyTo(dhull); +} + +bool cv::isContourConvex( InputArray _contour ) +{ + Mat contour = _contour.getMat(); + CV_Assert(contour.checkVector(2) >= 0 && + (contour.depth() == CV_32F || contour.depth() == CV_32S)); + CvMat c = Mat(contour); + return cvCheckContourConvexity(&c) > 0; +} + +cv::RotatedRect cv::fitEllipse( InputArray _points ) +{ + Mat points = _points.getMat(); + CV_Assert(points.checkVector(2) >= 0 && + (points.depth() == CV_32F || points.depth() == CV_32S)); + CvMat _cpoints = points; + return cvFitEllipse2(&_cpoints); +} + + +void cv::fitLine( InputArray _points, OutputArray _line, int distType, + double param, double reps, double aeps ) +{ + Mat points = _points.getMat(); + bool is3d = points.checkVector(3) >= 0, is2d = is3d ? false : points.checkVector(2) >= 0; + + CV_Assert((is2d || is3d) && (points.depth() == CV_32F || points.depth() == CV_32S)); + CvMat _cpoints = points; + float line[6]; + cvFitLine(&_cpoints, distType, param, reps, aeps, &line[0]); + + _line.create(is2d ? 4 : 6, 1, CV_32F, -1, true); + Mat l = _line.getMat(); + CV_Assert( l.isContinuous() ); + memcpy( l.data, line, (is2d ? 4 : 6)*sizeof(line[0]) ); +} + + +double cv::pointPolygonTest( InputArray _contour, + Point2f pt, bool measureDist ) +{ + Mat contour = _contour.getMat(); + CV_Assert(contour.checkVector(2) >= 0 && + (contour.depth() == CV_32F || contour.depth() == CV_32S)); + CvMat c = Mat(contour); + return cvPointPolygonTest( &c, pt, measureDist ); +} + +/* End of file. 
*/ diff --git a/opencv/imgproc/convhull.cpp b/opencv/imgproc/convhull.cpp new file mode 100644 index 0000000..145c55e --- /dev/null +++ b/opencv/imgproc/convhull.cpp @@ -0,0 +1,815 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" + +static int +icvSklansky_32s( CvPoint** array, int start, int end, int* stack, int nsign, int sign2 ) +{ + int incr = end > start ? 1 : -1; + /* prepare first triangle */ + int pprev = start, pcur = pprev + incr, pnext = pcur + incr; + int stacksize = 3; + + if( start == end || + (array[start]->x == array[end]->x && + array[start]->y == array[end]->y) ) + { + stack[0] = start; + return 1; + } + + stack[0] = pprev; + stack[1] = pcur; + stack[2] = pnext; + + end += incr; /* make end = afterend */ + + while( pnext != end ) + { + /* check the angle p1,p2,p3 */ + int cury = array[pcur]->y; + int nexty = array[pnext]->y; + int by = nexty - cury; + + if( CV_SIGN(by) != nsign ) + { + int ax = array[pcur]->x - array[pprev]->x; + int bx = array[pnext]->x - array[pcur]->x; + int ay = cury - array[pprev]->y; + int convexity = ay*bx - ax*by;/* if >0 then convex angle */ + + if( CV_SIGN(convexity) == sign2 && (ax != 0 || ay != 0) ) + { + pprev = pcur; + pcur = pnext; + pnext += incr; + stack[stacksize] = pnext; + stacksize++; + } + else + { + if( pprev == start ) + { + pcur = pnext; + stack[1] = pcur; + pnext += incr; + stack[2] = pnext; + } + else + { + stack[stacksize-2] = pnext; + pcur = pprev; + pprev = stack[stacksize-4]; + stacksize--; + } + } + } + else + { + pnext += incr; + stack[stacksize-1] = pnext; + } + } + + return --stacksize; +} + + +static int +icvSklansky_32f( CvPoint2D32f** array, int 
start, int end, int* stack, int nsign, int sign2 ) +{ + int incr = end > start ? 1 : -1; + /* prepare first triangle */ + int pprev = start, pcur = pprev + incr, pnext = pcur + incr; + int stacksize = 3; + + if( start == end || + (array[start]->x == array[end]->x && + array[start]->y == array[end]->y) ) + { + stack[0] = start; + return 1; + } + + stack[0] = pprev; + stack[1] = pcur; + stack[2] = pnext; + + end += incr; /* make end = afterend */ + + while( pnext != end ) + { + /* check the angle p1,p2,p3 */ + float cury = array[pcur]->y; + float nexty = array[pnext]->y; + float by = nexty - cury; + + if( CV_SIGN( by ) != nsign ) + { + float ax = array[pcur]->x - array[pprev]->x; + float bx = array[pnext]->x - array[pcur]->x; + float ay = cury - array[pprev]->y; + float convexity = ay*bx - ax*by;/* if >0 then convex angle */ + + if( CV_SIGN( convexity ) == sign2 && (ax != 0 || ay != 0) ) + { + pprev = pcur; + pcur = pnext; + pnext += incr; + stack[stacksize] = pnext; + stacksize++; + } + else + { + if( pprev == start ) + { + pcur = pnext; + stack[1] = pcur; + pnext += incr; + stack[2] = pnext; + + } + else + { + stack[stacksize-2] = pnext; + pcur = pprev; + pprev = stack[stacksize-4]; + stacksize--; + } + } + } + else + { + pnext += incr; + stack[stacksize-1] = pnext; + } + } + + return --stacksize; +} + +typedef int (*sklansky_func)( CvPoint** points, int start, int end, + int* stack, int sign, int sign2 ); + +#define cmp_pts( pt1, pt2 ) \ + ((pt1)->x < (pt2)->x || ((pt1)->x <= (pt2)->x && (pt1)->y < (pt2)->y)) +static CV_IMPLEMENT_QSORT( icvSortPointsByPointers_32s, CvPoint*, cmp_pts ) +static CV_IMPLEMENT_QSORT( icvSortPointsByPointers_32f, CvPoint2D32f*, cmp_pts ) + +static void +icvCalcAndWritePtIndices( CvPoint** pointer, int* stack, int start, int end, + CvSeq* ptseq, CvSeqWriter* writer ) +{ + int i, incr = start < end ? 
1 : -1; + int idx, first_idx = ptseq->first->start_index; + + for( i = start; i != end; i += incr ) + { + CvPoint* ptr = (CvPoint*)pointer[stack[i]]; + CvSeqBlock* block = ptseq->first; + while( (unsigned)(idx = (int)(ptr - (CvPoint*)block->data)) >= (unsigned)block->count ) + { + block = block->next; + if( block == ptseq->first ) + CV_Error( CV_StsError, "Internal error" ); + } + idx += block->start_index - first_idx; + CV_WRITE_SEQ_ELEM( idx, *writer ); + } +} + + +CV_IMPL CvSeq* +cvConvexHull2( const CvArr* array, void* hull_storage, + int orientation, int return_points ) +{ + union { CvContour* c; CvSeq* s; } hull; + cv::AutoBuffer _pointer; + CvPoint** pointer; + CvPoint2D32f** pointerf = 0; + cv::AutoBuffer _stack; + int* stack; + + hull.s = 0; + + CvMat* mat = 0; + CvSeqReader reader; + CvSeqWriter writer; + CvContour contour_header; + union { CvContour c; CvSeq s; } hull_header; + CvSeqBlock block, hullblock; + CvSeq* ptseq = 0; + CvSeq* hullseq = 0; + int is_float; + int* t_stack; + int t_count; + int i, miny_ind = 0, maxy_ind = 0, total; + int hulltype; + int stop_idx; + sklansky_func sklansky; + + if( CV_IS_SEQ( array )) + { + ptseq = (CvSeq*)array; + if( !CV_IS_SEQ_POINT_SET( ptseq )) + CV_Error( CV_StsBadArg, "Unsupported sequence type" ); + if( hull_storage == 0 ) + hull_storage = ptseq->storage; + } + else + { + ptseq = cvPointSeqFromMat( CV_SEQ_KIND_GENERIC, array, &contour_header, &block ); + } + + if( CV_IS_STORAGE( hull_storage )) + { + if( return_points ) + { + hullseq = cvCreateSeq( + CV_SEQ_KIND_CURVE|CV_SEQ_ELTYPE(ptseq)| + CV_SEQ_FLAG_CLOSED|CV_SEQ_FLAG_CONVEX, + sizeof(CvContour), sizeof(CvPoint),(CvMemStorage*)hull_storage ); + } + else + { + hullseq = cvCreateSeq( + CV_SEQ_KIND_CURVE|CV_SEQ_ELTYPE_PPOINT| + CV_SEQ_FLAG_CLOSED|CV_SEQ_FLAG_CONVEX, + sizeof(CvContour), sizeof(CvPoint*), (CvMemStorage*)hull_storage ); + } + } + else + { + if( !CV_IS_MAT( hull_storage )) + CV_Error(CV_StsBadArg, "Destination must be valid memory storage or 
matrix"); + + mat = (CvMat*)hull_storage; + + if( (mat->cols != 1 && mat->rows != 1) || !CV_IS_MAT_CONT(mat->type)) + CV_Error( CV_StsBadArg, + "The hull matrix should be continuous and have a single row or a single column" ); + + if( mat->cols + mat->rows - 1 < ptseq->total ) + CV_Error( CV_StsBadSize, "The hull matrix size might be not enough to fit the hull" ); + + if( CV_MAT_TYPE(mat->type) != CV_SEQ_ELTYPE(ptseq) && + CV_MAT_TYPE(mat->type) != CV_32SC1 ) + CV_Error( CV_StsUnsupportedFormat, + "The hull matrix must have the same type as input or 32sC1 (integers)" ); + + hullseq = cvMakeSeqHeaderForArray( + CV_SEQ_KIND_CURVE|CV_MAT_TYPE(mat->type)|CV_SEQ_FLAG_CLOSED, + sizeof(contour_header), CV_ELEM_SIZE(mat->type), mat->data.ptr, + mat->cols + mat->rows - 1, &hull_header.s, &hullblock ); + + cvClearSeq( hullseq ); + } + + total = ptseq->total; + if( total == 0 ) + { + if( mat ) + CV_Error( CV_StsBadSize, + "Point sequence can not be empty if the output is matrix" ); + return hull.s; + } + + cvStartAppendToSeq( hullseq, &writer ); + + is_float = CV_SEQ_ELTYPE(ptseq) == CV_32FC2; + hulltype = CV_SEQ_ELTYPE(hullseq); + sklansky = !is_float ? 
(sklansky_func)icvSklansky_32s : + (sklansky_func)icvSklansky_32f; + + _pointer.allocate( ptseq->total ); + _stack.allocate( ptseq->total + 2); + pointer = _pointer; + pointerf = (CvPoint2D32f**)pointer; + stack = _stack; + + cvStartReadSeq( ptseq, &reader ); + + for( i = 0; i < total; i++ ) + { + pointer[i] = (CvPoint*)reader.ptr; + CV_NEXT_SEQ_ELEM( ptseq->elem_size, reader ); + } + + // sort the point set by x-coordinate, find min and max y + if( !is_float ) + { + icvSortPointsByPointers_32s( pointer, total, 0 ); + for( i = 1; i < total; i++ ) + { + int y = pointer[i]->y; + if( pointer[miny_ind]->y > y ) + miny_ind = i; + if( pointer[maxy_ind]->y < y ) + maxy_ind = i; + } + } + else + { + icvSortPointsByPointers_32f( pointerf, total, 0 ); + for( i = 1; i < total; i++ ) + { + float y = pointerf[i]->y; + if( pointerf[miny_ind]->y > y ) + miny_ind = i; + if( pointerf[maxy_ind]->y < y ) + maxy_ind = i; + } + } + + if( pointer[0]->x == pointer[total-1]->x && + pointer[0]->y == pointer[total-1]->y ) + { + if( hulltype == CV_SEQ_ELTYPE_PPOINT ) + { + CV_WRITE_SEQ_ELEM( pointer[0], writer ); + } + else if( hulltype == CV_SEQ_ELTYPE_INDEX ) + { + int index = 0; + CV_WRITE_SEQ_ELEM( index, writer ); + } + else + { + CvPoint pt = pointer[0][0]; + CV_WRITE_SEQ_ELEM( pt, writer ); + } + goto finish_hull; + } + + /*upper half */ + { + int *tl_stack = stack; + int tl_count = sklansky( pointer, 0, maxy_ind, tl_stack, -1, 1 ); + int *tr_stack = tl_stack + tl_count; + int tr_count = sklansky( pointer, ptseq->total - 1, maxy_ind, tr_stack, -1, -1 ); + + /* gather upper part of convex hull to output */ + if( orientation == CV_COUNTER_CLOCKWISE ) + { + CV_SWAP( tl_stack, tr_stack, t_stack ); + CV_SWAP( tl_count, tr_count, t_count ); + } + + if( hulltype == CV_SEQ_ELTYPE_PPOINT ) + { + for( i = 0; i < tl_count - 1; i++ ) + CV_WRITE_SEQ_ELEM( pointer[tl_stack[i]], writer ); + + for( i = tr_count - 1; i > 0; i-- ) + CV_WRITE_SEQ_ELEM( pointer[tr_stack[i]], writer ); + } + else if( 
hulltype == CV_SEQ_ELTYPE_INDEX ) + { + icvCalcAndWritePtIndices( pointer, tl_stack, 0, tl_count-1, ptseq, &writer ); + icvCalcAndWritePtIndices( pointer, tr_stack, tr_count-1, 0, ptseq, &writer ); + } + else + { + for( i = 0; i < tl_count - 1; i++ ) + CV_WRITE_SEQ_ELEM( pointer[tl_stack[i]][0], writer ); + + for( i = tr_count - 1; i > 0; i-- ) + CV_WRITE_SEQ_ELEM( pointer[tr_stack[i]][0], writer ); + } + stop_idx = tr_count > 2 ? tr_stack[1] : tl_count > 2 ? tl_stack[tl_count - 2] : -1; + } + + /* lower half */ + { + int *bl_stack = stack; + int bl_count = sklansky( pointer, 0, miny_ind, bl_stack, 1, -1 ); + int *br_stack = stack + bl_count; + int br_count = sklansky( pointer, ptseq->total - 1, miny_ind, br_stack, 1, 1 ); + + if( orientation != CV_COUNTER_CLOCKWISE ) + { + CV_SWAP( bl_stack, br_stack, t_stack ); + CV_SWAP( bl_count, br_count, t_count ); + } + + if( stop_idx >= 0 ) + { + int check_idx = bl_count > 2 ? bl_stack[1] : + bl_count + br_count > 2 ? br_stack[2-bl_count] : -1; + if( check_idx == stop_idx || (check_idx >= 0 && + pointer[check_idx]->x == pointer[stop_idx]->x && + pointer[check_idx]->y == pointer[stop_idx]->y) ) + { + /* if all the points lie on the same line, then + the bottom part of the convex hull is the mirrored top part + (except the exteme points).*/ + bl_count = MIN( bl_count, 2 ); + br_count = MIN( br_count, 2 ); + } + } + + if( hulltype == CV_SEQ_ELTYPE_PPOINT ) + { + for( i = 0; i < bl_count - 1; i++ ) + CV_WRITE_SEQ_ELEM( pointer[bl_stack[i]], writer ); + + for( i = br_count - 1; i > 0; i-- ) + CV_WRITE_SEQ_ELEM( pointer[br_stack[i]], writer ); + } + else if( hulltype == CV_SEQ_ELTYPE_INDEX ) + { + icvCalcAndWritePtIndices( pointer, bl_stack, 0, bl_count-1, ptseq, &writer ); + icvCalcAndWritePtIndices( pointer, br_stack, br_count-1, 0, ptseq, &writer ); + } + else + { + for( i = 0; i < bl_count - 1; i++ ) + CV_WRITE_SEQ_ELEM( pointer[bl_stack[i]][0], writer ); + + for( i = br_count - 1; i > 0; i-- ) + CV_WRITE_SEQ_ELEM( 
pointer[br_stack[i]][0], writer ); + } + } + +finish_hull: + cvEndWriteSeq( &writer ); + + if( mat ) + { + if( mat->rows > mat->cols ) + mat->rows = hullseq->total; + else + mat->cols = hullseq->total; + } + else + { + hull.s = hullseq; + hull.c->rect = cvBoundingRect( ptseq, + ptseq->header_size < (int)sizeof(CvContour) || + &ptseq->flags == &contour_header.flags ); + + /*if( ptseq != (CvSeq*)&contour_header ) + hullseq->v_prev = ptseq;*/ + } + + return hull.s; +} + + +/* contour must be a simple polygon */ +/* it must have more than 3 points */ +CV_IMPL CvSeq* cvConvexityDefects( const CvArr* array, + const CvArr* hullarray, + CvMemStorage* storage ) +{ + CvSeq* defects = 0; + + int i, index; + CvPoint* hull_cur; + + /* is orientation of hull different from contour one */ + int rev_orientation; + + CvContour contour_header; + union { CvContour c; CvSeq s; } hull_header; + CvSeqBlock block, hullblock; + CvSeq *ptseq = (CvSeq*)array, *hull = (CvSeq*)hullarray; + + CvSeqReader hull_reader; + CvSeqReader ptseq_reader; + CvSeqWriter writer; + int is_index; + + if( CV_IS_SEQ( ptseq )) + { + if( !CV_IS_SEQ_POINT_SET( ptseq )) + CV_Error( CV_StsUnsupportedFormat, + "Input sequence is not a sequence of points" ); + if( !storage ) + storage = ptseq->storage; + } + else + { + ptseq = cvPointSeqFromMat( CV_SEQ_KIND_GENERIC, array, &contour_header, &block ); + } + + if( CV_SEQ_ELTYPE( ptseq ) != CV_32SC2 ) + CV_Error( CV_StsUnsupportedFormat, "Floating-point coordinates are not supported here" ); + + if( CV_IS_SEQ( hull )) + { + int hulltype = CV_SEQ_ELTYPE( hull ); + if( hulltype != CV_SEQ_ELTYPE_PPOINT && hulltype != CV_SEQ_ELTYPE_INDEX ) + CV_Error( CV_StsUnsupportedFormat, + "Convex hull must represented as a sequence " + "of indices or sequence of pointers" ); + if( !storage ) + storage = hull->storage; + } + else + { + CvMat* mat = (CvMat*)hull; + + if( !CV_IS_MAT( hull )) + CV_Error(CV_StsBadArg, "Convex hull is neither sequence nor matrix"); + + if( (mat->cols != 1 && 
mat->rows != 1) || + !CV_IS_MAT_CONT(mat->type) || CV_MAT_TYPE(mat->type) != CV_32SC1 ) + CV_Error( CV_StsBadArg, + "The matrix should be 1-dimensional and continuous array of int's" ); + + if( mat->cols + mat->rows - 1 > ptseq->total ) + CV_Error( CV_StsBadSize, "Convex hull is larger than the point sequence" ); + + hull = cvMakeSeqHeaderForArray( + CV_SEQ_KIND_CURVE|CV_MAT_TYPE(mat->type)|CV_SEQ_FLAG_CLOSED, + sizeof(CvContour), CV_ELEM_SIZE(mat->type), mat->data.ptr, + mat->cols + mat->rows - 1, &hull_header.s, &hullblock ); + } + + is_index = CV_SEQ_ELTYPE(hull) == CV_SEQ_ELTYPE_INDEX; + + if( !storage ) + CV_Error( CV_StsNullPtr, "NULL storage pointer" ); + + defects = cvCreateSeq( CV_SEQ_KIND_GENERIC, sizeof(CvSeq), sizeof(CvConvexityDefect), storage ); + + if( ptseq->total < 4 || hull->total < 3) + { + //CV_ERROR( CV_StsBadSize, + // "point seq size must be >= 4, convex hull size must be >= 3" ); + return defects; + } + + /* recognize co-orientation of ptseq and its hull */ + { + int sign = 0; + int index1, index2, index3; + + if( !is_index ) + { + CvPoint* pos = *CV_SEQ_ELEM( hull, CvPoint*, 0 ); + index1 = cvSeqElemIdx( ptseq, pos ); + + pos = *CV_SEQ_ELEM( hull, CvPoint*, 1 ); + index2 = cvSeqElemIdx( ptseq, pos ); + + pos = *CV_SEQ_ELEM( hull, CvPoint*, 2 ); + index3 = cvSeqElemIdx( ptseq, pos ); + } + else + { + index1 = *CV_SEQ_ELEM( hull, int, 0 ); + index2 = *CV_SEQ_ELEM( hull, int, 1 ); + index3 = *CV_SEQ_ELEM( hull, int, 2 ); + } + + sign += (index2 > index1) ? 1 : 0; + sign += (index3 > index2) ? 1 : 0; + sign += (index1 > index3) ? 1 : 0; + + rev_orientation = (sign == 2) ? 
0 : 1; + } + + cvStartReadSeq( ptseq, &ptseq_reader, 0 ); + cvStartReadSeq( hull, &hull_reader, rev_orientation ); + + if( !is_index ) + { + hull_cur = *(CvPoint**)hull_reader.prev_elem; + index = cvSeqElemIdx( ptseq, (char*)hull_cur, 0 ); + } + else + { + index = *(int*)hull_reader.prev_elem; + hull_cur = CV_GET_SEQ_ELEM( CvPoint, ptseq, index ); + } + cvSetSeqReaderPos( &ptseq_reader, index ); + cvStartAppendToSeq( defects, &writer ); + + /* cycle through ptseq and hull with computing defects */ + for( i = 0; i < hull->total; i++ ) + { + CvConvexityDefect defect; + int is_defect = 0; + double dx0, dy0; + double depth = 0, scale; + CvPoint* hull_next; + + if( !is_index ) + hull_next = *(CvPoint**)hull_reader.ptr; + else + { + int t = *(int*)hull_reader.ptr; + hull_next = CV_GET_SEQ_ELEM( CvPoint, ptseq, t ); + } + + dx0 = (double)hull_next->x - (double)hull_cur->x; + dy0 = (double)hull_next->y - (double)hull_cur->y; + assert( dx0 != 0 || dy0 != 0 ); + scale = 1./sqrt(dx0*dx0 + dy0*dy0); + + defect.start = hull_cur; + defect.end = hull_next; + + for(;;) + { + /* go through ptseq to achieve next hull point */ + CV_NEXT_SEQ_ELEM( sizeof(CvPoint), ptseq_reader ); + + if( ptseq_reader.ptr == (schar*)hull_next ) + break; + else + { + CvPoint* cur = (CvPoint*)ptseq_reader.ptr; + + /* compute distance from current point to hull edge */ + double dx = (double)cur->x - (double)hull_cur->x; + double dy = (double)cur->y - (double)hull_cur->y; + + /* compute depth */ + double dist = fabs(-dy0*dx + dx0*dy) * scale; + + if( dist > depth ) + { + depth = dist; + defect.depth_point = cur; + defect.depth = (float)depth; + is_defect = 1; + } + } + } + if( is_defect ) + { + CV_WRITE_SEQ_ELEM( defect, writer ); + } + + hull_cur = hull_next; + if( rev_orientation ) + { + CV_PREV_SEQ_ELEM( hull->elem_size, hull_reader ); + } + else + { + CV_NEXT_SEQ_ELEM( hull->elem_size, hull_reader ); + } + } + + return cvEndWriteSeq( &writer ); +} + + +CV_IMPL int +cvCheckContourConvexity( const CvArr* 
array ) +{ + int flag = -1; + + int i; + int orientation = 0; + CvSeqReader reader; + CvContour contour_header; + CvSeqBlock block; + CvSeq* contour = (CvSeq*)array; + + if( CV_IS_SEQ(contour) ) + { + if( !CV_IS_SEQ_POINT_SET(contour)) + CV_Error( CV_StsUnsupportedFormat, + "Input sequence must be polygon (closed 2d curve)" ); + } + else + { + contour = cvPointSeqFromMat(CV_SEQ_KIND_CURVE|CV_SEQ_FLAG_CLOSED, array, &contour_header, &block ); + } + + if( contour->total == 0 ) + return -1; + + cvStartReadSeq( contour, &reader, 0 ); + flag = 1; + + if( CV_SEQ_ELTYPE( contour ) == CV_32SC2 ) + { + CvPoint *prev_pt = (CvPoint*)reader.prev_elem; + CvPoint *cur_pt = (CvPoint*)reader.ptr; + + int dx0 = cur_pt->x - prev_pt->x; + int dy0 = cur_pt->y - prev_pt->y; + + for( i = 0; i < contour->total; i++ ) + { + int dxdy0, dydx0; + int dx, dy; + + /*int orient; */ + CV_NEXT_SEQ_ELEM( sizeof(CvPoint), reader ); + prev_pt = cur_pt; + cur_pt = (CvPoint *) reader.ptr; + + dx = cur_pt->x - prev_pt->x; + dy = cur_pt->y - prev_pt->y; + dxdy0 = dx * dy0; + dydx0 = dy * dx0; + + /* find orientation */ + /*orient = -dy0 * dx + dx0 * dy; + orientation |= (orient > 0) ? 1 : 2; + */ + orientation |= (dydx0 > dxdy0) ? 1 : ((dydx0 < dxdy0) ? 2 : 3); + + if( orientation == 3 ) + { + flag = 0; + break; + } + + dx0 = dx; + dy0 = dy; + } + } + else + { + CV_Assert( CV_SEQ_ELTYPE(contour) == CV_32FC2 ); + + CvPoint2D32f *prev_pt = (CvPoint2D32f*)reader.prev_elem; + CvPoint2D32f *cur_pt = (CvPoint2D32f*)reader.ptr; + + float dx0 = cur_pt->x - prev_pt->x; + float dy0 = cur_pt->y - prev_pt->y; + + for( i = 0; i < contour->total; i++ ) + { + float dxdy0, dydx0; + float dx, dy; + + /*int orient; */ + CV_NEXT_SEQ_ELEM( sizeof(CvPoint2D32f), reader ); + prev_pt = cur_pt; + cur_pt = (CvPoint2D32f*) reader.ptr; + + dx = cur_pt->x - prev_pt->x; + dy = cur_pt->y - prev_pt->y; + dxdy0 = dx * dy0; + dydx0 = dy * dx0; + + /* find orientation */ + /*orient = -dy0 * dx + dx0 * dy; + orientation |= (orient > 0) ? 
1 : 2; + */ + orientation |= (dydx0 > dxdy0) ? 1 : ((dydx0 < dxdy0) ? 2 : 3); + + if( orientation == 3 ) + { + flag = 0; + break; + } + + dx0 = dx; + dy0 = dy; + } + } + + return flag; +} + + +/* End of file. */ diff --git a/opencv/imgproc/corner.cpp b/opencv/imgproc/corner.cpp new file mode 100644 index 0000000..76b0ece --- /dev/null +++ b/opencv/imgproc/corner.cpp @@ -0,0 +1,413 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" +#include + + +namespace cv +{ + +static void +calcMinEigenVal( const Mat& _cov, Mat& _dst ) +{ + int i, j; + Size size = _cov.size(); +#if CV_SSE + volatile bool simd = checkHardwareSupport(CV_CPU_SSE); +#endif + + if( _cov.isContinuous() && _dst.isContinuous() ) + { + size.width *= size.height; + size.height = 1; + } + + for( i = 0; i < size.height; i++ ) + { + const float* cov = (const float*)(_cov.data + _cov.step*i); + float* dst = (float*)(_dst.data + _dst.step*i); + j = 0; + #if CV_SSE + if( simd ) + { + __m128 half = _mm_set1_ps(0.5f); + for( ; j <= size.width - 5; j += 4 ) + { + __m128 t0 = _mm_loadu_ps(cov + j*3); // a0 b0 c0 x + __m128 t1 = _mm_loadu_ps(cov + j*3 + 3); // a1 b1 c1 x + __m128 t2 = _mm_loadu_ps(cov + j*3 + 6); // a2 b2 c2 x + __m128 t3 = _mm_loadu_ps(cov + j*3 + 9); // a3 b3 c3 x + __m128 a, b, c, t; + t = _mm_unpacklo_ps(t0, t1); // a0 a1 b0 b1 + c = _mm_unpackhi_ps(t0, t1); // c0 c1 x x + b = _mm_unpacklo_ps(t2, t3); // a2 a3 b2 b3 + c = _mm_movelh_ps(c, _mm_unpackhi_ps(t2, t3)); // c0 c1 c2 c3 + a = _mm_movelh_ps(t, b); + b = _mm_movehl_ps(b, t); + a = _mm_mul_ps(a, half); + c = _mm_mul_ps(c, half); + t = _mm_sub_ps(a, c); + t = _mm_add_ps(_mm_mul_ps(t, t), _mm_mul_ps(b,b)); + a = _mm_sub_ps(_mm_add_ps(a, c), _mm_sqrt_ps(t)); + _mm_storeu_ps(dst + j, a); + } + } + #endif + for( ; j < size.width; j++ ) + { + float a = cov[j*3]*0.5f; + float b = cov[j*3+1]; + float c = 
cov[j*3+2]*0.5f; + dst[j] = (float)((a + c) - std::sqrt((a - c)*(a - c) + b*b)); + } + } +} + + +static void +calcHarris( const Mat& _cov, Mat& _dst, double k ) +{ + int i, j; + Size size = _cov.size(); +#if CV_SSE + volatile bool simd = checkHardwareSupport(CV_CPU_SSE); +#endif + + if( _cov.isContinuous() && _dst.isContinuous() ) + { + size.width *= size.height; + size.height = 1; + } + + for( i = 0; i < size.height; i++ ) + { + const float* cov = (const float*)(_cov.data + _cov.step*i); + float* dst = (float*)(_dst.data + _dst.step*i); + j = 0; + + #if CV_SSE + if( simd ) + { + __m128 k4 = _mm_set1_ps((float)k); + for( ; j <= size.width - 5; j += 4 ) + { + __m128 t0 = _mm_loadu_ps(cov + j*3); // a0 b0 c0 x + __m128 t1 = _mm_loadu_ps(cov + j*3 + 3); // a1 b1 c1 x + __m128 t2 = _mm_loadu_ps(cov + j*3 + 6); // a2 b2 c2 x + __m128 t3 = _mm_loadu_ps(cov + j*3 + 9); // a3 b3 c3 x + __m128 a, b, c, t; + t = _mm_unpacklo_ps(t0, t1); // a0 a1 b0 b1 + c = _mm_unpackhi_ps(t0, t1); // c0 c1 x x + b = _mm_unpacklo_ps(t2, t3); // a2 a3 b2 b3 + c = _mm_movelh_ps(c, _mm_unpackhi_ps(t2, t3)); // c0 c1 c2 c3 + a = _mm_movelh_ps(t, b); + b = _mm_movehl_ps(b, t); + t = _mm_add_ps(a, c); + a = _mm_sub_ps(_mm_mul_ps(a, c), _mm_mul_ps(b, b)); + t = _mm_mul_ps(_mm_mul_ps(k4, t), t); + a = _mm_sub_ps(a, t); + _mm_storeu_ps(dst + j, a); + } + } + #endif + + for( ; j < size.width; j++ ) + { + float a = cov[j*3]; + float b = cov[j*3+1]; + float c = cov[j*3+2]; + dst[j] = (float)(a*c - b*b - k*(a + c)*(a + c)); + } + } +} + + +static void +calcEigenValsVecs( const Mat& _cov, Mat& _dst ) +{ + int i, j; + Size size = _cov.size(); + if( _cov.isContinuous() && _dst.isContinuous() ) + { + size.width *= size.height; + size.height = 1; + } + + for( i = 0; i < size.height; i++ ) + { + const float* cov = (const float*)(_cov.data + _cov.step*i); + float* dst = (float*)(_dst.data + _dst.step*i); + + for( j = 0; j < size.width; j++ ) + { + double a = cov[j*3]; + double b = cov[j*3+1]; + double c = 
cov[j*3+2]; + + double u = (a + c)*0.5; + double v = std::sqrt((a - c)*(a - c)*0.25 + b*b); + double l1 = u + v; + double l2 = u - v; + + double x = b; + double y = l1 - a; + double e = fabs(x); + + if( e + fabs(y) < 1e-4 ) + { + y = b; + x = l1 - c; + e = fabs(x); + if( e + fabs(y) < 1e-4 ) + { + e = 1./(e + fabs(y) + FLT_EPSILON); + x *= e, y *= e; + } + } + + double d = 1./std::sqrt(x*x + y*y + DBL_EPSILON); + dst[6*j] = (float)l1; + dst[6*j + 2] = (float)(x*d); + dst[6*j + 3] = (float)(y*d); + + x = b; + y = l2 - a; + e = fabs(x); + + if( e + fabs(y) < 1e-4 ) + { + y = b; + x = l2 - c; + e = fabs(x); + if( e + fabs(y) < 1e-4 ) + { + e = 1./(e + fabs(y) + FLT_EPSILON); + x *= e, y *= e; + } + } + + d = 1./std::sqrt(x*x + y*y + DBL_EPSILON); + dst[6*j + 1] = (float)l2; + dst[6*j + 4] = (float)(x*d); + dst[6*j + 5] = (float)(y*d); + } + } +} + + +enum { MINEIGENVAL=0, HARRIS=1, EIGENVALSVECS=2 }; + + +static void +cornerEigenValsVecs( const Mat& src, Mat& eigenv, int block_size, + int aperture_size, int op_type, double k=0., + int borderType=BORDER_DEFAULT ) +{ + int depth = src.depth(); + double scale = (double)(1 << ((aperture_size > 0 ? 
aperture_size : 3) - 1)) * block_size; + if( aperture_size < 0 ) + scale *= 2.; + if( depth == CV_8U ) + scale *= 255.; + scale = 1./scale; + + CV_Assert( src.type() == CV_8UC1 || src.type() == CV_32FC1 ); + + Mat Dx, Dy; + if( aperture_size > 0 ) + { + Sobel( src, Dx, CV_32F, 1, 0, aperture_size, scale, 0, borderType ); + Sobel( src, Dy, CV_32F, 0, 1, aperture_size, scale, 0, borderType ); + } + else + { + Scharr( src, Dx, CV_32F, 1, 0, scale, 0, borderType ); + Scharr( src, Dy, CV_32F, 0, 1, scale, 0, borderType ); + } + + Size size = src.size(); + Mat cov( size, CV_32FC3 ); + int i, j; + + for( i = 0; i < size.height; i++ ) + { + float* cov_data = (float*)(cov.data + i*cov.step); + const float* dxdata = (const float*)(Dx.data + i*Dx.step); + const float* dydata = (const float*)(Dy.data + i*Dy.step); + + for( j = 0; j < size.width; j++ ) + { + float dx = dxdata[j]; + float dy = dydata[j]; + + cov_data[j*3] = dx*dx; + cov_data[j*3+1] = dx*dy; + cov_data[j*3+2] = dy*dy; + } + } + + boxFilter(cov, cov, cov.depth(), Size(block_size, block_size), + Point(-1,-1), false, borderType ); + + if( op_type == MINEIGENVAL ) + calcMinEigenVal( cov, eigenv ); + else if( op_type == HARRIS ) + calcHarris( cov, eigenv, k ); + else if( op_type == EIGENVALSVECS ) + calcEigenValsVecs( cov, eigenv ); +} + +} + +void cv::cornerMinEigenVal( InputArray _src, OutputArray _dst, int blockSize, int ksize, int borderType ) +{ + Mat src = _src.getMat(); + _dst.create( src.size(), CV_32F ); + Mat dst = _dst.getMat(); + cornerEigenValsVecs( src, dst, blockSize, ksize, MINEIGENVAL, 0, borderType ); +} + + +void cv::cornerHarris( InputArray _src, OutputArray _dst, int blockSize, int ksize, double k, int borderType ) +{ + Mat src = _src.getMat(); + _dst.create( src.size(), CV_32F ); + Mat dst = _dst.getMat(); + cornerEigenValsVecs( src, dst, blockSize, ksize, HARRIS, k, borderType ); +} + + +void cv::cornerEigenValsAndVecs( InputArray _src, OutputArray _dst, int blockSize, int ksize, int borderType 
) +{ + Mat src = _src.getMat(); + Size dsz = _dst.size(); + int dtype = _dst.type(); + + if( dsz.height != src.rows || dsz.width*CV_MAT_CN(dtype) != src.cols*6 || CV_MAT_DEPTH(dtype) != CV_32F ) + _dst.create( src.size(), CV_32FC(6) ); + Mat dst = _dst.getMat(); + cornerEigenValsVecs( src, dst, blockSize, ksize, EIGENVALSVECS, 0, borderType ); +} + + +void cv::preCornerDetect( InputArray _src, OutputArray _dst, int ksize, int borderType ) +{ + Mat Dx, Dy, D2x, D2y, Dxy, src = _src.getMat(); + + CV_Assert( src.type() == CV_8UC1 || src.type() == CV_32FC1 ); + _dst.create( src.size(), CV_32F ); + Mat dst = _dst.getMat(); + + Sobel( src, Dx, CV_32F, 1, 0, ksize, 1, 0, borderType ); + Sobel( src, Dy, CV_32F, 0, 1, ksize, 1, 0, borderType ); + Sobel( src, D2x, CV_32F, 2, 0, ksize, 1, 0, borderType ); + Sobel( src, D2y, CV_32F, 0, 2, ksize, 1, 0, borderType ); + Sobel( src, Dxy, CV_32F, 1, 1, ksize, 1, 0, borderType ); + + double factor = 1 << (ksize - 1); + if( src.depth() == CV_8U ) + factor *= 255; + factor = 1./(factor * factor * factor); + + Size size = src.size(); + int i, j; + for( i = 0; i < size.height; i++ ) + { + float* dstdata = (float*)(dst.data + i*dst.step); + const float* dxdata = (const float*)(Dx.data + i*Dx.step); + const float* dydata = (const float*)(Dy.data + i*Dy.step); + const float* d2xdata = (const float*)(D2x.data + i*D2x.step); + const float* d2ydata = (const float*)(D2y.data + i*D2y.step); + const float* dxydata = (const float*)(Dxy.data + i*Dxy.step); + + for( j = 0; j < size.width; j++ ) + { + float dx = dxdata[j]; + float dy = dydata[j]; + dstdata[j] = (float)(factor*(dx*dx*d2ydata[j] + dy*dy*d2xdata[j] - 2*dx*dy*dxydata[j])); + } + } +} + +CV_IMPL void +cvCornerMinEigenVal( const CvArr* srcarr, CvArr* dstarr, + int block_size, int aperture_size ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + + CV_Assert( src.size() == dst.size() && dst.type() == CV_32FC1 ); + cv::cornerMinEigenVal( src, dst, block_size, 
aperture_size, cv::BORDER_REPLICATE ); +} + +CV_IMPL void +cvCornerHarris( const CvArr* srcarr, CvArr* dstarr, + int block_size, int aperture_size, double k ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + + CV_Assert( src.size() == dst.size() && dst.type() == CV_32FC1 ); + cv::cornerHarris( src, dst, block_size, aperture_size, k, cv::BORDER_REPLICATE ); +} + + +CV_IMPL void +cvCornerEigenValsAndVecs( const void* srcarr, void* dstarr, + int block_size, int aperture_size ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + + CV_Assert( src.rows == dst.rows && src.cols*6 == dst.cols*dst.channels() && dst.depth() == CV_32F ); + cv::cornerEigenValsAndVecs( src, dst, block_size, aperture_size, cv::BORDER_REPLICATE ); +} + + +CV_IMPL void +cvPreCornerDetect( const void* srcarr, void* dstarr, int aperture_size ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + + CV_Assert( src.size() == dst.size() && dst.type() == CV_32FC1 ); + cv::preCornerDetect( src, dst, aperture_size, cv::BORDER_REPLICATE ); +} + +/* End of file */ diff --git a/opencv/imgproc/cornersubpix.cpp b/opencv/imgproc/cornersubpix.cpp new file mode 100644 index 0000000..2e18702 --- /dev/null +++ b/opencv/imgproc/cornersubpix.cpp @@ -0,0 +1,271 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ +#include "precomp.hpp" + +CV_IMPL void +cvFindCornerSubPix( const void* srcarr, CvPoint2D32f* corners, + int count, CvSize win, CvSize zeroZone, + CvTermCriteria criteria ) +{ + cv::AutoBuffer buffer; + + const int MAX_ITERS = 100; + const float drv_x[] = { -1.f, 0.f, 1.f }; + const float drv_y[] = { 0.f, 0.5f, 0.f }; + float *maskX; + float *maskY; + float *mask; + float *src_buffer; + float *gx_buffer; + float *gy_buffer; + int win_w = win.width * 2 + 1, win_h = win.height * 2 + 1; + int win_rect_size = (win_w + 4) * (win_h + 4); + double coeff; + CvSize size, src_buf_size; + int i, j, k, pt_i; + int max_iters = 10; + double eps = 0; + + CvMat stub, *src = (CvMat*)srcarr; + src = cvGetMat( srcarr, &stub ); + + if( CV_MAT_TYPE( src->type ) != CV_8UC1 ) + CV_Error( CV_StsBadMask, "" ); + + if( !corners ) + CV_Error( CV_StsNullPtr, "" ); + + if( count < 0 ) + CV_Error( CV_StsBadSize, "" ); + + if( count == 0 ) + return; + + if( win.width <= 0 || win.height <= 0 ) + CV_Error( CV_StsBadSize, "" ); + + size = cvGetMatSize( src ); + + if( size.width < win_w + 4 || size.height < win_h + 4 ) + CV_Error( CV_StsBadSize, "" ); + + /* initialize variables, controlling loop termination */ + switch( criteria.type ) + { + case CV_TERMCRIT_ITER: + eps = 0.f; + max_iters = criteria.max_iter; + break; + case CV_TERMCRIT_EPS: + eps = criteria.epsilon; + max_iters = MAX_ITERS; + break; + case CV_TERMCRIT_ITER | CV_TERMCRIT_EPS: + eps = criteria.epsilon; + max_iters = criteria.max_iter; + break; + default: + assert( 0 ); + CV_Error( CV_StsBadFlag, "" ); + } + + eps = MAX( eps, 0 ); + eps *= eps; /* use square of error in comparsion operations. 
*/ + + max_iters = MAX( max_iters, 1 ); + max_iters = MIN( max_iters, MAX_ITERS ); + + buffer.allocate( win_rect_size * 5 + win_w + win_h + 32 ); + + /* assign pointers */ + maskX = buffer; + maskY = maskX + win_w + 4; + mask = maskY + win_h + 4; + src_buffer = mask + win_w * win_h; + gx_buffer = src_buffer + win_rect_size; + gy_buffer = gx_buffer + win_rect_size; + + coeff = 1. / (win.width * win.width); + + /* calculate mask */ + for( i = -win.width, k = 0; i <= win.width; i++, k++ ) + { + maskX[k] = (float)exp( -i * i * coeff ); + } + + if( win.width == win.height ) + { + maskY = maskX; + } + else + { + coeff = 1. / (win.height * win.height); + for( i = -win.height, k = 0; i <= win.height; i++, k++ ) + { + maskY[k] = (float) exp( -i * i * coeff ); + } + } + + for( i = 0; i < win_h; i++ ) + { + for( j = 0; j < win_w; j++ ) + { + mask[i * win_w + j] = maskX[j] * maskY[i]; + } + } + + + /* make zero_zone */ + if( zeroZone.width >= 0 && zeroZone.height >= 0 && + zeroZone.width * 2 + 1 < win_w && zeroZone.height * 2 + 1 < win_h ) + { + for( i = win.height - zeroZone.height; i <= win.height + zeroZone.height; i++ ) + { + for( j = win.width - zeroZone.width; j <= win.width + zeroZone.width; j++ ) + { + mask[i * win_w + j] = 0; + } + } + } + + /* set sizes of image rectangles, used in convolutions */ + src_buf_size.width = win_w + 2; + src_buf_size.height = win_h + 2; + + /* do optimization loop for all the points */ + for( pt_i = 0; pt_i < count; pt_i++ ) + { + CvPoint2D32f cT = corners[pt_i], cI = cT; + int iter = 0; + double err; + + do + { + CvPoint2D32f cI2; + double a, b, c, bb1, bb2; + + IPPI_CALL( icvGetRectSubPix_8u32f_C1R( (uchar*)src->data.ptr, src->step, size, + src_buffer, (win_w + 2) * sizeof( src_buffer[0] ), + cvSize( win_w + 2, win_h + 2 ), cI )); + + /* calc derivatives */ + icvSepConvSmall3_32f( src_buffer, src_buf_size.width * sizeof(src_buffer[0]), + gx_buffer, win_w * sizeof(gx_buffer[0]), + src_buf_size, drv_x, drv_y, buffer ); + + 
icvSepConvSmall3_32f( src_buffer, src_buf_size.width * sizeof(src_buffer[0]), + gy_buffer, win_w * sizeof(gy_buffer[0]), + src_buf_size, drv_y, drv_x, buffer ); + + a = b = c = bb1 = bb2 = 0; + + /* process gradient */ + for( i = 0, k = 0; i < win_h; i++ ) + { + double py = i - win.height; + + for( j = 0; j < win_w; j++, k++ ) + { + double m = mask[k]; + double tgx = gx_buffer[k]; + double tgy = gy_buffer[k]; + double gxx = tgx * tgx * m; + double gxy = tgx * tgy * m; + double gyy = tgy * tgy * m; + double px = j - win.width; + + a += gxx; + b += gxy; + c += gyy; + + bb1 += gxx * px + gxy * py; + bb2 += gxy * px + gyy * py; + } + } + + { + double A[4]; + double InvA[4]; + CvMat matA, matInvA; + + A[0] = a; + A[1] = A[2] = b; + A[3] = c; + + cvInitMatHeader( &matA, 2, 2, CV_64F, A ); + cvInitMatHeader( &matInvA, 2, 2, CV_64FC1, InvA ); + + cvInvert( &matA, &matInvA, CV_SVD ); + cI2.x = (float)(cI.x + InvA[0]*bb1 + InvA[1]*bb2); + cI2.y = (float)(cI.y + InvA[2]*bb1 + InvA[3]*bb2); + } + + err = (cI2.x - cI.x) * (cI2.x - cI.x) + (cI2.y - cI.y) * (cI2.y - cI.y); + cI = cI2; + } + while( ++iter < max_iters && err > eps ); + + /* if new point is too far from initial, it means poor convergence. + leave initial point as the result */ + if( fabs( cI.x - cT.x ) > win.width || fabs( cI.y - cT.y ) > win.height ) + { + cI = cT; + } + + corners[pt_i] = cI; /* store result */ + } +} + +void cv::cornerSubPix( InputArray _image, InputOutputArray _corners, + Size winSize, Size zeroZone, + TermCriteria criteria ) +{ + Mat corners = _corners.getMat(); + int ncorners = corners.checkVector(2); + CV_Assert( ncorners >= 0 && corners.depth() == CV_32F ); + Mat image = _image.getMat(); + CvMat c_image = image; + + cvFindCornerSubPix( &c_image, (CvPoint2D32f*)corners.data, ncorners, + winSize, zeroZone, criteria ); +} + +/* End of file. 
*/ diff --git a/opencv/imgproc/deriv.cpp b/opencv/imgproc/deriv.cpp new file mode 100644 index 0000000..fef69f0 --- /dev/null +++ b/opencv/imgproc/deriv.cpp @@ -0,0 +1,629 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) +static IppStatus sts = ippInit(); +#endif +/****************************************************************************************/ + +/* lightweight convolution with 3x3 kernel */ +void icvSepConvSmall3_32f( float* src, int src_step, float* dst, int dst_step, + CvSize src_size, const float* kx, const float* ky, float* buffer ) +{ + int dst_width, buffer_step = 0; + int x, y; + + assert( src && dst && src_size.width > 2 && src_size.height > 2 && + (src_step & 3) == 0 && (dst_step & 3) == 0 && + (kx || ky) && (buffer || !kx || !ky)); + + src_step /= sizeof(src[0]); + dst_step /= sizeof(dst[0]); + + dst_width = src_size.width - 2; + + if( !kx ) + { + /* set vars, so that vertical convolution + will write results into destination ROI and + horizontal convolution won't run */ + src_size.width = dst_width; + buffer_step = dst_step; + buffer = dst; + dst_width = 0; + } + + assert( src_step >= src_size.width && dst_step >= dst_width ); + + src_size.height -= 3; + if( !ky ) + { + /* set vars, so that vertical convolution won't run and + horizontal convolution will write results into destination ROI */ + src_size.height += 3; + buffer_step = src_step; + buffer = src; + src_size.width = 0; + } + + for( y = 0; y <= src_size.height; y++, src += src_step, + dst += dst_step, + buffer += buffer_step ) + { + float* src2 = src + src_step; + float* src3 = src 
+ src_step*2; + for( x = 0; x < src_size.width; x++ ) + { + buffer[x] = (float)(ky[0]*src[x] + ky[1]*src2[x] + ky[2]*src3[x]); + } + + for( x = 0; x < dst_width; x++ ) + { + dst[x] = (float)(kx[0]*buffer[x] + kx[1]*buffer[x+1] + kx[2]*buffer[x+2]); + } + } +} + + +/****************************************************************************************\ + Sobel & Scharr Derivative Filters +\****************************************************************************************/ + +namespace cv +{ + +static void getScharrKernels( OutputArray _kx, OutputArray _ky, + int dx, int dy, bool normalize, int ktype ) +{ + const int ksize = 3; + + CV_Assert( ktype == CV_32F || ktype == CV_64F ); + _kx.create(ksize, 1, ktype, -1, true); + _ky.create(ksize, 1, ktype, -1, true); + Mat kx = _kx.getMat(); + Mat ky = _ky.getMat(); + + CV_Assert( dx >= 0 && dy >= 0 && dx+dy == 1 ); + + for( int k = 0; k < 2; k++ ) + { + Mat* kernel = k == 0 ? &kx : &ky; + int order = k == 0 ? dx : dy; + int kerI[3]; + + if( order == 0 ) + kerI[0] = 3, kerI[1] = 10, kerI[2] = 3; + else if( order == 1 ) + kerI[0] = -1, kerI[1] = 0, kerI[2] = 1; + + Mat temp(kernel->rows, kernel->cols, CV_32S, &kerI[0]); + double scale = !normalize || order == 1 ? 1. 
: 1./32; + temp.convertTo(*kernel, ktype, scale); + } +} + + +static void getSobelKernels( OutputArray _kx, OutputArray _ky, + int dx, int dy, int _ksize, bool normalize, int ktype ) +{ + int i, j, ksizeX = _ksize, ksizeY = _ksize; + if( ksizeX == 1 && dx > 0 ) + ksizeX = 3; + if( ksizeY == 1 && dy > 0 ) + ksizeY = 3; + + CV_Assert( ktype == CV_32F || ktype == CV_64F ); + + _kx.create(ksizeX, 1, ktype, -1, true); + _ky.create(ksizeY, 1, ktype, -1, true); + Mat kx = _kx.getMat(); + Mat ky = _ky.getMat(); + + if( _ksize % 2 == 0 || _ksize > 31 ) + CV_Error( CV_StsOutOfRange, "The kernel size must be odd and not larger than 31" ); + vector kerI(std::max(ksizeX, ksizeY) + 1); + + CV_Assert( dx >= 0 && dy >= 0 && dx+dy > 0 ); + + for( int k = 0; k < 2; k++ ) + { + Mat* kernel = k == 0 ? &kx : &ky; + int order = k == 0 ? dx : dy; + int ksize = k == 0 ? ksizeX : ksizeY; + + CV_Assert( ksize > order ); + + if( ksize == 1 ) + kerI[0] = 1; + else if( ksize == 3 ) + { + if( order == 0 ) + kerI[0] = 1, kerI[1] = 2, kerI[2] = 1; + else if( order == 1 ) + kerI[0] = -1, kerI[1] = 0, kerI[2] = 1; + else + kerI[0] = 1, kerI[1] = -2, kerI[2] = 1; + } + else + { + int oldval, newval; + kerI[0] = 1; + for( i = 0; i < ksize; i++ ) + kerI[i+1] = 0; + + for( i = 0; i < ksize - order - 1; i++ ) + { + oldval = kerI[0]; + for( j = 1; j <= ksize; j++ ) + { + newval = kerI[j]+kerI[j-1]; + kerI[j-1] = oldval; + oldval = newval; + } + } + + for( i = 0; i < order; i++ ) + { + oldval = -kerI[0]; + for( j = 1; j <= ksize; j++ ) + { + newval = kerI[j-1] - kerI[j]; + kerI[j-1] = oldval; + oldval = newval; + } + } + } + + Mat temp(kernel->rows, kernel->cols, CV_32S, &kerI[0]); + double scale = !normalize ? 1. 
: 1./(1 << (ksize-order-1)); + temp.convertTo(*kernel, ktype, scale); + } +} + +} + +void cv::getDerivKernels( OutputArray kx, OutputArray ky, int dx, int dy, + int ksize, bool normalize, int ktype ) +{ + if( ksize <= 0 ) + getScharrKernels( kx, ky, dx, dy, normalize, ktype ); + else + getSobelKernels( kx, ky, dx, dy, ksize, normalize, ktype ); +} + + +cv::Ptr cv::createDerivFilter(int srcType, int dstType, + int dx, int dy, int ksize, int borderType ) +{ + Mat kx, ky; + getDerivKernels( kx, ky, dx, dy, ksize, false, CV_32F ); + return createSeparableLinearFilter(srcType, dstType, + kx, ky, Point(-1,-1), 0, borderType ); +} + +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) + +namespace cv +{ + +static bool IPPDerivScharr(const Mat& src, Mat& dst, int ddepth, int dx, int dy, double scale) +{ + int bufSize = 0; + cv::AutoBuffer buffer; + IppiSize roi = ippiSize(src.cols, src.rows); + + if( ddepth < 0 ) + ddepth = src.depth(); + + dst.create( src.size(), CV_MAKETYPE(ddepth, src.channels()) ); + + switch(src.type()) + { + case CV_8U: + { + if(scale != 1) + return false; + + switch(dst.type()) + { + case CV_16S: + { + if((dx == 1) && (dy == 0)) + { + ippiFilterScharrVertGetBufferSize_8u16s_C1R(roi,&bufSize); + buffer.allocate(bufSize); + + ippiFilterScharrVertBorder_8u16s_C1R((const Ipp8u*)src.data, src.step, + (Ipp16s*)dst.data, dst.step, roi, ippBorderRepl, 0, (Ipp8u*)(char*)buffer); + + return true; + } + + if((dx == 0) && (dy == 1)) + { + ippiFilterScharrHorizGetBufferSize_8u16s_C1R(roi,&bufSize); + buffer.allocate(bufSize); + + ippiFilterScharrHorizBorder_8u16s_C1R((const Ipp8u*)src.data, src.step, + (Ipp16s*)dst.data, dst.step, roi, ippBorderRepl, 0, (Ipp8u*)(char*)buffer); + + return true; + } + } + + default: + return false; + } + } + + case CV_32F: + { + switch(dst.type()) + { + case CV_32F: + if((dx == 1) && (dy == 0)) + { + ippiFilterScharrVertGetBufferSize_32f_C1R(ippiSize(src.cols, src.rows),&bufSize); + buffer.allocate(bufSize); + + 
ippiFilterScharrVertBorder_32f_C1R((const Ipp32f*)src.data, src.step, + (Ipp32f*)dst.data, dst.step, ippiSize(src.cols, src.rows), + ippBorderRepl, 0, (Ipp8u*)(char*)buffer); + if(scale != 1) + /* IPP is fast, so MulC produce very little perf degradation */ + ippiMulC_32f_C1IR((Ipp32f)scale,(Ipp32f*)dst.data,dst.step,ippiSize(dst.cols*dst.channels(),dst.rows)); + + return true; + } + + if((dx == 0) && (dy == 1)) + { + ippiFilterScharrHorizGetBufferSize_32f_C1R(ippiSize(src.cols, src.rows),&bufSize); + buffer.allocate(bufSize); + + ippiFilterScharrHorizBorder_32f_C1R((const Ipp32f*)src.data, src.step, + (Ipp32f*)dst.data, dst.step, ippiSize(src.cols, src.rows), + ippBorderRepl, 0, (Ipp8u*)(char*)buffer); + if(scale != 1) + ippiMulC_32f_C1IR((Ipp32f)scale,(Ipp32f *)dst.data,dst.step,ippiSize(dst.cols*dst.channels(),dst.rows)); + + return true; + } + + default: + return false; + } + } + + default: + return false; + } +} + + +static bool IPPDeriv(const Mat& src, Mat& dst, int ddepth, int dx, int dy, int ksize, double scale) +{ + int bufSize = 0; + cv::AutoBuffer buffer; + + if(ksize == 3 || ksize == 5) + { + if( ddepth < 0 ) + ddepth = src.depth(); + + if(src.type() == CV_8U && dst.type() == CV_16S && scale == 1) + { + if((dx == 1) && (dy == 0)) + { + ippiFilterSobelNegVertGetBufferSize_8u16s_C1R(ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),&bufSize); + buffer.allocate(bufSize); + + ippiFilterSobelNegVertBorder_8u16s_C1R((const Ipp8u*)src.data, src.step, + (Ipp16s*)dst.data, dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize), + ippBorderRepl, 0, (Ipp8u*)(char*)buffer); + return true; + } + + if((dx == 0) && (dy == 1)) + { + ippiFilterSobelHorizGetBufferSize_8u16s_C1R(ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),&bufSize); + buffer.allocate(bufSize); + + ippiFilterSobelHorizBorder_8u16s_C1R((const Ipp8u*)src.data, src.step, + (Ipp16s*)dst.data, dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize), + 
ippBorderRepl, 0, (Ipp8u*)(char*)buffer); + + return true; + } + + if((dx == 2) && (dy == 0)) + { + ippiFilterSobelVertSecondGetBufferSize_8u16s_C1R(ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),&bufSize); + buffer.allocate(bufSize); + + ippiFilterSobelVertSecondBorder_8u16s_C1R((const Ipp8u*)src.data, src.step, + (Ipp16s*)dst.data, dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize), + ippBorderRepl, 0, (Ipp8u*)(char*)buffer); + + return true; + } + + if((dx == 0) && (dy == 2)) + { + ippiFilterSobelHorizSecondGetBufferSize_8u16s_C1R(ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),&bufSize); + buffer.allocate(bufSize); + + ippiFilterSobelHorizSecondBorder_8u16s_C1R((const Ipp8u*)src.data, src.step, + (Ipp16s*)dst.data, dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize), + ippBorderRepl, 0, (Ipp8u*)(char*)buffer); + + return true; + } + } + + if(src.type() == CV_32F && dst.type() == CV_32F) + { + if((dx == 1) && (dy == 0)) + { + ippiFilterSobelNegVertGetBufferSize_32f_C1R(ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),&bufSize); + buffer.allocate(bufSize); + + ippiFilterSobelNegVertBorder_32f_C1R((const Ipp32f*)src.data, src.step, + (Ipp32f*)dst.data, dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize), + ippBorderRepl, 0, (Ipp8u*)(char*)buffer); + if(scale != 1) + ippiMulC_32f_C1IR((Ipp32f)scale,(Ipp32f *)dst.data,dst.step,ippiSize(dst.cols*dst.channels(),dst.rows)); + + return true; + } + + if((dx == 0) && (dy == 1)) + { + ippiFilterSobelHorizGetBufferSize_32f_C1R(ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),&bufSize); + buffer.allocate(bufSize); + + ippiFilterSobelHorizBorder_32f_C1R((const Ipp32f*)src.data, src.step, + (Ipp32f*)dst.data, dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize), + ippBorderRepl, 0, (Ipp8u*)(char*)buffer); + if(scale != 1) + ippiMulC_32f_C1IR((Ipp32f)scale,(Ipp32f 
*)dst.data,dst.step,ippiSize(dst.cols*dst.channels(),dst.rows)); + + return true; + } + + if((dx == 2) && (dy == 0)) + { + ippiFilterSobelVertSecondGetBufferSize_32f_C1R(ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),&bufSize); + buffer.allocate(bufSize); + + ippiFilterSobelVertSecondBorder_32f_C1R((const Ipp32f*)src.data, src.step, + (Ipp32f*)dst.data, dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize), + ippBorderRepl, 0, (Ipp8u*)(char*)buffer); + if(scale != 1) + ippiMulC_32f_C1IR((Ipp32f)scale,(Ipp32f *)dst.data,dst.step,ippiSize(dst.cols*dst.channels(),dst.rows)); + + return true; + } + + if((dx == 0) && (dy == 2)) + { + ippiFilterSobelHorizSecondGetBufferSize_32f_C1R(ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize),&bufSize); + buffer.allocate(bufSize); + + ippiFilterSobelHorizSecondBorder_32f_C1R((const Ipp32f*)src.data, src.step, + (Ipp32f*)dst.data, dst.step, ippiSize(src.cols, src.rows), (IppiMaskSize)(ksize*10+ksize), + ippBorderRepl, 0, (Ipp8u*)(char*)buffer); + if(scale != 1) + ippiMulC_32f_C1IR((Ipp32f)scale,(Ipp32f *)dst.data,dst.step,ippiSize(dst.cols*dst.channels(),dst.rows)); + + return true; + } + } + } + + if(ksize <= 0) + return IPPDerivScharr(src, dst, ddepth, dx, dy, scale); + + return false; +} + +} + +#endif + +void cv::Sobel( InputArray _src, OutputArray _dst, int ddepth, int dx, int dy, + int ksize, double scale, double delta, int borderType ) +{ + Mat src = _src.getMat(); + if (ddepth < 0) + ddepth = src.depth(); + _dst.create( src.size(), CV_MAKETYPE(ddepth, src.channels()) ); + Mat dst = _dst.getMat(); + +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) + if(dx < 3 && dy < 3 && src.channels() == 1 && borderType == 1) + { + if(IPPDeriv(src, dst, ddepth, dx, dy, ksize,scale)) + return; + } +#endif + int ktype = std::max(CV_32F, std::max(ddepth, src.depth())); + + Mat kx, ky; + getDerivKernels( kx, ky, dx, dy, ksize, false, ktype ); + if( scale != 1 ) + { + // usually the smoothing part is 
the slowest to compute, + // so try to scale it instead of the faster differenciating part + if( dx == 0 ) + kx *= scale; + else + ky *= scale; + } + sepFilter2D( src, dst, ddepth, kx, ky, Point(-1,-1), delta, borderType ); +} + + +void cv::Scharr( InputArray _src, OutputArray _dst, int ddepth, int dx, int dy, + double scale, double delta, int borderType ) +{ + Mat src = _src.getMat(); + if (ddepth < 0) + ddepth = src.depth(); + _dst.create( src.size(), CV_MAKETYPE(ddepth, src.channels()) ); + Mat dst = _dst.getMat(); + +#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7) + if(dx < 2 && dy < 2 && src.channels() == 1 && borderType == 1) + { + if(IPPDerivScharr(src, dst, ddepth, dx, dy, scale)) + return; + } +#endif + int ktype = std::max(CV_32F, std::max(ddepth, src.depth())); + + Mat kx, ky; + getScharrKernels( kx, ky, dx, dy, false, ktype ); + if( scale != 1 ) + { + // usually the smoothing part is the slowest to compute, + // so try to scale it instead of the faster differenciating part + if( dx == 0 ) + kx *= scale; + else + ky *= scale; + } + sepFilter2D( src, dst, ddepth, kx, ky, Point(-1,-1), delta, borderType ); +} + + +void cv::Laplacian( InputArray _src, OutputArray _dst, int ddepth, int ksize, + double scale, double delta, int borderType ) +{ + Mat src = _src.getMat(); + if (ddepth < 0) + ddepth = src.depth(); + _dst.create( src.size(), CV_MAKETYPE(ddepth, src.channels()) ); + Mat dst = _dst.getMat(); + + if( ksize == 1 || ksize == 3 ) + { + float K[2][9] = + {{0, 1, 0, 1, -4, 1, 0, 1, 0}, + {2, 0, 2, 0, -8, 0, 2, 0, 2}}; + Mat kernel(3, 3, CV_32F, K[ksize == 3]); + if( scale != 1 ) + kernel *= scale; + filter2D( src, dst, ddepth, kernel, Point(-1,-1), delta, borderType ); + } + else + { + const size_t STRIPE_SIZE = 1 << 14; + + int depth = src.depth(); + int ktype = std::max(CV_32F, std::max(ddepth, depth)); + int wdepth = depth == CV_8U && ksize <= 5 ? CV_16S : depth <= CV_32F ? 
CV_32F : CV_64F; + int wtype = CV_MAKETYPE(wdepth, src.channels()); + Mat kd, ks; + getSobelKernels( kd, ks, 2, 0, ksize, false, ktype ); + if( ddepth < 0 ) + ddepth = src.depth(); + int dtype = CV_MAKETYPE(ddepth, src.channels()); + + int dy0 = std::min(std::max((int)(STRIPE_SIZE/(getElemSize(src.type())*src.cols)), 1), src.rows); + Ptr fx = createSeparableLinearFilter(src.type(), + wtype, kd, ks, Point(-1,-1), 0, borderType, borderType, Scalar() ); + Ptr fy = createSeparableLinearFilter(src.type(), + wtype, ks, kd, Point(-1,-1), 0, borderType, borderType, Scalar() ); + + int y = fx->start(src), dsty = 0, dy = 0; + fy->start(src); + const uchar* sptr = src.data + y*src.step; + + Mat d2x( dy0 + kd.rows - 1, src.cols, wtype ); + Mat d2y( dy0 + kd.rows - 1, src.cols, wtype ); + + for( ; dsty < src.rows; sptr += dy0*src.step, dsty += dy ) + { + fx->proceed( sptr, (int)src.step, dy0, d2x.data, (int)d2x.step ); + dy = fy->proceed( sptr, (int)src.step, dy0, d2y.data, (int)d2y.step ); + if( dy > 0 ) + { + Mat dstripe = dst.rowRange(dsty, dsty + dy); + d2x.rows = d2y.rows = dy; // modify the headers, which should work + d2x += d2y; + d2x.convertTo( dstripe, dtype, scale, delta ); + } + } + } +} + +///////////////////////////////////////////////////////////////////////////////////////// + +CV_IMPL void +cvSobel( const void* srcarr, void* dstarr, int dx, int dy, int aperture_size ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + + CV_Assert( src.size() == dst.size() && src.channels() == dst.channels() && + ((src.depth() == CV_8U && (dst.depth() == CV_16S || dst.depth() == CV_32F)) || + (src.depth() == CV_32F && dst.depth() == CV_32F))); + + cv::Sobel( src, dst, dst.depth(), dx, dy, aperture_size, 1, 0, cv::BORDER_REPLICATE ); + if( CV_IS_IMAGE(srcarr) && ((IplImage*)srcarr)->origin && dy % 2 != 0 ) + dst *= -1; +} + + +CV_IMPL void +cvLaplace( const void* srcarr, void* dstarr, int aperture_size ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = 
cv::cvarrToMat(dstarr); + + CV_Assert( src.size() == dst.size() && src.channels() == dst.channels() && + ((src.depth() == CV_8U && (dst.depth() == CV_16S || dst.depth() == CV_32F)) || + (src.depth() == CV_32F && dst.depth() == CV_32F))); + + cv::Laplacian( src, dst, dst.depth(), aperture_size, 1, 0, cv::BORDER_REPLICATE ); +} + +/* End of file. */ diff --git a/opencv/imgproc/distransform.cpp b/opencv/imgproc/distransform.cpp new file mode 100644 index 0000000..6a5e886 --- /dev/null +++ b/opencv/imgproc/distransform.cpp @@ -0,0 +1,873 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ +#include "precomp.hpp" + +#define ICV_DIST_SHIFT 16 +#define ICV_INIT_DIST0 (INT_MAX >> 2) + +static CvStatus +icvInitTopBottom( int* temp, int tempstep, CvSize size, int border ) +{ + int i, j; + for( i = 0; i < border; i++ ) + { + int* ttop = (int*)(temp + i*tempstep); + int* tbottom = (int*)(temp + (size.height + border*2 - i - 1)*tempstep); + + for( j = 0; j < size.width + border*2; j++ ) + { + ttop[j] = ICV_INIT_DIST0; + tbottom[j] = ICV_INIT_DIST0; + } + } + + return CV_OK; +} + + +static CvStatus CV_STDCALL +icvDistanceTransform_3x3_C1R( const uchar* src, int srcstep, int* temp, + int step, float* dist, int dststep, CvSize size, const float* metrics ) +{ + const int BORDER = 1; + int i, j; + const int HV_DIST = CV_FLT_TO_FIX( metrics[0], ICV_DIST_SHIFT ); + const int DIAG_DIST = CV_FLT_TO_FIX( metrics[1], ICV_DIST_SHIFT ); + const float scale = 1.f/(1 << ICV_DIST_SHIFT); + + srcstep /= sizeof(src[0]); + step /= sizeof(temp[0]); + dststep /= sizeof(dist[0]); + + icvInitTopBottom( temp, step, size, BORDER ); + + // forward pass + for( i = 0; i < size.height; i++ ) + { + const uchar* s = src + i*srcstep; + int* tmp = (int*)(temp + (i+BORDER)*step) + BORDER; + + 
for( j = 0; j < BORDER; j++ ) + tmp[-j-1] = tmp[size.width + j] = ICV_INIT_DIST0; + + for( j = 0; j < size.width; j++ ) + { + if( !s[j] ) + tmp[j] = 0; + else + { + int t0 = tmp[j-step-1] + DIAG_DIST; + int t = tmp[j-step] + HV_DIST; + if( t0 > t ) t0 = t; + t = tmp[j-step+1] + DIAG_DIST; + if( t0 > t ) t0 = t; + t = tmp[j-1] + HV_DIST; + if( t0 > t ) t0 = t; + tmp[j] = t0; + } + } + } + + // backward pass + for( i = size.height - 1; i >= 0; i-- ) + { + float* d = (float*)(dist + i*dststep); + int* tmp = (int*)(temp + (i+BORDER)*step) + BORDER; + + for( j = size.width - 1; j >= 0; j-- ) + { + int t0 = tmp[j]; + if( t0 > HV_DIST ) + { + int t = tmp[j+step+1] + DIAG_DIST; + if( t0 > t ) t0 = t; + t = tmp[j+step] + HV_DIST; + if( t0 > t ) t0 = t; + t = tmp[j+step-1] + DIAG_DIST; + if( t0 > t ) t0 = t; + t = tmp[j+1] + HV_DIST; + if( t0 > t ) t0 = t; + tmp[j] = t0; + } + d[j] = (float)(t0 * scale); + } + } + + return CV_OK; +} + + +static CvStatus CV_STDCALL +icvDistanceTransform_5x5_C1R( const uchar* src, int srcstep, int* temp, + int step, float* dist, int dststep, CvSize size, const float* metrics ) +{ + const int BORDER = 2; + int i, j; + const int HV_DIST = CV_FLT_TO_FIX( metrics[0], ICV_DIST_SHIFT ); + const int DIAG_DIST = CV_FLT_TO_FIX( metrics[1], ICV_DIST_SHIFT ); + const int LONG_DIST = CV_FLT_TO_FIX( metrics[2], ICV_DIST_SHIFT ); + const float scale = 1.f/(1 << ICV_DIST_SHIFT); + + srcstep /= sizeof(src[0]); + step /= sizeof(temp[0]); + dststep /= sizeof(dist[0]); + + icvInitTopBottom( temp, step, size, BORDER ); + + // forward pass + for( i = 0; i < size.height; i++ ) + { + const uchar* s = src + i*srcstep; + int* tmp = (int*)(temp + (i+BORDER)*step) + BORDER; + + for( j = 0; j < BORDER; j++ ) + tmp[-j-1] = tmp[size.width + j] = ICV_INIT_DIST0; + + for( j = 0; j < size.width; j++ ) + { + if( !s[j] ) + tmp[j] = 0; + else + { + int t0 = tmp[j-step*2-1] + LONG_DIST; + int t = tmp[j-step*2+1] + LONG_DIST; + if( t0 > t ) t0 = t; + t = tmp[j-step-2] + LONG_DIST; 
+ if( t0 > t ) t0 = t; + t = tmp[j-step-1] + DIAG_DIST; + if( t0 > t ) t0 = t; + t = tmp[j-step] + HV_DIST; + if( t0 > t ) t0 = t; + t = tmp[j-step+1] + DIAG_DIST; + if( t0 > t ) t0 = t; + t = tmp[j-step+2] + LONG_DIST; + if( t0 > t ) t0 = t; + t = tmp[j-1] + HV_DIST; + if( t0 > t ) t0 = t; + tmp[j] = t0; + } + } + } + + // backward pass + for( i = size.height - 1; i >= 0; i-- ) + { + float* d = (float*)(dist + i*dststep); + int* tmp = (int*)(temp + (i+BORDER)*step) + BORDER; + + for( j = size.width - 1; j >= 0; j-- ) + { + int t0 = tmp[j]; + if( t0 > HV_DIST ) + { + int t = tmp[j+step*2+1] + LONG_DIST; + if( t0 > t ) t0 = t; + t = tmp[j+step*2-1] + LONG_DIST; + if( t0 > t ) t0 = t; + t = tmp[j+step+2] + LONG_DIST; + if( t0 > t ) t0 = t; + t = tmp[j+step+1] + DIAG_DIST; + if( t0 > t ) t0 = t; + t = tmp[j+step] + HV_DIST; + if( t0 > t ) t0 = t; + t = tmp[j+step-1] + DIAG_DIST; + if( t0 > t ) t0 = t; + t = tmp[j+step-2] + LONG_DIST; + if( t0 > t ) t0 = t; + t = tmp[j+1] + HV_DIST; + if( t0 > t ) t0 = t; + tmp[j] = t0; + } + d[j] = (float)(t0 * scale); + } + } + + return CV_OK; +} + + +static CvStatus CV_STDCALL +icvDistanceTransformEx_5x5_C1R( const uchar* src, int srcstep, int* temp, + int step, float* dist, int dststep, int* labels, int lstep, + CvSize size, const float* metrics ) +{ + const int BORDER = 2; + + int i, j; + const int HV_DIST = CV_FLT_TO_FIX( metrics[0], ICV_DIST_SHIFT ); + const int DIAG_DIST = CV_FLT_TO_FIX( metrics[1], ICV_DIST_SHIFT ); + const int LONG_DIST = CV_FLT_TO_FIX( metrics[2], ICV_DIST_SHIFT ); + const float scale = 1.f/(1 << ICV_DIST_SHIFT); + + srcstep /= sizeof(src[0]); + step /= sizeof(temp[0]); + dststep /= sizeof(dist[0]); + lstep /= sizeof(labels[0]); + + icvInitTopBottom( temp, step, size, BORDER ); + + // forward pass + for( i = 0; i < size.height; i++ ) + { + const uchar* s = src + i*srcstep; + int* tmp = (int*)(temp + (i+BORDER)*step) + BORDER; + int* lls = (int*)(labels + i*lstep); + + for( j = 0; j < BORDER; j++ ) + 
tmp[-j-1] = tmp[size.width + j] = ICV_INIT_DIST0; + + for( j = 0; j < size.width; j++ ) + { + if( !s[j] ) + { + tmp[j] = 0; + //assert( lls[j] != 0 ); + } + else + { + int t0 = ICV_INIT_DIST0, t; + int l0 = 0; + + t = tmp[j-step*2-1] + LONG_DIST; + if( t0 > t ) + { + t0 = t; + l0 = lls[j-lstep*2-1]; + } + t = tmp[j-step*2+1] + LONG_DIST; + if( t0 > t ) + { + t0 = t; + l0 = lls[j-lstep*2+1]; + } + t = tmp[j-step-2] + LONG_DIST; + if( t0 > t ) + { + t0 = t; + l0 = lls[j-lstep-2]; + } + t = tmp[j-step-1] + DIAG_DIST; + if( t0 > t ) + { + t0 = t; + l0 = lls[j-lstep-1]; + } + t = tmp[j-step] + HV_DIST; + if( t0 > t ) + { + t0 = t; + l0 = lls[j-lstep]; + } + t = tmp[j-step+1] + DIAG_DIST; + if( t0 > t ) + { + t0 = t; + l0 = lls[j-lstep+1]; + } + t = tmp[j-step+2] + LONG_DIST; + if( t0 > t ) + { + t0 = t; + l0 = lls[j-lstep+2]; + } + t = tmp[j-1] + HV_DIST; + if( t0 > t ) + { + t0 = t; + l0 = lls[j-1]; + } + + tmp[j] = t0; + lls[j] = l0; + } + } + } + + // backward pass + for( i = size.height - 1; i >= 0; i-- ) + { + float* d = (float*)(dist + i*dststep); + int* tmp = (int*)(temp + (i+BORDER)*step) + BORDER; + int* lls = (int*)(labels + i*lstep); + + for( j = size.width - 1; j >= 0; j-- ) + { + int t0 = tmp[j]; + int l0 = lls[j]; + if( t0 > HV_DIST ) + { + int t = tmp[j+step*2+1] + LONG_DIST; + if( t0 > t ) + { + t0 = t; + l0 = lls[j+lstep*2+1]; + } + t = tmp[j+step*2-1] + LONG_DIST; + if( t0 > t ) + { + t0 = t; + l0 = lls[j+lstep*2-1]; + } + t = tmp[j+step+2] + LONG_DIST; + if( t0 > t ) + { + t0 = t; + l0 = lls[j+lstep+2]; + } + t = tmp[j+step+1] + DIAG_DIST; + if( t0 > t ) + { + t0 = t; + l0 = lls[j+lstep+1]; + } + t = tmp[j+step] + HV_DIST; + if( t0 > t ) + { + t0 = t; + l0 = lls[j+lstep]; + } + t = tmp[j+step-1] + DIAG_DIST; + if( t0 > t ) + { + t0 = t; + l0 = lls[j+lstep-1]; + } + t = tmp[j+step-2] + LONG_DIST; + if( t0 > t ) + { + t0 = t; + l0 = lls[j+lstep-2]; + } + t = tmp[j+1] + HV_DIST; + if( t0 > t ) + { + t0 = t; + l0 = lls[j+1]; + } + tmp[j] = t0; + lls[j] = 
l0; + } + d[j] = (float)(t0 * scale); + } + } + + return CV_OK; +} + + +static CvStatus +icvGetDistanceTransformMask( int maskType, float *metrics ) +{ + if( !metrics ) + return CV_NULLPTR_ERR; + + switch (maskType) + { + case 30: + metrics[0] = 1.0f; + metrics[1] = 1.0f; + break; + + case 31: + metrics[0] = 1.0f; + metrics[1] = 2.0f; + break; + + case 32: + metrics[0] = 0.955f; + metrics[1] = 1.3693f; + break; + + case 50: + metrics[0] = 1.0f; + metrics[1] = 1.0f; + metrics[2] = 2.0f; + break; + + case 51: + metrics[0] = 1.0f; + metrics[1] = 2.0f; + metrics[2] = 3.0f; + break; + + case 52: + metrics[0] = 1.0f; + metrics[1] = 1.4f; + metrics[2] = 2.1969f; + break; + default: + return CV_BADRANGE_ERR; + } + + return CV_OK; +} + +namespace cv +{ + +struct DTColumnInvoker +{ + DTColumnInvoker( const CvMat* _src, CvMat* _dst, const int* _sat_tab, const float* _sqr_tab) + { + src = _src; + dst = _dst; + sat_tab = _sat_tab + src->rows*2 + 1; + sqr_tab = _sqr_tab; + } + + void operator()( const BlockedRange& range ) const + { + int i, i1 = range.begin(), i2 = range.end(); + int m = src->rows; + size_t sstep = src->step, dstep = dst->step/sizeof(float); + AutoBuffer _d(m); + int* d = _d; + + for( i = i1; i < i2; i++ ) + { + const uchar* sptr = src->data.ptr + i + (m-1)*sstep; + float* dptr = dst->data.fl + i; + int j, dist = m-1; + + for( j = m-1; j >= 0; j--, sptr -= sstep ) + { + dist = (dist + 1) & (sptr[0] == 0 ? 
0 : -1); + d[j] = dist; + } + + dist = m-1; + for( j = 0; j < m; j++, dptr += dstep ) + { + dist = dist + 1 - sat_tab[dist - d[j]]; + d[j] = dist; + dptr[0] = sqr_tab[dist]; + } + } + } + + const CvMat* src; + CvMat* dst; + const int* sat_tab; + const float* sqr_tab; +}; + + +struct DTRowInvoker +{ + DTRowInvoker( CvMat* _dst, const float* _sqr_tab, const float* _inv_tab ) + { + dst = _dst; + sqr_tab = _sqr_tab; + inv_tab = _inv_tab; + } + + void operator()( const BlockedRange& range ) const + { + const float inf = 1e15f; + int i, i1 = range.begin(), i2 = range.end(); + int n = dst->cols; + AutoBuffer _buf((n+2)*2*sizeof(float) + (n+2)*sizeof(int)); + float* f = (float*)(uchar*)_buf; + float* z = f + n; + int* v = alignPtr((int*)(z + n + 1), sizeof(int)); + + for( i = i1; i < i2; i++ ) + { + float* d = (float*)(dst->data.ptr + i*dst->step); + int p, q, k; + + v[0] = 0; + z[0] = -inf; + z[1] = inf; + f[0] = d[0]; + + for( q = 1, k = 0; q < n; q++ ) + { + float fq = d[q]; + f[q] = fq; + + for(;;k--) + { + p = v[k]; + float s = (fq + sqr_tab[q] - d[p] - sqr_tab[p])*inv_tab[q - p]; + if( s > z[k] ) + { + k++; + v[k] = q; + z[k] = s; + z[k+1] = inf; + break; + } + } + } + + for( q = 0, k = 0; q < n; q++ ) + { + while( z[k+1] < q ) + k++; + p = v[k]; + d[q] = std::sqrt(sqr_tab[std::abs(q - p)] + f[p]); + } + } + } + + CvMat* dst; + const float* sqr_tab; + const float* inv_tab; +}; + +} + +static void +icvTrueDistTrans( const CvMat* src, CvMat* dst ) +{ + const float inf = 1e15f; + + if( !CV_ARE_SIZES_EQ( src, dst )) + CV_Error( CV_StsUnmatchedSizes, "" ); + + if( CV_MAT_TYPE(src->type) != CV_8UC1 || + CV_MAT_TYPE(dst->type) != CV_32FC1 ) + CV_Error( CV_StsUnsupportedFormat, + "The input image must have 8uC1 type and the output one must have 32fC1 type" ); + + int i, m = src->rows, n = src->cols; + + cv::AutoBuffer _buf(std::max(m*2*sizeof(float) + (m*3+1)*sizeof(int), n*2*sizeof(float))); + // stage 1: compute 1d distance transform of each column + float* sqr_tab = 
(float*)(uchar*)_buf; + int* sat_tab = cv::alignPtr((int*)(sqr_tab + m*2), sizeof(int)); + int shift = m*2; + + for( i = 0; i < m; i++ ) + sqr_tab[i] = (float)(i*i); + for( i = m; i < m*2; i++ ) + sqr_tab[i] = inf; + for( i = 0; i < shift; i++ ) + sat_tab[i] = 0; + for( ; i <= m*3; i++ ) + sat_tab[i] = i - shift; + + cv::parallel_for(cv::BlockedRange(0, n), cv::DTColumnInvoker(src, dst, sat_tab, sqr_tab)); + + // stage 2: compute modified distance transform for each row + float* inv_tab = sqr_tab + n; + + inv_tab[0] = sqr_tab[0] = 0.f; + for( i = 1; i < n; i++ ) + { + inv_tab[i] = (float)(0.5/i); + sqr_tab[i] = (float)(i*i); + } + + cv::parallel_for(cv::BlockedRange(0, m), cv::DTRowInvoker(dst, sqr_tab, inv_tab)); +} + + +/*********************************** IPP functions *********************************/ + +typedef CvStatus (CV_STDCALL * CvIPPDistTransFunc)( const uchar* src, int srcstep, + void* dst, int dststep, + CvSize size, const void* metrics ); + +typedef CvStatus (CV_STDCALL * CvIPPDistTransFunc2)( uchar* src, int srcstep, + CvSize size, const int* metrics ); + +/***********************************************************************************/ + +typedef CvStatus (CV_STDCALL * CvDistTransFunc)( const uchar* src, int srcstep, + int* temp, int tempstep, + float* dst, int dststep, + CvSize size, const float* metrics ); + + +/****************************************************************************************\ + Non-inplace and Inplace 8u->8u Distance Transform for CityBlock (a.k.a. L1) metric + (C) 2006 by Jay Stavinzky. 
+\****************************************************************************************/ + +//BEGIN ATS ADDITION +/* 8-bit grayscale distance transform function */ +static void +icvDistanceATS_L1_8u( const CvMat* src, CvMat* dst ) +{ + int width = src->cols, height = src->rows; + + int a; + uchar lut[256]; + int x, y; + + const uchar *sbase = src->data.ptr; + uchar *dbase = dst->data.ptr; + int srcstep = src->step; + int dststep = dst->step; + + CV_Assert( CV_IS_MASK_ARR( src ) && CV_MAT_TYPE( dst->type ) == CV_8UC1 ); + CV_Assert( CV_ARE_SIZES_EQ( src, dst )); + + ////////////////////// forward scan //////////////////////// + for( x = 0; x < 256; x++ ) + lut[x] = CV_CAST_8U(x+1); + + //init first pixel to max (we're going to be skipping it) + dbase[0] = (uchar)(sbase[0] == 0 ? 0 : 255); + + //first row (scan west only, skip first pixel) + for( x = 1; x < width; x++ ) + dbase[x] = (uchar)(sbase[x] == 0 ? 0 : lut[dbase[x-1]]); + + for( y = 1; y < height; y++ ) + { + sbase += srcstep; + dbase += dststep; + + //for left edge, scan north only + a = sbase[0] == 0 ? 0 : lut[dbase[-dststep]]; + dbase[0] = (uchar)a; + + for( x = 1; x < width; x++ ) + { + a = sbase[x] == 0 ? 
0 : lut[MIN(a, dbase[x - dststep])]; + dbase[x] = (uchar)a; + } + } + + ////////////////////// backward scan /////////////////////// + + a = dbase[width-1]; + + // do last row east pixel scan here (skip bottom right pixel) + for( x = width - 2; x >= 0; x-- ) + { + a = lut[a]; + dbase[x] = (uchar)(CV_CALC_MIN_8U(a, dbase[x])); + } + + // right edge is the only error case + for( y = height - 2; y >= 0; y-- ) + { + dbase -= dststep; + + // do right edge + a = lut[dbase[width-1+dststep]]; + dbase[width-1] = (uchar)(MIN(a, dbase[width-1])); + + for( x = width - 2; x >= 0; x-- ) + { + int b = dbase[x+dststep]; + a = lut[MIN(a, b)]; + dbase[x] = (uchar)(MIN(a, dbase[x])); + } + } +} +//END ATS ADDITION + + +/* Wrapper function for distance transform group */ +CV_IMPL void +cvDistTransform( const void* srcarr, void* dstarr, + int distType, int maskSize, + const float *mask, + void* labelsarr ) +{ + cv::Ptr temp; + cv::Ptr src_copy; + cv::Ptr st; + + float _mask[5] = {0}; + CvMat srcstub, *src = (CvMat*)srcarr; + CvMat dststub, *dst = (CvMat*)dstarr; + CvMat lstub, *labels = (CvMat*)labelsarr; + CvSize size; + //CvIPPDistTransFunc ipp_func = 0; + //CvIPPDistTransFunc2 ipp_inp_func = 0; + + src = cvGetMat( src, &srcstub ); + dst = cvGetMat( dst, &dststub ); + + if( !CV_IS_MASK_ARR( src ) || (CV_MAT_TYPE( dst->type ) != CV_32FC1 && + (CV_MAT_TYPE(dst->type) != CV_8UC1 || distType != CV_DIST_L1 || labels)) ) + CV_Error( CV_StsUnsupportedFormat, + "source image must be 8uC1 and the distance map must be 32fC1 " + "(or 8uC1 in case of simple L1 distance transform)" ); + + if( !CV_ARE_SIZES_EQ( src, dst )) + CV_Error( CV_StsUnmatchedSizes, "the source and the destination images must be of the same size" ); + + if( maskSize != CV_DIST_MASK_3 && maskSize != CV_DIST_MASK_5 && maskSize != CV_DIST_MASK_PRECISE ) + CV_Error( CV_StsBadSize, "Mask size should be 3 or 5 or 0 (presize)" ); + + if( distType == CV_DIST_C || distType == CV_DIST_L1 ) + maskSize = !labels ? 
CV_DIST_MASK_3 : CV_DIST_MASK_5; + else if( distType == CV_DIST_L2 && labels ) + maskSize = CV_DIST_MASK_5; + + if( maskSize == CV_DIST_MASK_PRECISE ) + { + icvTrueDistTrans( src, dst ); + return; + } + + if( labels ) + { + labels = cvGetMat( labels, &lstub ); + if( CV_MAT_TYPE( labels->type ) != CV_32SC1 ) + CV_Error( CV_StsUnsupportedFormat, "the output array of labels must be 32sC1" ); + + if( !CV_ARE_SIZES_EQ( labels, dst )) + CV_Error( CV_StsUnmatchedSizes, "the array of labels has a different size" ); + + if( maskSize == CV_DIST_MASK_3 ) + CV_Error( CV_StsNotImplemented, + "3x3 mask can not be used for \"labeled\" distance transform. Use 5x5 mask" ); + } + + if( distType == CV_DIST_C || distType == CV_DIST_L1 || distType == CV_DIST_L2 ) + { + icvGetDistanceTransformMask( (distType == CV_DIST_C ? 0 : + distType == CV_DIST_L1 ? 1 : 2) + maskSize*10, _mask ); + } + else if( distType == CV_DIST_USER ) + { + if( !mask ) + CV_Error( CV_StsNullPtr, "" ); + + memcpy( _mask, mask, (maskSize/2 + 1)*sizeof(float)); + } + + /*if( !labels ) + { + if( CV_MAT_TYPE(dst->type) == CV_32FC1 ) + ipp_func = (CvIPPDistTransFunc)(maskSize == CV_DIST_MASK_3 ? + icvDistanceTransform_3x3_8u32f_C1R_p : icvDistanceTransform_5x5_8u32f_C1R_p); + else if( src->data.ptr != dst->data.ptr ) + ipp_func = (CvIPPDistTransFunc)icvDistanceTransform_3x3_8u_C1R_p; + else + ipp_inp_func = icvDistanceTransform_3x3_8u_C1IR_p; + }*/ + + size = cvGetMatSize(src); + + /*if( (ipp_func || ipp_inp_func) && src->cols >= 4 && src->rows >= 2 ) + { + int _imask[3]; + _imask[0] = cvRound(_mask[0]); + _imask[1] = cvRound(_mask[1]); + _imask[2] = cvRound(_mask[2]); + + if( ipp_func ) + { + IPPI_CALL( ipp_func( src->data.ptr, src->step, + dst->data.fl, dst->step, size, + CV_MAT_TYPE(dst->type) == CV_8UC1 ? 
+ (void*)_imask : (void*)_mask )); + } + else + { + IPPI_CALL( ipp_inp_func( src->data.ptr, src->step, size, _imask )); + } + } + else*/ if( CV_MAT_TYPE(dst->type) == CV_8UC1 ) + { + icvDistanceATS_L1_8u( src, dst ); + } + else + { + int border = maskSize == CV_DIST_MASK_3 ? 1 : 2; + temp = cvCreateMat( size.height + border*2, size.width + border*2, CV_32SC1 ); + + if( !labels ) + { + CvDistTransFunc func = maskSize == CV_DIST_MASK_3 ? + icvDistanceTransform_3x3_C1R : + icvDistanceTransform_5x5_C1R; + + func( src->data.ptr, src->step, temp->data.i, temp->step, + dst->data.fl, dst->step, size, _mask ); + } + else + { + CvSeq *contours = 0; + CvPoint top_left = {0,0}, bottom_right = {size.width-1,size.height-1}; + int label; + + st = cvCreateMemStorage(); + src_copy = cvCreateMat( size.height, size.width, src->type ); + cvCmpS( src, 0, src_copy, CV_CMP_EQ ); + cvFindContours( src_copy, st, &contours, sizeof(CvContour), + CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE ); + cvZero( labels ); + for( label = 1; contours != 0; contours = contours->h_next, label++ ) + { + CvScalar area_color = cvScalarAll(label); + cvDrawContours( labels, contours, area_color, area_color, -255, -1, 8 ); + } + + cvCopy( src, src_copy ); + cvRectangle( src_copy, top_left, bottom_right, cvScalarAll(255), 1, 8 ); + + icvDistanceTransformEx_5x5_C1R( src_copy->data.ptr, src_copy->step, temp->data.i, temp->step, + dst->data.fl, dst->step, labels->data.i, labels->step, size, _mask ); + } + } +} + +void cv::distanceTransform( InputArray _src, OutputArray _dst, OutputArray _labels, + int distanceType, int maskSize ) +{ + Mat src = _src.getMat(); + _dst.create(src.size(), CV_32F); + _labels.create(src.size(), CV_32S); + CvMat c_src = src, c_dst = _dst.getMat(), c_labels = _labels.getMat(); + cvDistTransform(&c_src, &c_dst, distanceType, maskSize, 0, &c_labels); +} + +void cv::distanceTransform( InputArray _src, OutputArray _dst, + int distanceType, int maskSize ) +{ + Mat src = _src.getMat(); + 
_dst.create(src.size(), CV_32F); + Mat dst = _dst.getMat(); + CvMat c_src = src, c_dst = _dst.getMat(); + cvDistTransform(&c_src, &c_dst, distanceType, maskSize, 0, 0); +} + +/* End of file. */ diff --git a/opencv/imgproc/emd.cpp b/opencv/imgproc/emd.cpp new file mode 100644 index 0000000..734e7aa --- /dev/null +++ b/opencv/imgproc/emd.cpp @@ -0,0 +1,1162 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* + Partially based on Yossi Rubner code: + ========================================================================= + emd.c + + Last update: 3/14/98 + + An implementation of the Earth Movers Distance. + Based of the solution for the Transportation problem as described in + "Introduction to Mathematical Programming" by F. S. Hillier and + G. J. Lieberman, McGraw-Hill, 1990. + + Copyright (C) 1998 Yossi Rubner + Computer Science Department, Stanford University + E-Mail: rubner@cs.stanford.edu URL: http://vision.stanford.edu/~rubner + ========================================================================== +*/ +#include "precomp.hpp" + +#define MAX_ITERATIONS 500 +#define CV_EMD_INF ((float)1e20) +#define CV_EMD_EPS ((float)1e-5) + +/* CvNode1D is used for lists, representing 1D sparse array */ +typedef struct CvNode1D +{ + float val; + struct CvNode1D *next; +} +CvNode1D; + +/* CvNode2D is used for lists, representing 2D sparse matrix */ +typedef struct CvNode2D +{ + float val; + struct CvNode2D *next[2]; /* next row & next column */ + int i, j; +} +CvNode2D; + + +typedef struct CvEMDState +{ + int ssize, dsize; + + float **cost; + CvNode2D *_x; + CvNode2D *end_x; + CvNode2D *enter_x; + char **is_x; + + CvNode2D **rows_x; + CvNode2D **cols_x; + + CvNode1D *u; + CvNode1D *v; + + int* idx1; + int* idx2; + + /* find_loop buffers */ + CvNode2D **loop; + char *is_used; + + /* russel buffers */ + float *s; + float *d; + 
float **delta; + + float weight, max_cost; + char *buffer; +} +CvEMDState; + +/* static function declaration */ +static int icvInitEMD( const float *signature1, int size1, + const float *signature2, int size2, + int dims, CvDistanceFunction dist_func, void *user_param, + const float* cost, int cost_step, + CvEMDState * state, float *lower_bound, + cv::AutoBuffer& _buffer ); + +static int icvFindBasicVariables( float **cost, char **is_x, + CvNode1D * u, CvNode1D * v, int ssize, int dsize ); + +static float icvIsOptimal( float **cost, char **is_x, + CvNode1D * u, CvNode1D * v, + int ssize, int dsize, CvNode2D * enter_x ); + +static void icvRussel( CvEMDState * state ); + + +static bool icvNewSolution( CvEMDState * state ); +static int icvFindLoop( CvEMDState * state ); + +static void icvAddBasicVariable( CvEMDState * state, + int min_i, int min_j, + CvNode1D * prev_u_min_i, + CvNode1D * prev_v_min_j, + CvNode1D * u_head ); + +static float icvDistL2( const float *x, const float *y, void *user_param ); +static float icvDistL1( const float *x, const float *y, void *user_param ); +static float icvDistC( const float *x, const float *y, void *user_param ); + +/* The main function */ +CV_IMPL float cvCalcEMD2( const CvArr* signature_arr1, + const CvArr* signature_arr2, + int dist_type, + CvDistanceFunction dist_func, + const CvArr* cost_matrix, + CvArr* flow_matrix, + float *lower_bound, + void *user_param ) +{ + cv::AutoBuffer local_buf; + CvEMDState state; + float emd = 0; + + memset( &state, 0, sizeof(state)); + + double total_cost = 0; + int result = 0; + float eps, min_delta; + CvNode2D *xp = 0; + CvMat sign_stub1, *signature1 = (CvMat*)signature_arr1; + CvMat sign_stub2, *signature2 = (CvMat*)signature_arr2; + CvMat cost_stub, *cost = &cost_stub; + CvMat flow_stub, *flow = (CvMat*)flow_matrix; + int dims, size1, size2; + + signature1 = cvGetMat( signature1, &sign_stub1 ); + signature2 = cvGetMat( signature2, &sign_stub2 ); + + if( signature1->cols != signature2->cols 
) + CV_Error( CV_StsUnmatchedSizes, "The arrays must have equal number of columns (which is number of dimensions but 1)" ); + + dims = signature1->cols - 1; + size1 = signature1->rows; + size2 = signature2->rows; + + if( !CV_ARE_TYPES_EQ( signature1, signature2 )) + CV_Error( CV_StsUnmatchedFormats, "The array must have equal types" ); + + if( CV_MAT_TYPE( signature1->type ) != CV_32FC1 ) + CV_Error( CV_StsUnsupportedFormat, "The signatures must be 32fC1" ); + + if( flow ) + { + flow = cvGetMat( flow, &flow_stub ); + + if( flow->rows != size1 || flow->cols != size2 ) + CV_Error( CV_StsUnmatchedSizes, + "The flow matrix size does not match to the signatures' sizes" ); + + if( CV_MAT_TYPE( flow->type ) != CV_32FC1 ) + CV_Error( CV_StsUnsupportedFormat, "The flow matrix must be 32fC1" ); + } + + cost->data.fl = 0; + cost->step = 0; + + if( dist_type < 0 ) + { + if( cost_matrix ) + { + if( dist_func ) + CV_Error( CV_StsBadArg, + "Only one of cost matrix or distance function should be non-NULL in case of user-defined distance" ); + + if( lower_bound ) + CV_Error( CV_StsBadArg, + "The lower boundary can not be calculated if the cost matrix is used" ); + + cost = cvGetMat( cost_matrix, &cost_stub ); + if( cost->rows != size1 || cost->cols != size2 ) + CV_Error( CV_StsUnmatchedSizes, + "The cost matrix size does not match to the signatures' sizes" ); + + if( CV_MAT_TYPE( cost->type ) != CV_32FC1 ) + CV_Error( CV_StsUnsupportedFormat, "The cost matrix must be 32fC1" ); + } + else if( !dist_func ) + CV_Error( CV_StsNullPtr, "In case of user-defined distance Distance function is undefined" ); + } + else + { + if( dims == 0 ) + CV_Error( CV_StsBadSize, + "Number of dimensions can be 0 only if a user-defined metric is used" ); + user_param = (void *) (size_t)dims; + switch (dist_type) + { + case CV_DIST_L1: + dist_func = icvDistL1; + break; + case CV_DIST_L2: + dist_func = icvDistL2; + break; + case CV_DIST_C: + dist_func = icvDistC; + break; + default: + CV_Error( 
CV_StsBadFlag, "Bad or unsupported metric type" ); + } + } + + result = icvInitEMD( signature1->data.fl, size1, + signature2->data.fl, size2, + dims, dist_func, user_param, + cost->data.fl, cost->step, + &state, lower_bound, local_buf ); + + if( result > 0 && lower_bound ) + { + emd = *lower_bound; + return emd; + } + + eps = CV_EMD_EPS * state.max_cost; + + /* if ssize = 1 or dsize = 1 then we are done, else ... */ + if( state.ssize > 1 && state.dsize > 1 ) + { + int itr; + + for( itr = 1; itr < MAX_ITERATIONS; itr++ ) + { + /* find basic variables */ + result = icvFindBasicVariables( state.cost, state.is_x, + state.u, state.v, state.ssize, state.dsize ); + if( result < 0 ) + break; + + /* check for optimality */ + min_delta = icvIsOptimal( state.cost, state.is_x, + state.u, state.v, + state.ssize, state.dsize, state.enter_x ); + + if( min_delta == CV_EMD_INF ) + CV_Error( CV_StsNoConv, "" ); + + /* if no negative deltamin, we found the optimal solution */ + if( min_delta >= -eps ) + break; + + /* improve solution */ + if(!icvNewSolution( &state )) + CV_Error( CV_StsNoConv, "" ); + } + } + + /* compute the total flow */ + for( xp = state._x; xp < state.end_x; xp++ ) + { + float val = xp->val; + int i = xp->i; + int j = xp->j; + + if( xp == state.enter_x ) + continue; + + int ci = state.idx1[i]; + int cj = state.idx2[j]; + + if( ci >= 0 && cj >= 0 ) + { + total_cost += (double)val * state.cost[i][j]; + if( flow ) + ((float*)(flow->data.ptr + flow->step*ci))[cj] = val; + } + } + + emd = (float) (total_cost / state.weight); + return emd; +} + + +/************************************************************************************\ +* initialize structure, allocate buffers and generate initial golution * +\************************************************************************************/ +static int icvInitEMD( const float* signature1, int size1, + const float* signature2, int size2, + int dims, CvDistanceFunction dist_func, void* user_param, + const float* cost, 
int cost_step, + CvEMDState* state, float* lower_bound, + cv::AutoBuffer& _buffer ) +{ + float s_sum = 0, d_sum = 0, diff; + int i, j; + int ssize = 0, dsize = 0; + int equal_sums = 1; + int buffer_size; + float max_cost = 0; + char *buffer, *buffer_end; + + memset( state, 0, sizeof( *state )); + assert( cost_step % sizeof(float) == 0 ); + cost_step /= sizeof(float); + + /* calculate buffer size */ + buffer_size = (size1+1) * (size2+1) * (sizeof( float ) + /* cost */ + sizeof( char ) + /* is_x */ + sizeof( float )) + /* delta matrix */ + (size1 + size2 + 2) * (sizeof( CvNode2D ) + /* _x */ + sizeof( CvNode2D * ) + /* cols_x & rows_x */ + sizeof( CvNode1D ) + /* u & v */ + sizeof( float ) + /* s & d */ + sizeof( int ) + sizeof(CvNode2D*)) + /* idx1 & idx2 */ + (size1+1) * (sizeof( float * ) + sizeof( char * ) + /* rows pointers for */ + sizeof( float * )) + 256; /* cost, is_x and delta */ + + if( buffer_size < (int) (dims * 2 * sizeof( float ))) + { + buffer_size = dims * 2 * sizeof( float ); + } + + /* allocate buffers */ + _buffer.allocate(buffer_size); + + state->buffer = buffer = _buffer; + buffer_end = buffer + buffer_size; + + state->idx1 = (int*) buffer; + buffer += (size1 + 1) * sizeof( int ); + + state->idx2 = (int*) buffer; + buffer += (size2 + 1) * sizeof( int ); + + state->s = (float *) buffer; + buffer += (size1 + 1) * sizeof( float ); + + state->d = (float *) buffer; + buffer += (size2 + 1) * sizeof( float ); + + /* sum up the supply and demand */ + for( i = 0; i < size1; i++ ) + { + float weight = signature1[i * (dims + 1)]; + + if( weight > 0 ) + { + s_sum += weight; + state->s[ssize] = weight; + state->idx1[ssize++] = i; + + } + else if( weight < 0 ) + CV_Error(CV_StsOutOfRange, ""); + } + + for( i = 0; i < size2; i++ ) + { + float weight = signature2[i * (dims + 1)]; + + if( weight > 0 ) + { + d_sum += weight; + state->d[dsize] = weight; + state->idx2[dsize++] = i; + } + else if( weight < 0 ) + CV_Error(CV_StsOutOfRange, ""); + } + + if( ssize == 0 
|| dsize == 0 ) + CV_Error(CV_StsOutOfRange, ""); + + /* if supply different than the demand, add a zero-cost dummy cluster */ + diff = s_sum - d_sum; + if( fabs( diff ) >= CV_EMD_EPS * s_sum ) + { + equal_sums = 0; + if( diff < 0 ) + { + state->s[ssize] = -diff; + state->idx1[ssize++] = -1; + } + else + { + state->d[dsize] = diff; + state->idx2[dsize++] = -1; + } + } + + state->ssize = ssize; + state->dsize = dsize; + state->weight = s_sum > d_sum ? s_sum : d_sum; + + if( lower_bound && equal_sums ) /* check lower bound */ + { + int sz1 = size1 * (dims + 1), sz2 = size2 * (dims + 1); + float lb = 0; + + float* xs = (float *) buffer; + float* xd = xs + dims; + + memset( xs, 0, dims*sizeof(xs[0])); + memset( xd, 0, dims*sizeof(xd[0])); + + for( j = 0; j < sz1; j += dims + 1 ) + { + float weight = signature1[j]; + for( i = 0; i < dims; i++ ) + xs[i] += signature1[j + i + 1] * weight; + } + + for( j = 0; j < sz2; j += dims + 1 ) + { + float weight = signature2[j]; + for( i = 0; i < dims; i++ ) + xd[i] += signature2[j + i + 1] * weight; + } + + lb = dist_func( xs, xd, user_param ) / state->weight; + i = *lower_bound <= lb; + *lower_bound = lb; + if( i ) + return 1; + } + + /* assign pointers */ + state->is_used = (char *) buffer; + /* init delta matrix */ + state->delta = (float **) buffer; + buffer += ssize * sizeof( float * ); + + for( i = 0; i < ssize; i++ ) + { + state->delta[i] = (float *) buffer; + buffer += dsize * sizeof( float ); + } + + state->loop = (CvNode2D **) buffer; + buffer += (ssize + dsize + 1) * sizeof(CvNode2D*); + + state->_x = state->end_x = (CvNode2D *) buffer; + buffer += (ssize + dsize) * sizeof( CvNode2D ); + + /* init cost matrix */ + state->cost = (float **) buffer; + buffer += ssize * sizeof( float * ); + + /* compute the distance matrix */ + for( i = 0; i < ssize; i++ ) + { + int ci = state->idx1[i]; + + state->cost[i] = (float *) buffer; + buffer += dsize * sizeof( float ); + + if( ci >= 0 ) + { + for( j = 0; j < dsize; j++ ) + { + int 
cj = state->idx2[j]; + if( cj < 0 ) + state->cost[i][j] = 0; + else + { + float val; + if( dist_func ) + { + val = dist_func( signature1 + ci * (dims + 1) + 1, + signature2 + cj * (dims + 1) + 1, + user_param ); + } + else + { + assert( cost ); + val = cost[cost_step*ci + cj]; + } + state->cost[i][j] = val; + if( max_cost < val ) + max_cost = val; + } + } + } + else + { + for( j = 0; j < dsize; j++ ) + state->cost[i][j] = 0; + } + } + + state->max_cost = max_cost; + + memset( buffer, 0, buffer_end - buffer ); + + state->rows_x = (CvNode2D **) buffer; + buffer += ssize * sizeof( CvNode2D * ); + + state->cols_x = (CvNode2D **) buffer; + buffer += dsize * sizeof( CvNode2D * ); + + state->u = (CvNode1D *) buffer; + buffer += ssize * sizeof( CvNode1D ); + + state->v = (CvNode1D *) buffer; + buffer += dsize * sizeof( CvNode1D ); + + /* init is_x matrix */ + state->is_x = (char **) buffer; + buffer += ssize * sizeof( char * ); + + for( i = 0; i < ssize; i++ ) + { + state->is_x[i] = buffer; + buffer += dsize; + } + + assert( buffer <= buffer_end ); + + icvRussel( state ); + + state->enter_x = (state->end_x)++; + return 0; +} + + +/****************************************************************************************\ +* icvFindBasicVariables * +\****************************************************************************************/ +static int icvFindBasicVariables( float **cost, char **is_x, + CvNode1D * u, CvNode1D * v, int ssize, int dsize ) +{ + int i, j, found; + int u_cfound, v_cfound; + CvNode1D u0_head, u1_head, *cur_u, *prev_u; + CvNode1D v0_head, v1_head, *cur_v, *prev_v; + + /* initialize the rows list (u) and the columns list (v) */ + u0_head.next = u; + for( i = 0; i < ssize; i++ ) + { + u[i].next = u + i + 1; + } + u[ssize - 1].next = 0; + u1_head.next = 0; + + v0_head.next = ssize > 1 ? 
v + 1 : 0; + for( i = 1; i < dsize; i++ ) + { + v[i].next = v + i + 1; + } + v[dsize - 1].next = 0; + v1_head.next = 0; + + /* there are ssize+dsize variables but only ssize+dsize-1 independent equations, + so set v[0]=0 */ + v[0].val = 0; + v1_head.next = v; + v1_head.next->next = 0; + + /* loop until all variables are found */ + u_cfound = v_cfound = 0; + while( u_cfound < ssize || v_cfound < dsize ) + { + found = 0; + if( v_cfound < dsize ) + { + /* loop over all marked columns */ + prev_v = &v1_head; + + for( found |= (cur_v = v1_head.next) != 0; cur_v != 0; cur_v = cur_v->next ) + { + float cur_v_val = cur_v->val; + + j = (int)(cur_v - v); + /* find the variables in column j */ + prev_u = &u0_head; + for( cur_u = u0_head.next; cur_u != 0; ) + { + i = (int)(cur_u - u); + if( is_x[i][j] ) + { + /* compute u[i] */ + cur_u->val = cost[i][j] - cur_v_val; + /* ...and add it to the marked list */ + prev_u->next = cur_u->next; + cur_u->next = u1_head.next; + u1_head.next = cur_u; + cur_u = prev_u->next; + } + else + { + prev_u = cur_u; + cur_u = cur_u->next; + } + } + prev_v->next = cur_v->next; + v_cfound++; + } + } + + if( u_cfound < ssize ) + { + /* loop over all marked rows */ + prev_u = &u1_head; + for( found |= (cur_u = u1_head.next) != 0; cur_u != 0; cur_u = cur_u->next ) + { + float cur_u_val = cur_u->val; + float *_cost; + char *_is_x; + + i = (int)(cur_u - u); + _cost = cost[i]; + _is_x = is_x[i]; + /* find the variables in rows i */ + prev_v = &v0_head; + for( cur_v = v0_head.next; cur_v != 0; ) + { + j = (int)(cur_v - v); + if( _is_x[j] ) + { + /* compute v[j] */ + cur_v->val = _cost[j] - cur_u_val; + /* ...and add it to the marked list */ + prev_v->next = cur_v->next; + cur_v->next = v1_head.next; + v1_head.next = cur_v; + cur_v = prev_v->next; + } + else + { + prev_v = cur_v; + cur_v = cur_v->next; + } + } + prev_u->next = cur_u->next; + u_cfound++; + } + } + + if( !found ) + return -1; + } + + return 0; +} + + 
/****************************************************************************************\
*                                   icvIsOptimal                                       *
\****************************************************************************************/
/* Computes the reduced costs c[i][j]-u[i]-v[j] over all non-basic cells and
   records the most negative one in enter_x (the entering variable).
   Returns the minimal reduced cost; >= 0 means the current solution is optimal. */
static float
icvIsOptimal( float **cost, char **is_x,
              CvNode1D * u, CvNode1D * v, int ssize, int dsize, CvNode2D * enter_x )
{
    float delta, min_delta = CV_EMD_INF;
    int i, j, min_i = 0, min_j = 0;

    /* find the minimal cij-ui-vj over all i,j */
    for( i = 0; i < ssize; i++ )
    {
        float u_val = u[i].val;
        float *_cost = cost[i];
        char *_is_x = is_x[i];

        for( j = 0; j < dsize; j++ )
        {
            if( !_is_x[j] )
            {
                delta = _cost[j] - u_val - v[j].val;
                if( min_delta > delta )
                {
                    min_delta = delta;
                    min_i = i;
                    min_j = j;
                }
            }
        }
    }

    enter_x->i = min_i;
    enter_x->j = min_j;

    return min_delta;
}

/****************************************************************************************\
*                                   icvNewSolution                                     *
\****************************************************************************************/
/* Pivot step: brings enter_x into the basis, finds the closed loop it creates
   (icvFindLoop), shifts flow around the loop by the smallest odd-position
   value, and removes the emptied variable from the basis.
   Returns false when no loop exists (degenerate state, no convergence). */
static bool
icvNewSolution( CvEMDState * state )
{
    int i, j;
    float min_val = CV_EMD_INF;
    int steps;
    CvNode2D head, *cur_x, *next_x, *leave_x = 0;
    CvNode2D *enter_x = state->enter_x;
    CvNode2D **loop = state->loop;

    /* enter the new basic variable */
    i = enter_x->i;
    j = enter_x->j;
    state->is_x[i][j] = 1;
    enter_x->next[0] = state->rows_x[i];
    enter_x->next[1] = state->cols_x[j];
    enter_x->val = 0;
    state->rows_x[i] = enter_x;
    state->cols_x[j] = enter_x;

    /* find a chain reaction */
    steps = icvFindLoop( state );

    if( steps == 0 )
        return false;

    /* find the largest value in the loop */
    for( i = 1; i < steps; i += 2 )
    {
        float temp = loop[i]->val;

        if( min_val > temp )
        {
            leave_x = loop[i];
            min_val = temp;
        }
    }

    /* update the loop */
    /* even positions gain flow, odd positions lose it */
    for( i = 0; i < steps; i += 2 )
    {
        float temp0 = loop[i]->val + min_val;
        float temp1 = loop[i + 1]->val - min_val;

        loop[i]->val = temp0;
        loop[i + 1]->val = temp1;
    }

    /* remove the leaving basic variable */
    i = leave_x->i;
    j = leave_x->j;
    state->is_x[i][j] = 0;

    /* unlink leave_x from its row list */
    head.next[0] = state->rows_x[i];
    cur_x = &head;
    while( (next_x = cur_x->next[0]) != leave_x )
    {
        cur_x = next_x;
        assert( cur_x );
    }
    cur_x->next[0] = next_x->next[0];
    state->rows_x[i] = head.next[0];

    /* unlink leave_x from its column list */
    head.next[1] = state->cols_x[j];
    cur_x = &head;
    while( (next_x = cur_x->next[1]) != leave_x )
    {
        cur_x = next_x;
        assert( cur_x );
    }
    cur_x->next[1] = next_x->next[1];
    state->cols_x[j] = head.next[1];

    /* set enter_x to be the new empty slot */
    state->enter_x = leave_x;

    return true;
}



/****************************************************************************************\
*                                    icvFindLoop                                       *
\****************************************************************************************/
/* Depth-first search (with explicit backtracking) for the unique closed loop
   through enter_x that alternates between row moves (odd steps) and column
   moves (even steps) over the current basic variables.
   Fills state->loop and returns the number of cells in the loop (0 if none). */
static int
icvFindLoop( CvEMDState * state )
{
    int i, steps = 1;
    CvNode2D *new_x;
    CvNode2D **loop = state->loop;
    CvNode2D *enter_x = state->enter_x, *_x = state->_x;
    char *is_used = state->is_used;

    memset( is_used, 0, state->ssize + state->dsize );

    new_x = loop[0] = enter_x;
    is_used[enter_x - _x] = 1;
    steps = 1;

    do
    {
        if( (steps & 1) == 1 )
        {
            /* find an unused x in the row */
            new_x = state->rows_x[new_x->i];
            while( new_x != 0 && is_used[new_x - _x] )
                new_x = new_x->next[0];
        }
        else
        {
            /* find an unused x in the column, or the entering x */
            new_x = state->cols_x[new_x->j];
            while( new_x != 0 && is_used[new_x - _x] && new_x != enter_x )
                new_x = new_x->next[1];
            if( new_x == enter_x )
                break;      /* loop closed */
        }

        if( new_x != 0 )        /* found the next x */
        {
            /* add x to the loop */
            loop[steps++] = new_x;
            is_used[new_x - _x] = 1;
        }
        else                    /* didn't find the next x */
        {
            /* backtrack */
            do
            {
                i = steps & 1;
                new_x = loop[steps - 1];
                do
                {
                    new_x = new_x->next[i];
                }
                while( new_x != 0 && is_used[new_x - _x] );

                if( new_x == 0 )
                {
                    is_used[loop[--steps] - _x] = 0;
                }
            }
            while( new_x == 0 && steps > 0 );

            is_used[loop[steps - 1] - _x] = 0;
            loop[steps - 1] = new_x;
            is_used[new_x - _x] = 1;
        }
    }
    while( steps > 0 );

    return steps;
}



/****************************************************************************************\
*                                        icvRussel                                     *
\****************************************************************************************/
/* Russell's approximation method: builds the initial basic feasible solution.
   Repeatedly picks the cell with the most negative delta (cost minus row/column
   maxima) and makes it basic via icvAddBasicVariable, updating the affected
   row/column maxima and deltas until all supply and demand is assigned. */
static void
icvRussel( CvEMDState * state )
{
    int i, j, min_i = -1, min_j = -1;
    float min_delta, diff;
    CvNode1D u_head, *cur_u, *prev_u;
    CvNode1D v_head, *cur_v, *prev_v;
    CvNode1D *prev_u_min_i = 0, *prev_v_min_j = 0, *remember;
    CvNode1D *u = state->u, *v = state->v;
    int ssize = state->ssize, dsize = state->dsize;
    float eps = CV_EMD_EPS * state->max_cost;
    float **cost = state->cost;
    float **delta = state->delta;

    /* initialize the rows list (ur), and the columns list (vr) */
    u_head.next = u;
    for( i = 0; i < ssize; i++ )
    {
        u[i].next = u + i + 1;
    }
    u[ssize - 1].next = 0;

    v_head.next = v;
    for( i = 0; i < dsize; i++ )
    {
        v[i].val = -CV_EMD_INF;
        v[i].next = v + i + 1;
    }
    v[dsize - 1].next = 0;

    /* find the maximum row and column values (ur[i] and vr[j]) */
    for( i = 0; i < ssize; i++ )
    {
        float u_val = -CV_EMD_INF;
        float *cost_row = cost[i];

        for( j = 0; j < dsize; j++ )
        {
            float temp = cost_row[j];

            if( u_val < temp )
                u_val = temp;
            if( v[j].val < temp )
                v[j].val = temp;
        }
        u[i].val = u_val;
    }

    /* compute the delta matrix */
    for( i = 0; i < ssize; i++ )
    {
        float u_val = u[i].val;
        float *delta_row = delta[i];
        float *cost_row = cost[i];

        for( j = 0; j < dsize; j++ )
        {
            delta_row[j] = cost_row[j] - u_val - v[j].val;
        }
    }

    /* find the basic variables */
    do
    {
        /* find the smallest delta[i][j] */
        min_i = -1;
        min_delta = CV_EMD_INF;
        prev_u = &u_head;
        for( cur_u = u_head.next; cur_u != 0; cur_u = cur_u->next )
        {
            i = (int)(cur_u - u);
            float *delta_row = delta[i];

            prev_v = &v_head;
            for( cur_v = v_head.next; cur_v != 0; cur_v = cur_v->next )
            {
                j = (int)(cur_v - v);
                if( min_delta > delta_row[j] )
                {
                    min_delta = delta_row[j];
                    min_i = i;
                    min_j = j;
                    prev_u_min_i = prev_u;
                    prev_v_min_j = prev_v;
                }
                prev_v = cur_v;
            }
            prev_u = cur_u;
        }

        if( min_i < 0 )
            break;

        /* add x[min_i][min_j] to the basis, and adjust supplies and cost */
        /* 'remember' lets us detect below whether the row or the column
           was removed from its list by icvAddBasicVariable */
        remember = prev_u_min_i->next;
        icvAddBasicVariable( state, min_i, min_j, prev_u_min_i, prev_v_min_j, &u_head );

        /* update the necessary delta[][] */
        if( remember == prev_u_min_i->next )    /* line min_i was deleted */
        {
            for( cur_v = v_head.next; cur_v != 0; cur_v = cur_v->next )
            {
                j = (int)(cur_v - v);
                if( cur_v->val == cost[min_i][j] )      /* column j needs updating */
                {
                    float max_val = -CV_EMD_INF;

                    /* find the new maximum value in the column */
                    for( cur_u = u_head.next; cur_u != 0; cur_u = cur_u->next )
                    {
                        float temp = cost[cur_u - u][j];

                        if( max_val < temp )
                            max_val = temp;
                    }

                    /* if needed, adjust the relevant delta[*][j] */
                    diff = max_val - cur_v->val;
                    cur_v->val = max_val;
                    if( fabs( diff ) < eps )
                    {
                        for( cur_u = u_head.next; cur_u != 0; cur_u = cur_u->next )
                            delta[cur_u - u][j] += diff;
                    }
                }
            }
        }
        else                    /* column min_j was deleted */
        {
            for( cur_u = u_head.next; cur_u != 0; cur_u = cur_u->next )
            {
                i = (int)(cur_u - u);
                if( cur_u->val == cost[i][min_j] )      /* row i needs updating */
                {
                    float max_val = -CV_EMD_INF;

                    /* find the new maximum value in the row */
                    for( cur_v = v_head.next; cur_v != 0; cur_v = cur_v->next )
                    {
                        float temp = cost[i][cur_v - v];

                        if( max_val < temp )
                            max_val = temp;
                    }

                    /* if needed, adjust the relevant delta[i][*] */
                    diff = max_val - cur_u->val;
                    cur_u->val = max_val;

                    if( fabs( diff ) < eps )
                    {
                        for( cur_v = v_head.next; cur_v != 0; cur_v = cur_v->next )
                            delta[i][cur_v - v] += diff;
                    }
                }
            }
        }
    }
    while( u_head.next != 0 || v_head.next != 0 );
}



/****************************************************************************************\
*                                   icvAddBasicVariable                                *
\****************************************************************************************/
/* Makes x[min_i][min_j] basic with flow = min(remaining supply, remaining
   demand), links the new node into its row/column lists, and removes the
   exhausted row (or column) from the corresponding active list. */
static void
icvAddBasicVariable( CvEMDState * state,
                     int min_i, int min_j,
                     CvNode1D * prev_u_min_i, CvNode1D * prev_v_min_j, CvNode1D * u_head )
{
    float temp;
    CvNode2D *end_x = state->end_x;

    if( state->s[min_i] < state->d[min_j] + state->weight * CV_EMD_EPS )
    {                           /* supply exhausted */
        temp = state->s[min_i];
        state->s[min_i] = 0;
        state->d[min_j] -= temp;
    }
    else                        /* demand exhausted */
    {
        temp = state->d[min_j];
        state->d[min_j] = 0;
        state->s[min_i] -= temp;
    }

    /* x(min_i,min_j) is a basic variable */
    state->is_x[min_i][min_j] = 1;

    end_x->val = temp;
    end_x->i = min_i;
    end_x->j = min_j;
    end_x->next[0] = state->rows_x[min_i];
    end_x->next[1] = state->cols_x[min_j];
    state->rows_x[min_i] = end_x;
    state->cols_x[min_j] = end_x;
    state->end_x = end_x + 1;

    /* delete supply row only if the empty, and if not last row */
    if( state->s[min_i] == 0 && u_head->next->next != 0 )
        prev_u_min_i->next = prev_u_min_i->next->next;    /* remove row from list */
    else
        prev_v_min_j->next = prev_v_min_j->next->next;    /* remove column from list */
}


/****************************************************************************************\
*                                    standard metrics                                  *
\****************************************************************************************/
/* L1 (Manhattan) distance; user_param carries the dimensionality as an integer. */
static float
icvDistL1( const float *x, const float *y, void *user_param )
{
    int i, dims = (int)(size_t)user_param;
    double s = 0;

    for( i = 0; i < dims; i++ )
    {
        double t = x[i] - y[i];

        s += fabs( t );
    }
    return (float)s;
}

/* L2 (Euclidean) distance; user_param carries the dimensionality as an integer. */
static float
icvDistL2( const float *x, const float *y, void *user_param )
{
    int i, dims = (int)(size_t)user_param;
    double s = 0;

    for( i = 0; i < dims; i++ )
    {
        double t = x[i] - y[i];

        s += t * t;
    }
    return cvSqrt( (float)s );
}

/* Chebyshev (L-infinity) distance; user_param carries the dimensionality. */
static float
icvDistC( const float *x, const float *y, void *user_param )
{
    int i, dims = (int)(size_t)user_param;
    double s = 0;

    for( i = 0; i < dims; i++ )
    {
        double t = fabs( x[i] - y[i] );

        if( s < t )
            s = t;
    }
    return (float)s;
}


/* C++ wrapper over cvCalcEMD2: converts the cv::Mat arguments to CvMat
   headers and forwards; creates the flow matrix only when requested. */
float cv::EMD( InputArray _signature1, InputArray _signature2,
               int distType, InputArray _cost,
               float* lowerBound, OutputArray _flow )
{
    Mat signature1 = _signature1.getMat(), signature2 = _signature2.getMat();
    Mat cost = _cost.getMat(), flow;

    CvMat _csignature1 = signature1;
    CvMat _csignature2 = signature2;
    CvMat _ccost = cost, _cflow;
    if( _flow.needed() )
    {
        _flow.create(signature1.rows, signature2.rows, CV_32F);
        flow = _flow.getMat();
        _cflow = flow;
    }

    return cvCalcEMD2( &_csignature1, &_csignature2, distType, 0, cost.empty() ? 0 : &_ccost,
                       _flow.needed() ? &_cflow : 0, lowerBound, 0 );
}

/* End of file. */
diff --git a/opencv/imgproc/featureselect.cpp b/opencv/imgproc/featureselect.cpp
new file mode 100644
index 0000000..d19b04d
--- /dev/null
+++ b/opencv/imgproc/featureselect.cpp
@@ -0,0 +1,242 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                        Intel License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ +#include "precomp.hpp" +#include +#include + +namespace cv +{ + +template struct greaterThanPtr +{ + bool operator()(const T* a, const T* b) const { return *a > *b; } +}; + +} + +void cv::goodFeaturesToTrack( InputArray _image, OutputArray _corners, + int maxCorners, double qualityLevel, double minDistance, + InputArray _mask, int blockSize, + bool useHarrisDetector, double harrisK ) +{ + Mat image = _image.getMat(), mask = _mask.getMat(); + + CV_Assert( qualityLevel > 0 && minDistance >= 0 && maxCorners >= 0 ); + CV_Assert( mask.empty() || (mask.type() == CV_8UC1 && mask.size() == image.size()) ); + + Mat eig, tmp; + if( useHarrisDetector ) + cornerHarris( image, eig, blockSize, 3, harrisK ); + else + cornerMinEigenVal( image, eig, blockSize, 3 ); + + double maxVal = 0; + minMaxLoc( eig, 0, &maxVal, 0, 0, mask ); + threshold( eig, eig, maxVal*qualityLevel, 0, THRESH_TOZERO ); + dilate( eig, tmp, Mat()); + + Size imgsize = image.size(); + + vector tmpCorners; + + // collect list of pointers to features - put them into temporary image + for( int y = 1; y < imgsize.height - 1; y++ ) + { + const float* eig_data = (const float*)eig.ptr(y); + const float* tmp_data = (const float*)tmp.ptr(y); + const uchar* mask_data = mask.data ? 
mask.ptr(y) : 0; + + for( int x = 1; x < imgsize.width - 1; x++ ) + { + float val = eig_data[x]; + if( val != 0 && val == tmp_data[x] && (!mask_data || mask_data[x]) ) + tmpCorners.push_back(eig_data + x); + } + } + + sort( tmpCorners, greaterThanPtr() ); + vector corners; + size_t i, j, total = tmpCorners.size(), ncorners = 0; + + if(minDistance >= 1) + { + // Partition the image into larger grids + int w = image.cols; + int h = image.rows; + + const int cell_size = cvRound(minDistance); + const int grid_width = (w + cell_size - 1) / cell_size; + const int grid_height = (h + cell_size - 1) / cell_size; + + std::vector > grid(grid_width*grid_height); + + minDistance *= minDistance; + + for( i = 0; i < total; i++ ) + { + int ofs = (int)((const uchar*)tmpCorners[i] - eig.data); + int y = (int)(ofs / eig.step); + int x = (int)((ofs - y*eig.step)/sizeof(float)); + + bool good = true; + + int x_cell = x / cell_size; + int y_cell = y / cell_size; + + int x1 = x_cell - 1; + int y1 = y_cell - 1; + int x2 = x_cell + 1; + int y2 = y_cell + 1; + + // boundary check + x1 = std::max(0, x1); + y1 = std::max(0, y1); + x2 = std::min(grid_width-1, x2); + y2 = std::min(grid_height-1, y2); + + for( int yy = y1; yy <= y2; yy++ ) + { + for( int xx = x1; xx <= x2; xx++ ) + { + vector &m = grid[yy*grid_width + xx]; + + if( m.size() ) + { + for(j = 0; j < m.size(); j++) + { + float dx = x - m[j].x; + float dy = y - m[j].y; + + if( dx*dx + dy*dy < minDistance ) + { + good = false; + goto break_out; + } + } + } + } + } + + break_out: + + if(good) + { + // printf("%d: %d %d -> %d %d, %d, %d -- %d %d %d %d, %d %d, c=%d\n", + // i,x, y, x_cell, y_cell, (int)minDistance, cell_size,x1,y1,x2,y2, grid_width,grid_height,c); + grid[y_cell*grid_width + x_cell].push_back(Point2f((float)x, (float)y)); + + corners.push_back(Point2f((float)x, (float)y)); + ++ncorners; + + if( maxCorners > 0 && (int)ncorners == maxCorners ) + break; + } + } + } + else + { + for( i = 0; i < total; i++ ) + { + int ofs = 
(int)((const uchar*)tmpCorners[i] - eig.data); + int y = (int)(ofs / eig.step); + int x = (int)((ofs - y*eig.step)/sizeof(float)); + + corners.push_back(Point2f((float)x, (float)y)); + ++ncorners; + if( maxCorners > 0 && (int)ncorners == maxCorners ) + break; + } + } + + Mat(corners).convertTo(_corners, _corners.fixedType() ? _corners.type() : CV_32F); + + /* + for( i = 0; i < total; i++ ) + { + int ofs = (int)((const uchar*)tmpCorners[i] - eig.data); + int y = (int)(ofs / eig.step); + int x = (int)((ofs - y*eig.step)/sizeof(float)); + + if( minDistance > 0 ) + { + for( j = 0; j < ncorners; j++ ) + { + float dx = x - corners[j].x; + float dy = y - corners[j].y; + if( dx*dx + dy*dy < minDistance ) + break; + } + if( j < ncorners ) + continue; + } + + corners.push_back(Point2f((float)x, (float)y)); + ++ncorners; + if( maxCorners > 0 && (int)ncorners == maxCorners ) + break; + } +*/ +} + +CV_IMPL void +cvGoodFeaturesToTrack( const void* _image, void*, void*, + CvPoint2D32f* _corners, int *_corner_count, + double quality_level, double min_distance, + const void* _maskImage, int block_size, + int use_harris, double harris_k ) +{ + cv::Mat image = cv::cvarrToMat(_image), mask; + cv::vector corners; + + if( _maskImage ) + mask = cv::cvarrToMat(_maskImage); + + CV_Assert( _corners && _corner_count ); + cv::goodFeaturesToTrack( image, corners, *_corner_count, quality_level, + min_distance, mask, block_size, use_harris != 0, harris_k ); + + size_t i, ncorners = corners.size(); + for( i = 0; i < ncorners; i++ ) + _corners[i] = corners[i]; + *_corner_count = (int)ncorners; +} + +/* End of file. */ diff --git a/opencv/imgproc/featuretree.cpp b/opencv/imgproc/featuretree.cpp new file mode 100644 index 0000000..2c96d7b --- /dev/null +++ b/opencv/imgproc/featuretree.cpp @@ -0,0 +1,64 @@ +//M*////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#include "precomp.hpp" +#include "_featuretree.h" + +void cvReleaseFeatureTree(CvFeatureTree* tr) +{ + delete tr; +} + +// desc is m x d set of candidate points. +// results is m x k set of row indices of matching points. +// dist is m x k distance to matching points. +void cvFindFeatures(CvFeatureTree* tr, const CvMat* desc, + CvMat* results, CvMat* dist, int k, int emax) +{ + tr->FindFeatures(desc, k, emax, results, dist); +} + +int cvFindFeaturesBoxed(CvFeatureTree* tr, + CvMat* bounds_min, CvMat* bounds_max, + CvMat* results) +{ + return tr->FindOrthoRange(bounds_min, bounds_max, results); +} diff --git a/opencv/imgproc/filter.cpp b/opencv/imgproc/filter.cpp new file mode 100644 index 0000000..6a90d3e --- /dev/null +++ b/opencv/imgproc/filter.cpp @@ -0,0 +1,3063 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" + +/****************************************************************************************\ + Base Image Filter +\****************************************************************************************/ + +/* + Various border types, image boundaries are denoted with '|' + + * BORDER_REPLICATE: aaaaaa|abcdefgh|hhhhhhh + * BORDER_REFLECT: fedcba|abcdefgh|hgfedcb + * BORDER_REFLECT_101: gfedcb|abcdefgh|gfedcba + * BORDER_WRAP: cdefgh|abcdefgh|abcdefg + * BORDER_CONSTANT: iiiiii|abcdefgh|iiiiiii with some specified 'i' + */ +int cv::borderInterpolate( int p, int len, int borderType ) +{ + if( (unsigned)p < (unsigned)len ) + ; + else if( borderType == BORDER_REPLICATE ) + p = p < 0 ? 
0 : len - 1; + else if( borderType == BORDER_REFLECT || borderType == BORDER_REFLECT_101 ) + { + int delta = borderType == BORDER_REFLECT_101; + if( len == 1 ) + return 0; + do + { + if( p < 0 ) + p = -p - 1 + delta; + else + p = len - 1 - (p - len) - delta; + } + while( (unsigned)p >= (unsigned)len ); + } + else if( borderType == BORDER_WRAP ) + { + if( p < 0 ) + p -= ((p-len+1)/len)*len; + if( p >= len ) + p %= len; + } + else if( borderType == BORDER_CONSTANT ) + p = -1; + else + CV_Error( CV_StsBadArg, "Unknown/unsupported border type" ); + return p; +} + + +namespace cv +{ + +BaseRowFilter::BaseRowFilter() { ksize = anchor = -1; } +BaseRowFilter::~BaseRowFilter() {} + +BaseColumnFilter::BaseColumnFilter() { ksize = anchor = -1; } +BaseColumnFilter::~BaseColumnFilter() {} +void BaseColumnFilter::reset() {} + +BaseFilter::BaseFilter() { ksize = Size(-1,-1); anchor = Point(-1,-1); } +BaseFilter::~BaseFilter() {} +void BaseFilter::reset() {} + +FilterEngine::FilterEngine() +{ + srcType = dstType = bufType = -1; + rowBorderType = columnBorderType = BORDER_REPLICATE; + bufStep = startY = startY0 = endY = rowCount = dstY = 0; + maxWidth = 0; + + wholeSize = Size(-1,-1); +} + + +FilterEngine::FilterEngine( const Ptr& _filter2D, + const Ptr& _rowFilter, + const Ptr& _columnFilter, + int _srcType, int _dstType, int _bufType, + int _rowBorderType, int _columnBorderType, + const Scalar& _borderValue ) +{ + init(_filter2D, _rowFilter, _columnFilter, _srcType, _dstType, _bufType, + _rowBorderType, _columnBorderType, _borderValue); +} + +FilterEngine::~FilterEngine() +{ +} + + +void FilterEngine::init( const Ptr& _filter2D, + const Ptr& _rowFilter, + const Ptr& _columnFilter, + int _srcType, int _dstType, int _bufType, + int _rowBorderType, int _columnBorderType, + const Scalar& _borderValue ) +{ + _srcType = CV_MAT_TYPE(_srcType); + _bufType = CV_MAT_TYPE(_bufType); + _dstType = CV_MAT_TYPE(_dstType); + + srcType = _srcType; + int srcElemSize = (int)getElemSize(srcType); + 
dstType = _dstType; + bufType = _bufType; + + filter2D = _filter2D; + rowFilter = _rowFilter; + columnFilter = _columnFilter; + + if( _columnBorderType < 0 ) + _columnBorderType = _rowBorderType; + + rowBorderType = _rowBorderType; + columnBorderType = _columnBorderType; + + CV_Assert( columnBorderType != BORDER_WRAP ); + + if( isSeparable() ) + { + CV_Assert( !rowFilter.empty() && !columnFilter.empty() ); + ksize = Size(rowFilter->ksize, columnFilter->ksize); + anchor = Point(rowFilter->anchor, columnFilter->anchor); + } + else + { + CV_Assert( bufType == srcType ); + ksize = filter2D->ksize; + anchor = filter2D->anchor; + } + + CV_Assert( 0 <= anchor.x && anchor.x < ksize.width && + 0 <= anchor.y && anchor.y < ksize.height ); + + borderElemSize = srcElemSize/(CV_MAT_DEPTH(srcType) >= CV_32S ? sizeof(int) : 1); + int borderLength = std::max(ksize.width - 1, 1); + borderTab.resize(borderLength*borderElemSize); + + maxWidth = bufStep = 0; + constBorderRow.clear(); + + if( rowBorderType == BORDER_CONSTANT || columnBorderType == BORDER_CONSTANT ) + { + constBorderValue.resize(srcElemSize*borderLength); + scalarToRawData(_borderValue, &constBorderValue[0], srcType, + borderLength*CV_MAT_CN(srcType)); + } + + wholeSize = Size(-1,-1); +} + +static const int VEC_ALIGN = CV_MALLOC_ALIGN; + +int FilterEngine::start(Size _wholeSize, Rect _roi, int _maxBufRows) +{ + int i, j; + + wholeSize = _wholeSize; + roi = _roi; + CV_Assert( roi.x >= 0 && roi.y >= 0 && roi.width >= 0 && roi.height >= 0 && + roi.x + roi.width <= wholeSize.width && + roi.y + roi.height <= wholeSize.height ); + + int esz = (int)getElemSize(srcType); + int bufElemSize = (int)getElemSize(bufType); + const uchar* constVal = !constBorderValue.empty() ? 
&constBorderValue[0] : 0; + + if( _maxBufRows < 0 ) + _maxBufRows = ksize.height + 3; + _maxBufRows = std::max(_maxBufRows, std::max(anchor.y, ksize.height-anchor.y-1)*2+1); + + if( maxWidth < roi.width || _maxBufRows != (int)rows.size() ) + { + rows.resize(_maxBufRows); + maxWidth = std::max(maxWidth, roi.width); + int cn = CV_MAT_CN(srcType); + srcRow.resize(esz*(maxWidth + ksize.width - 1)); + if( columnBorderType == BORDER_CONSTANT ) + { + constBorderRow.resize(getElemSize(bufType)*(maxWidth + ksize.width - 1 + VEC_ALIGN)); + uchar *dst = alignPtr(&constBorderRow[0], VEC_ALIGN), *tdst; + int n = (int)constBorderValue.size(), N; + N = (maxWidth + ksize.width - 1)*esz; + tdst = isSeparable() ? &srcRow[0] : dst; + + for( i = 0; i < N; i += n ) + { + n = std::min( n, N - i ); + for(j = 0; j < n; j++) + tdst[i+j] = constVal[j]; + } + + if( isSeparable() ) + (*rowFilter)(&srcRow[0], dst, maxWidth, cn); + } + + int maxBufStep = bufElemSize*(int)alignSize(maxWidth + + (!isSeparable() ? ksize.width - 1 : 0),VEC_ALIGN); + ringBuf.resize(maxBufStep*rows.size()+VEC_ALIGN); + } + + // adjust bufstep so that the used part of the ring buffer stays compact in memory + bufStep = bufElemSize*(int)alignSize(roi.width + (!isSeparable() ? ksize.width - 1 : 0),16); + + dx1 = std::max(anchor.x - roi.x, 0); + dx2 = std::max(ksize.width - anchor.x - 1 + roi.x + roi.width - wholeSize.width, 0); + + // recompute border tables + if( dx1 > 0 || dx2 > 0 ) + { + if( rowBorderType == BORDER_CONSTANT ) + { + int nr = isSeparable() ? 1 : (int)rows.size(); + for( i = 0; i < nr; i++ ) + { + uchar* dst = isSeparable() ? 
&srcRow[0] : alignPtr(&ringBuf[0],VEC_ALIGN) + bufStep*i; + memcpy( dst, constVal, dx1*esz ); + memcpy( dst + (roi.width + ksize.width - 1 - dx2)*esz, constVal, dx2*esz ); + } + } + else + { + int xofs1 = std::min(roi.x, anchor.x) - roi.x; + + int btab_esz = borderElemSize, wholeWidth = wholeSize.width; + int* btab = (int*)&borderTab[0]; + + for( i = 0; i < dx1; i++ ) + { + int p0 = (borderInterpolate(i-dx1, wholeWidth, rowBorderType) + xofs1)*btab_esz; + for( j = 0; j < btab_esz; j++ ) + btab[i*btab_esz + j] = p0 + j; + } + + for( i = 0; i < dx2; i++ ) + { + int p0 = (borderInterpolate(wholeWidth + i, wholeWidth, rowBorderType) + xofs1)*btab_esz; + for( j = 0; j < btab_esz; j++ ) + btab[(i + dx1)*btab_esz + j] = p0 + j; + } + } + } + + rowCount = dstY = 0; + startY = startY0 = std::max(roi.y - anchor.y, 0); + endY = std::min(roi.y + roi.height + ksize.height - anchor.y - 1, wholeSize.height); + if( !columnFilter.empty() ) + columnFilter->reset(); + if( !filter2D.empty() ) + filter2D->reset(); + + return startY; +} + + +int FilterEngine::start(const Mat& src, const Rect& _srcRoi, + bool isolated, int maxBufRows) +{ + Rect srcRoi = _srcRoi; + + if( srcRoi == Rect(0,0,-1,-1) ) + srcRoi = Rect(0,0,src.cols,src.rows); + + CV_Assert( srcRoi.x >= 0 && srcRoi.y >= 0 && + srcRoi.width >= 0 && srcRoi.height >= 0 && + srcRoi.x + srcRoi.width <= src.cols && + srcRoi.y + srcRoi.height <= src.rows ); + + Point ofs; + Size wholeSize(src.cols, src.rows); + if( !isolated ) + src.locateROI( wholeSize, ofs ); + start( wholeSize, srcRoi + ofs, maxBufRows ); + + return startY - ofs.y; +} + + +int FilterEngine::remainingInputRows() const +{ + return endY - startY - rowCount; +} + +int FilterEngine::remainingOutputRows() const +{ + return roi.height - dstY; +} + +int FilterEngine::proceed( const uchar* src, int srcstep, int count, + uchar* dst, int dststep ) +{ + CV_Assert( wholeSize.width > 0 && wholeSize.height > 0 ); + + const int *btab = &borderTab[0]; + int esz = 
(int)getElemSize(srcType), btab_esz = borderElemSize; + uchar** brows = &rows[0]; + int bufRows = (int)rows.size(); + int cn = CV_MAT_CN(bufType); + int width = roi.width, kwidth = ksize.width; + int kheight = ksize.height, ay = anchor.y; + int _dx1 = dx1, _dx2 = dx2; + int width1 = roi.width + kwidth - 1; + int xofs1 = std::min(roi.x, anchor.x); + bool isSep = isSeparable(); + bool makeBorder = (_dx1 > 0 || _dx2 > 0) && rowBorderType != BORDER_CONSTANT; + int dy = 0, i = 0; + + src -= xofs1*esz; + count = std::min(count, remainingInputRows()); + + CV_Assert( src && dst && count > 0 ); + + for(;; dst += dststep*i, dy += i) + { + int dcount = bufRows - ay - startY - rowCount + roi.y; + dcount = dcount > 0 ? dcount : bufRows - kheight + 1; + dcount = std::min(dcount, count); + count -= dcount; + for( ; dcount-- > 0; src += srcstep ) + { + int bi = (startY - startY0 + rowCount) % bufRows; + uchar* brow = alignPtr(&ringBuf[0], VEC_ALIGN) + bi*bufStep; + uchar* row = isSep ? &srcRow[0] : brow; + + if( ++rowCount > bufRows ) + { + --rowCount; + ++startY; + } + + memcpy( row + _dx1*esz, src, (width1 - _dx2 - _dx1)*esz ); + + if( makeBorder ) + { + if( btab_esz*(int)sizeof(int) == esz ) + { + const int* isrc = (const int*)src; + int* irow = (int*)row; + + for( i = 0; i < _dx1*btab_esz; i++ ) + irow[i] = isrc[btab[i]]; + for( i = 0; i < _dx2*btab_esz; i++ ) + irow[i + (width1 - _dx2)*btab_esz] = isrc[btab[i+_dx1*btab_esz]]; + } + else + { + for( i = 0; i < _dx1*esz; i++ ) + row[i] = src[btab[i]]; + for( i = 0; i < _dx2*esz; i++ ) + row[i + (width1 - _dx2)*esz] = src[btab[i+_dx1*esz]]; + } + } + + if( isSep ) + (*rowFilter)(row, brow, width, CV_MAT_CN(srcType)); + } + + int max_i = std::min(bufRows, roi.height - (dstY + dy) + (kheight - 1)); + for( i = 0; i < max_i; i++ ) + { + int srcY = borderInterpolate(dstY + dy + i + roi.y - ay, + wholeSize.height, columnBorderType); + if( srcY < 0 ) // can happen only with constant border type + brows[i] = alignPtr(&constBorderRow[0], 
VEC_ALIGN); + else + { + CV_Assert( srcY >= startY ); + if( srcY >= startY + rowCount ) + break; + int bi = (srcY - startY0) % bufRows; + brows[i] = alignPtr(&ringBuf[0], VEC_ALIGN) + bi*bufStep; + } + } + if( i < kheight ) + break; + i -= kheight - 1; + if( isSeparable() ) + (*columnFilter)((const uchar**)brows, dst, dststep, i, roi.width*cn); + else + (*filter2D)((const uchar**)brows, dst, dststep, i, roi.width, cn); + } + + dstY += dy; + CV_Assert( dstY <= roi.height ); + return dy; +} + + +void FilterEngine::apply(const Mat& src, Mat& dst, + const Rect& _srcRoi, Point dstOfs, bool isolated) +{ + CV_Assert( src.type() == srcType && dst.type() == dstType ); + + Rect srcRoi = _srcRoi; + if( srcRoi == Rect(0,0,-1,-1) ) + srcRoi = Rect(0,0,src.cols,src.rows); + + if( srcRoi.area() == 0 ) + return; + + CV_Assert( dstOfs.x >= 0 && dstOfs.y >= 0 && + dstOfs.x + srcRoi.width <= dst.cols && + dstOfs.y + srcRoi.height <= dst.rows ); + + int y = start(src, srcRoi, isolated); + proceed( src.data + y*src.step, (int)src.step, endY - startY, + dst.data + dstOfs.y*dst.step + dstOfs.x*dst.elemSize(), (int)dst.step ); +} + +} + +/****************************************************************************************\ +* Separable linear filter * +\****************************************************************************************/ + +int cv::getKernelType(InputArray filter_kernel, Point anchor) +{ + Mat _kernel = filter_kernel.getMat(); + CV_Assert( _kernel.channels() == 1 ); + int i, sz = _kernel.rows*_kernel.cols; + + Mat kernel; + _kernel.convertTo(kernel, CV_64F); + + const double* coeffs = (double*)kernel.data; + double sum = 0; + int type = KERNEL_SMOOTH + KERNEL_INTEGER; + if( (_kernel.rows == 1 || _kernel.cols == 1) && + anchor.x*2 + 1 == _kernel.cols && + anchor.y*2 + 1 == _kernel.rows ) + type |= (KERNEL_SYMMETRICAL + KERNEL_ASYMMETRICAL); + + for( i = 0; i < sz; i++ ) + { + double a = coeffs[i], b = coeffs[sz - i - 1]; + if( a != b ) + type &= 
~KERNEL_SYMMETRICAL; + if( a != -b ) + type &= ~KERNEL_ASYMMETRICAL; + if( a < 0 ) + type &= ~KERNEL_SMOOTH; + if( a != saturate_cast(a) ) + type &= ~KERNEL_INTEGER; + sum += a; + } + + if( fabs(sum - 1) > FLT_EPSILON*(fabs(sum) + 1) ) + type &= ~KERNEL_SMOOTH; + return type; +} + + +namespace cv +{ + +struct RowNoVec +{ + RowNoVec() {} + RowNoVec(const Mat&) {} + int operator()(const uchar*, uchar*, int, int) const { return 0; } +}; + +struct ColumnNoVec +{ + ColumnNoVec() {} + ColumnNoVec(const Mat&, int, int, double) {} + int operator()(const uchar**, uchar*, int) const { return 0; } +}; + +struct SymmRowSmallNoVec +{ + SymmRowSmallNoVec() {} + SymmRowSmallNoVec(const Mat&, int) {} + int operator()(const uchar*, uchar*, int, int) const { return 0; } +}; + +struct SymmColumnSmallNoVec +{ + SymmColumnSmallNoVec() {} + SymmColumnSmallNoVec(const Mat&, int, int, double) {} + int operator()(const uchar**, uchar*, int) const { return 0; } +}; + +struct FilterNoVec +{ + FilterNoVec() {} + FilterNoVec(const Mat&, int, double) {} + int operator()(const uchar**, uchar*, int) const { return 0; } +}; + + +#if CV_SSE2 + +///////////////////////////////////// 8u-16s & 8u-8u ////////////////////////////////// + +struct RowVec_8u32s +{ + RowVec_8u32s() { smallValues = false; } + RowVec_8u32s( const Mat& _kernel ) + { + kernel = _kernel; + smallValues = true; + int k, ksize = kernel.rows + kernel.cols - 1; + for( k = 0; k < ksize; k++ ) + { + int v = ((const int*)kernel.data)[k]; + if( v < SHRT_MIN || v > SHRT_MAX ) + { + smallValues = false; + break; + } + } + } + + int operator()(const uchar* _src, uchar* _dst, int width, int cn) const + { + if( !checkHardwareSupport(CV_CPU_SSE2) ) + return 0; + + int i = 0, k, _ksize = kernel.rows + kernel.cols - 1; + int* dst = (int*)_dst; + const int* _kx = (const int*)kernel.data; + width *= cn; + + if( smallValues ) + { + for( ; i <= width - 16; i += 16 ) + { + const uchar* src = _src + i; + __m128i f, z = _mm_setzero_si128(), s0 = z, s1 
= z, s2 = z, s3 = z; + __m128i x0, x1, x2, x3; + + for( k = 0; k < _ksize; k++, src += cn ) + { + f = _mm_cvtsi32_si128(_kx[k]); + f = _mm_shuffle_epi32(f, 0); + f = _mm_packs_epi32(f, f); + + x0 = _mm_loadu_si128((const __m128i*)src); + x2 = _mm_unpackhi_epi8(x0, z); + x0 = _mm_unpacklo_epi8(x0, z); + x1 = _mm_mulhi_epi16(x0, f); + x3 = _mm_mulhi_epi16(x2, f); + x0 = _mm_mullo_epi16(x0, f); + x2 = _mm_mullo_epi16(x2, f); + + s0 = _mm_add_epi32(s0, _mm_unpacklo_epi16(x0, x1)); + s1 = _mm_add_epi32(s1, _mm_unpackhi_epi16(x0, x1)); + s2 = _mm_add_epi32(s2, _mm_unpacklo_epi16(x2, x3)); + s3 = _mm_add_epi32(s3, _mm_unpackhi_epi16(x2, x3)); + } + + _mm_store_si128((__m128i*)(dst + i), s0); + _mm_store_si128((__m128i*)(dst + i + 4), s1); + _mm_store_si128((__m128i*)(dst + i + 8), s2); + _mm_store_si128((__m128i*)(dst + i + 12), s3); + } + + for( ; i <= width - 4; i += 4 ) + { + const uchar* src = _src + i; + __m128i f, z = _mm_setzero_si128(), s0 = z, x0, x1; + + for( k = 0; k < _ksize; k++, src += cn ) + { + f = _mm_cvtsi32_si128(_kx[k]); + f = _mm_shuffle_epi32(f, 0); + f = _mm_packs_epi32(f, f); + + x0 = _mm_cvtsi32_si128(*(const int*)src); + x0 = _mm_unpacklo_epi8(x0, z); + x1 = _mm_mulhi_epi16(x0, f); + x0 = _mm_mullo_epi16(x0, f); + s0 = _mm_add_epi32(s0, _mm_unpacklo_epi16(x0, x1)); + } + _mm_store_si128((__m128i*)(dst + i), s0); + } + } + return i; + } + + Mat kernel; + bool smallValues; +}; + + +struct SymmRowSmallVec_8u32s +{ + SymmRowSmallVec_8u32s() { smallValues = false; } + SymmRowSmallVec_8u32s( const Mat& _kernel, int _symmetryType ) + { + kernel = _kernel; + symmetryType = _symmetryType; + smallValues = true; + int k, ksize = kernel.rows + kernel.cols - 1; + for( k = 0; k < ksize; k++ ) + { + int v = ((const int*)kernel.data)[k]; + if( v < SHRT_MIN || v > SHRT_MAX ) + { + smallValues = false; + break; + } + } + } + + int operator()(const uchar* src, uchar* _dst, int width, int cn) const + { + if( !checkHardwareSupport(CV_CPU_SSE2) ) + return 0; + + int i 
= 0, j, k, _ksize = kernel.rows + kernel.cols - 1; + int* dst = (int*)_dst; + bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0; + const int* kx = (const int*)kernel.data + _ksize/2; + if( !smallValues ) + return 0; + + src += (_ksize/2)*cn; + width *= cn; + + __m128i z = _mm_setzero_si128(); + if( symmetrical ) + { + if( _ksize == 1 ) + return 0; + if( _ksize == 3 ) + { + if( kx[0] == 2 && kx[1] == 1 ) + for( ; i <= width - 16; i += 16, src += 16 ) + { + __m128i x0, x1, x2, y0, y1, y2; + x0 = _mm_loadu_si128((__m128i*)(src - cn)); + x1 = _mm_loadu_si128((__m128i*)src); + x2 = _mm_loadu_si128((__m128i*)(src + cn)); + y0 = _mm_unpackhi_epi8(x0, z); + x0 = _mm_unpacklo_epi8(x0, z); + y1 = _mm_unpackhi_epi8(x1, z); + x1 = _mm_unpacklo_epi8(x1, z); + y2 = _mm_unpackhi_epi8(x2, z); + x2 = _mm_unpacklo_epi8(x2, z); + x0 = _mm_add_epi16(x0, _mm_add_epi16(_mm_add_epi16(x1, x1), x2)); + y0 = _mm_add_epi16(y0, _mm_add_epi16(_mm_add_epi16(y1, y1), y2)); + _mm_store_si128((__m128i*)(dst + i), _mm_unpacklo_epi16(x0, z)); + _mm_store_si128((__m128i*)(dst + i + 4), _mm_unpackhi_epi16(x0, z)); + _mm_store_si128((__m128i*)(dst + i + 8), _mm_unpacklo_epi16(y0, z)); + _mm_store_si128((__m128i*)(dst + i + 12), _mm_unpackhi_epi16(y0, z)); + } + else if( kx[0] == -2 && kx[1] == 1 ) + for( ; i <= width - 16; i += 16, src += 16 ) + { + __m128i x0, x1, x2, y0, y1, y2; + x0 = _mm_loadu_si128((__m128i*)(src - cn)); + x1 = _mm_loadu_si128((__m128i*)src); + x2 = _mm_loadu_si128((__m128i*)(src + cn)); + y0 = _mm_unpackhi_epi8(x0, z); + x0 = _mm_unpacklo_epi8(x0, z); + y1 = _mm_unpackhi_epi8(x1, z); + x1 = _mm_unpacklo_epi8(x1, z); + y2 = _mm_unpackhi_epi8(x2, z); + x2 = _mm_unpacklo_epi8(x2, z); + x0 = _mm_add_epi16(x0, _mm_sub_epi16(x2, _mm_add_epi16(x1, x1))); + y0 = _mm_add_epi16(y0, _mm_sub_epi16(y2, _mm_add_epi16(y1, y1))); + _mm_store_si128((__m128i*)(dst + i), _mm_srai_epi32(_mm_unpacklo_epi16(x0, x0),16)); + _mm_store_si128((__m128i*)(dst + i + 4), 
_mm_srai_epi32(_mm_unpackhi_epi16(x0, x0),16)); + _mm_store_si128((__m128i*)(dst + i + 8), _mm_srai_epi32(_mm_unpacklo_epi16(y0, y0),16)); + _mm_store_si128((__m128i*)(dst + i + 12), _mm_srai_epi32(_mm_unpackhi_epi16(y0, y0),16)); + } + else + { + __m128i k0 = _mm_shuffle_epi32(_mm_cvtsi32_si128(kx[0]), 0), + k1 = _mm_shuffle_epi32(_mm_cvtsi32_si128(kx[1]), 0); + k0 = _mm_packs_epi32(k0, k0); + k1 = _mm_packs_epi32(k1, k1); + + for( ; i <= width - 16; i += 16, src += 16 ) + { + __m128i x0, x1, x2, y0, y1, t0, t1, z0, z1, z2, z3; + x0 = _mm_loadu_si128((__m128i*)(src - cn)); + x1 = _mm_loadu_si128((__m128i*)src); + x2 = _mm_loadu_si128((__m128i*)(src + cn)); + y0 = _mm_add_epi16(_mm_unpackhi_epi8(x0, z), _mm_unpackhi_epi8(x2, z)); + x0 = _mm_add_epi16(_mm_unpacklo_epi8(x0, z), _mm_unpacklo_epi8(x2, z)); + y1 = _mm_unpackhi_epi8(x1, z); + x1 = _mm_unpacklo_epi8(x1, z); + + t1 = _mm_mulhi_epi16(x1, k0); + t0 = _mm_mullo_epi16(x1, k0); + x2 = _mm_mulhi_epi16(x0, k1); + x0 = _mm_mullo_epi16(x0, k1); + z0 = _mm_unpacklo_epi16(t0, t1); + z1 = _mm_unpackhi_epi16(t0, t1); + z0 = _mm_add_epi32(z0, _mm_unpacklo_epi16(x0, x2)); + z1 = _mm_add_epi32(z1, _mm_unpackhi_epi16(x0, x2)); + + t1 = _mm_mulhi_epi16(y1, k0); + t0 = _mm_mullo_epi16(y1, k0); + y1 = _mm_mulhi_epi16(y0, k1); + y0 = _mm_mullo_epi16(y0, k1); + z2 = _mm_unpacklo_epi16(t0, t1); + z3 = _mm_unpackhi_epi16(t0, t1); + z2 = _mm_add_epi32(z2, _mm_unpacklo_epi16(y0, y1)); + z3 = _mm_add_epi32(z3, _mm_unpackhi_epi16(y0, y1)); + _mm_store_si128((__m128i*)(dst + i), z0); + _mm_store_si128((__m128i*)(dst + i + 4), z1); + _mm_store_si128((__m128i*)(dst + i + 8), z2); + _mm_store_si128((__m128i*)(dst + i + 12), z3); + } + } + } + else if( _ksize == 5 ) + { + if( kx[0] == -2 && kx[1] == 0 && kx[2] == 1 ) + for( ; i <= width - 16; i += 16, src += 16 ) + { + __m128i x0, x1, x2, y0, y1, y2; + x0 = _mm_loadu_si128((__m128i*)(src - cn*2)); + x1 = _mm_loadu_si128((__m128i*)src); + x2 = _mm_loadu_si128((__m128i*)(src + cn*2)); + y0 
= _mm_unpackhi_epi8(x0, z); + x0 = _mm_unpacklo_epi8(x0, z); + y1 = _mm_unpackhi_epi8(x1, z); + x1 = _mm_unpacklo_epi8(x1, z); + y2 = _mm_unpackhi_epi8(x2, z); + x2 = _mm_unpacklo_epi8(x2, z); + x0 = _mm_add_epi16(x0, _mm_sub_epi16(x2, _mm_add_epi16(x1, x1))); + y0 = _mm_add_epi16(y0, _mm_sub_epi16(y2, _mm_add_epi16(y1, y1))); + _mm_store_si128((__m128i*)(dst + i), _mm_srai_epi32(_mm_unpacklo_epi16(x0, x0),16)); + _mm_store_si128((__m128i*)(dst + i + 4), _mm_srai_epi32(_mm_unpackhi_epi16(x0, x0),16)); + _mm_store_si128((__m128i*)(dst + i + 8), _mm_srai_epi32(_mm_unpacklo_epi16(y0, y0),16)); + _mm_store_si128((__m128i*)(dst + i + 12), _mm_srai_epi32(_mm_unpackhi_epi16(y0, y0),16)); + } + else + { + __m128i k0 = _mm_shuffle_epi32(_mm_cvtsi32_si128(kx[0]), 0), + k1 = _mm_shuffle_epi32(_mm_cvtsi32_si128(kx[1]), 0), + k2 = _mm_shuffle_epi32(_mm_cvtsi32_si128(kx[2]), 0); + k0 = _mm_packs_epi32(k0, k0); + k1 = _mm_packs_epi32(k1, k1); + k2 = _mm_packs_epi32(k2, k2); + + for( ; i <= width - 16; i += 16, src += 16 ) + { + __m128i x0, x1, x2, y0, y1, t0, t1, z0, z1, z2, z3; + x0 = _mm_loadu_si128((__m128i*)(src - cn)); + x1 = _mm_loadu_si128((__m128i*)src); + x2 = _mm_loadu_si128((__m128i*)(src + cn)); + y0 = _mm_add_epi16(_mm_unpackhi_epi8(x0, z), _mm_unpackhi_epi8(x2, z)); + x0 = _mm_add_epi16(_mm_unpacklo_epi8(x0, z), _mm_unpacklo_epi8(x2, z)); + y1 = _mm_unpackhi_epi8(x1, z); + x1 = _mm_unpacklo_epi8(x1, z); + + t1 = _mm_mulhi_epi16(x1, k0); + t0 = _mm_mullo_epi16(x1, k0); + x2 = _mm_mulhi_epi16(x0, k1); + x0 = _mm_mullo_epi16(x0, k1); + z0 = _mm_unpacklo_epi16(t0, t1); + z1 = _mm_unpackhi_epi16(t0, t1); + z0 = _mm_add_epi32(z0, _mm_unpacklo_epi16(x0, x2)); + z1 = _mm_add_epi32(z1, _mm_unpackhi_epi16(x0, x2)); + + t1 = _mm_mulhi_epi16(y1, k0); + t0 = _mm_mullo_epi16(y1, k0); + y1 = _mm_mulhi_epi16(y0, k1); + y0 = _mm_mullo_epi16(y0, k1); + z2 = _mm_unpacklo_epi16(t0, t1); + z3 = _mm_unpackhi_epi16(t0, t1); + z2 = _mm_add_epi32(z2, _mm_unpacklo_epi16(y0, y1)); + z3 = 
_mm_add_epi32(z3, _mm_unpackhi_epi16(y0, y1)); + + x0 = _mm_loadu_si128((__m128i*)(src - cn*2)); + x1 = _mm_loadu_si128((__m128i*)(src + cn*2)); + y1 = _mm_add_epi16(_mm_unpackhi_epi8(x0, z), _mm_unpackhi_epi8(x1, z)); + y0 = _mm_add_epi16(_mm_unpacklo_epi8(x0, z), _mm_unpacklo_epi8(x1, z)); + + t1 = _mm_mulhi_epi16(y0, k2); + t0 = _mm_mullo_epi16(y0, k2); + y0 = _mm_mullo_epi16(y1, k2); + y1 = _mm_mulhi_epi16(y1, k2); + z0 = _mm_add_epi32(z0, _mm_unpacklo_epi16(t0, t1)); + z1 = _mm_add_epi32(z1, _mm_unpackhi_epi16(t0, t1)); + z2 = _mm_add_epi32(z2, _mm_unpacklo_epi16(y0, y1)); + z3 = _mm_add_epi32(z3, _mm_unpackhi_epi16(y0, y1)); + + _mm_store_si128((__m128i*)(dst + i), z0); + _mm_store_si128((__m128i*)(dst + i + 4), z1); + _mm_store_si128((__m128i*)(dst + i + 8), z2); + _mm_store_si128((__m128i*)(dst + i + 12), z3); + } + } + } + } + else + { + if( _ksize == 3 ) + { + if( kx[0] == 0 && kx[1] == 1 ) + for( ; i <= width - 16; i += 16, src += 16 ) + { + __m128i x0, x1, y0; + x0 = _mm_loadu_si128((__m128i*)(src + cn)); + x1 = _mm_loadu_si128((__m128i*)(src - cn)); + y0 = _mm_sub_epi16(_mm_unpackhi_epi8(x0, z), _mm_unpackhi_epi8(x1, z)); + x0 = _mm_sub_epi16(_mm_unpacklo_epi8(x0, z), _mm_unpacklo_epi8(x1, z)); + _mm_store_si128((__m128i*)(dst + i), _mm_srai_epi32(_mm_unpacklo_epi16(x0, x0),16)); + _mm_store_si128((__m128i*)(dst + i + 4), _mm_srai_epi32(_mm_unpackhi_epi16(x0, x0),16)); + _mm_store_si128((__m128i*)(dst + i + 8), _mm_srai_epi32(_mm_unpacklo_epi16(y0, y0),16)); + _mm_store_si128((__m128i*)(dst + i + 12), _mm_srai_epi32(_mm_unpackhi_epi16(y0, y0),16)); + } + else + { + __m128i k1 = _mm_shuffle_epi32(_mm_cvtsi32_si128(kx[1]), 0); + k1 = _mm_packs_epi32(k1, k1); + + for( ; i <= width - 16; i += 16, src += 16 ) + { + __m128i x0, x1, y0, y1, z0, z1, z2, z3; + x0 = _mm_loadu_si128((__m128i*)(src + cn)); + x1 = _mm_loadu_si128((__m128i*)(src - cn)); + y0 = _mm_sub_epi16(_mm_unpackhi_epi8(x0, z), _mm_unpackhi_epi8(x1, z)); + x0 = 
_mm_sub_epi16(_mm_unpacklo_epi8(x0, z), _mm_unpacklo_epi8(x1, z)); + + x1 = _mm_mulhi_epi16(x0, k1); + x0 = _mm_mullo_epi16(x0, k1); + z0 = _mm_unpacklo_epi16(x0, x1); + z1 = _mm_unpackhi_epi16(x0, x1); + + y1 = _mm_mulhi_epi16(y0, k1); + y0 = _mm_mullo_epi16(y0, k1); + z2 = _mm_unpacklo_epi16(y0, y1); + z3 = _mm_unpackhi_epi16(y0, y1); + _mm_store_si128((__m128i*)(dst + i), z0); + _mm_store_si128((__m128i*)(dst + i + 4), z1); + _mm_store_si128((__m128i*)(dst + i + 8), z2); + _mm_store_si128((__m128i*)(dst + i + 12), z3); + } + } + } + else if( _ksize == 5 ) + { + __m128i k0 = _mm_shuffle_epi32(_mm_cvtsi32_si128(kx[0]), 0), + k1 = _mm_shuffle_epi32(_mm_cvtsi32_si128(kx[1]), 0), + k2 = _mm_shuffle_epi32(_mm_cvtsi32_si128(kx[2]), 0); + k0 = _mm_packs_epi32(k0, k0); + k1 = _mm_packs_epi32(k1, k1); + k2 = _mm_packs_epi32(k2, k2); + + for( ; i <= width - 16; i += 16, src += 16 ) + { + __m128i x0, x1, x2, y0, y1, t0, t1, z0, z1, z2, z3; + x0 = _mm_loadu_si128((__m128i*)(src + cn)); + x2 = _mm_loadu_si128((__m128i*)(src - cn)); + y0 = _mm_sub_epi16(_mm_unpackhi_epi8(x0, z), _mm_unpackhi_epi8(x2, z)); + x0 = _mm_sub_epi16(_mm_unpacklo_epi8(x0, z), _mm_unpacklo_epi8(x2, z)); + + x2 = _mm_mulhi_epi16(x0, k1); + x0 = _mm_mullo_epi16(x0, k1); + z0 = _mm_unpacklo_epi16(x0, x2); + z1 = _mm_unpackhi_epi16(x0, x2); + y1 = _mm_mulhi_epi16(y0, k1); + y0 = _mm_mullo_epi16(y0, k1); + z2 = _mm_unpacklo_epi16(y0, y1); + z3 = _mm_unpackhi_epi16(y0, y1); + + x0 = _mm_loadu_si128((__m128i*)(src + cn*2)); + x1 = _mm_loadu_si128((__m128i*)(src - cn*2)); + y1 = _mm_sub_epi16(_mm_unpackhi_epi8(x0, z), _mm_unpackhi_epi8(x1, z)); + y0 = _mm_sub_epi16(_mm_unpacklo_epi8(x0, z), _mm_unpacklo_epi8(x1, z)); + + t1 = _mm_mulhi_epi16(y0, k2); + t0 = _mm_mullo_epi16(y0, k2); + y0 = _mm_mullo_epi16(y1, k2); + y1 = _mm_mulhi_epi16(y1, k2); + z0 = _mm_add_epi32(z0, _mm_unpacklo_epi16(t0, t1)); + z1 = _mm_add_epi32(z1, _mm_unpackhi_epi16(t0, t1)); + z2 = _mm_add_epi32(z2, _mm_unpacklo_epi16(y0, y1)); + z3 = 
_mm_add_epi32(z3, _mm_unpackhi_epi16(y0, y1)); + + _mm_store_si128((__m128i*)(dst + i), z0); + _mm_store_si128((__m128i*)(dst + i + 4), z1); + _mm_store_si128((__m128i*)(dst + i + 8), z2); + _mm_store_si128((__m128i*)(dst + i + 12), z3); + } + } + } + + src -= (_ksize/2)*cn; + kx -= _ksize/2; + for( ; i <= width - 4; i += 4, src += 4 ) + { + __m128i f, s0 = z, x0, x1; + + for( k = j = 0; k < _ksize; k++, j += cn ) + { + f = _mm_cvtsi32_si128(kx[k]); + f = _mm_shuffle_epi32(f, 0); + f = _mm_packs_epi32(f, f); + + x0 = _mm_cvtsi32_si128(*(const int*)(src + j)); + x0 = _mm_unpacklo_epi8(x0, z); + x1 = _mm_mulhi_epi16(x0, f); + x0 = _mm_mullo_epi16(x0, f); + s0 = _mm_add_epi32(s0, _mm_unpacklo_epi16(x0, x1)); + } + _mm_store_si128((__m128i*)(dst + i), s0); + } + + return i; + } + + Mat kernel; + int symmetryType; + bool smallValues; +}; + + +struct SymmColumnVec_32s8u +{ + SymmColumnVec_32s8u() { symmetryType=0; } + SymmColumnVec_32s8u(const Mat& _kernel, int _symmetryType, int _bits, double _delta) + { + symmetryType = _symmetryType; + _kernel.convertTo(kernel, CV_32F, 1./(1 << _bits), 0); + delta = (float)(_delta/(1 << _bits)); + CV_Assert( (symmetryType & (KERNEL_SYMMETRICAL | KERNEL_ASYMMETRICAL)) != 0 ); + } + + int operator()(const uchar** _src, uchar* dst, int width) const + { + if( !checkHardwareSupport(CV_CPU_SSE2) ) + return 0; + + int ksize2 = (kernel.rows + kernel.cols - 1)/2; + const float* ky = (const float*)kernel.data + ksize2; + int i = 0, k; + bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0; + const int** src = (const int**)_src; + const __m128i *S, *S2; + __m128 d4 = _mm_set1_ps(delta); + + if( symmetrical ) + { + for( ; i <= width - 16; i += 16 ) + { + __m128 f = _mm_load_ss(ky); + f = _mm_shuffle_ps(f, f, 0); + __m128 s0, s1, s2, s3; + __m128i x0, x1; + S = (const __m128i*)(src[0] + i); + s0 = _mm_cvtepi32_ps(_mm_load_si128(S)); + s1 = _mm_cvtepi32_ps(_mm_load_si128(S+1)); + s0 = _mm_add_ps(_mm_mul_ps(s0, f), d4); + s1 = 
_mm_add_ps(_mm_mul_ps(s1, f), d4); + s2 = _mm_cvtepi32_ps(_mm_load_si128(S+2)); + s3 = _mm_cvtepi32_ps(_mm_load_si128(S+3)); + s2 = _mm_add_ps(_mm_mul_ps(s2, f), d4); + s3 = _mm_add_ps(_mm_mul_ps(s3, f), d4); + + for( k = 1; k <= ksize2; k++ ) + { + S = (const __m128i*)(src[k] + i); + S2 = (const __m128i*)(src[-k] + i); + f = _mm_load_ss(ky+k); + f = _mm_shuffle_ps(f, f, 0); + x0 = _mm_add_epi32(_mm_load_si128(S), _mm_load_si128(S2)); + x1 = _mm_add_epi32(_mm_load_si128(S+1), _mm_load_si128(S2+1)); + s0 = _mm_add_ps(s0, _mm_mul_ps(_mm_cvtepi32_ps(x0), f)); + s1 = _mm_add_ps(s1, _mm_mul_ps(_mm_cvtepi32_ps(x1), f)); + x0 = _mm_add_epi32(_mm_load_si128(S+2), _mm_load_si128(S2+2)); + x1 = _mm_add_epi32(_mm_load_si128(S+3), _mm_load_si128(S2+3)); + s2 = _mm_add_ps(s2, _mm_mul_ps(_mm_cvtepi32_ps(x0), f)); + s3 = _mm_add_ps(s3, _mm_mul_ps(_mm_cvtepi32_ps(x1), f)); + } + + x0 = _mm_packs_epi32(_mm_cvtps_epi32(s0), _mm_cvtps_epi32(s1)); + x1 = _mm_packs_epi32(_mm_cvtps_epi32(s2), _mm_cvtps_epi32(s3)); + x0 = _mm_packus_epi16(x0, x1); + _mm_storeu_si128((__m128i*)(dst + i), x0); + } + + for( ; i <= width - 4; i += 4 ) + { + __m128 f = _mm_load_ss(ky); + f = _mm_shuffle_ps(f, f, 0); + __m128i x0; + __m128 s0 = _mm_cvtepi32_ps(_mm_load_si128((const __m128i*)(src[0] + i))); + s0 = _mm_add_ps(_mm_mul_ps(s0, f), d4); + + for( k = 1; k <= ksize2; k++ ) + { + S = (const __m128i*)(src[k] + i); + S2 = (const __m128i*)(src[-k] + i); + f = _mm_load_ss(ky+k); + f = _mm_shuffle_ps(f, f, 0); + x0 = _mm_add_epi32(_mm_load_si128(S), _mm_load_si128(S2)); + s0 = _mm_add_ps(s0, _mm_mul_ps(_mm_cvtepi32_ps(x0), f)); + } + + x0 = _mm_cvtps_epi32(s0); + x0 = _mm_packs_epi32(x0, x0); + x0 = _mm_packus_epi16(x0, x0); + *(int*)(dst + i) = _mm_cvtsi128_si32(x0); + } + } + else + { + for( ; i <= width - 16; i += 16 ) + { + __m128 f, s0 = d4, s1 = d4, s2 = d4, s3 = d4; + __m128i x0, x1; + + for( k = 1; k <= ksize2; k++ ) + { + S = (const __m128i*)(src[k] + i); + S2 = (const __m128i*)(src[-k] + i); + f = 
_mm_load_ss(ky+k); + f = _mm_shuffle_ps(f, f, 0); + x0 = _mm_sub_epi32(_mm_load_si128(S), _mm_load_si128(S2)); + x1 = _mm_sub_epi32(_mm_load_si128(S+1), _mm_load_si128(S2+1)); + s0 = _mm_add_ps(s0, _mm_mul_ps(_mm_cvtepi32_ps(x0), f)); + s1 = _mm_add_ps(s1, _mm_mul_ps(_mm_cvtepi32_ps(x1), f)); + x0 = _mm_sub_epi32(_mm_load_si128(S+2), _mm_load_si128(S2+2)); + x1 = _mm_sub_epi32(_mm_load_si128(S+3), _mm_load_si128(S2+3)); + s2 = _mm_add_ps(s2, _mm_mul_ps(_mm_cvtepi32_ps(x0), f)); + s3 = _mm_add_ps(s3, _mm_mul_ps(_mm_cvtepi32_ps(x1), f)); + } + + x0 = _mm_packs_epi32(_mm_cvtps_epi32(s0), _mm_cvtps_epi32(s1)); + x1 = _mm_packs_epi32(_mm_cvtps_epi32(s2), _mm_cvtps_epi32(s3)); + x0 = _mm_packus_epi16(x0, x1); + _mm_storeu_si128((__m128i*)(dst + i), x0); + } + + for( ; i <= width - 4; i += 4 ) + { + __m128 f, s0 = d4; + __m128i x0; + + for( k = 1; k <= ksize2; k++ ) + { + S = (const __m128i*)(src[k] + i); + S2 = (const __m128i*)(src[-k] + i); + f = _mm_load_ss(ky+k); + f = _mm_shuffle_ps(f, f, 0); + x0 = _mm_sub_epi32(_mm_load_si128(S), _mm_load_si128(S2)); + s0 = _mm_add_ps(s0, _mm_mul_ps(_mm_cvtepi32_ps(x0), f)); + } + + x0 = _mm_cvtps_epi32(s0); + x0 = _mm_packs_epi32(x0, x0); + x0 = _mm_packus_epi16(x0, x0); + *(int*)(dst + i) = _mm_cvtsi128_si32(x0); + } + } + + return i; + } + + int symmetryType; + float delta; + Mat kernel; +}; + + +struct SymmColumnSmallVec_32s16s +{ + SymmColumnSmallVec_32s16s() { symmetryType=0; } + SymmColumnSmallVec_32s16s(const Mat& _kernel, int _symmetryType, int _bits, double _delta) + { + symmetryType = _symmetryType; + _kernel.convertTo(kernel, CV_32F, 1./(1 << _bits), 0); + delta = (float)(_delta/(1 << _bits)); + CV_Assert( (symmetryType & (KERNEL_SYMMETRICAL | KERNEL_ASYMMETRICAL)) != 0 ); + } + + int operator()(const uchar** _src, uchar* _dst, int width) const + { + if( !checkHardwareSupport(CV_CPU_SSE2) ) + return 0; + + int ksize2 = (kernel.rows + kernel.cols - 1)/2; + const float* ky = (const float*)kernel.data + ksize2; + int i = 
0; + bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0; + const int** src = (const int**)_src; + const int *S0 = src[-1], *S1 = src[0], *S2 = src[1]; + short* dst = (short*)_dst; + __m128 df4 = _mm_set1_ps(delta); + __m128i d4 = _mm_cvtps_epi32(df4); + + if( symmetrical ) + { + if( ky[0] == 2 && ky[1] == 1 ) + { + for( ; i <= width - 8; i += 8 ) + { + __m128i s0, s1, s2, s3, s4, s5; + s0 = _mm_load_si128((__m128i*)(S0 + i)); + s1 = _mm_load_si128((__m128i*)(S0 + i + 4)); + s2 = _mm_load_si128((__m128i*)(S1 + i)); + s3 = _mm_load_si128((__m128i*)(S1 + i + 4)); + s4 = _mm_load_si128((__m128i*)(S2 + i)); + s5 = _mm_load_si128((__m128i*)(S2 + i + 4)); + s0 = _mm_add_epi32(s0, _mm_add_epi32(s4, _mm_add_epi32(s2, s2))); + s1 = _mm_add_epi32(s1, _mm_add_epi32(s5, _mm_add_epi32(s3, s3))); + s0 = _mm_add_epi32(s0, d4); + s1 = _mm_add_epi32(s1, d4); + _mm_storeu_si128((__m128i*)(dst + i), _mm_packs_epi32(s0, s1)); + } + } + else if( ky[0] == -2 && ky[1] == 1 ) + { + for( ; i <= width - 8; i += 8 ) + { + __m128i s0, s1, s2, s3, s4, s5; + s0 = _mm_load_si128((__m128i*)(S0 + i)); + s1 = _mm_load_si128((__m128i*)(S0 + i + 4)); + s2 = _mm_load_si128((__m128i*)(S1 + i)); + s3 = _mm_load_si128((__m128i*)(S1 + i + 4)); + s4 = _mm_load_si128((__m128i*)(S2 + i)); + s5 = _mm_load_si128((__m128i*)(S2 + i + 4)); + s0 = _mm_add_epi32(s0, _mm_sub_epi32(s4, _mm_add_epi32(s2, s2))); + s1 = _mm_add_epi32(s1, _mm_sub_epi32(s5, _mm_add_epi32(s3, s3))); + s0 = _mm_add_epi32(s0, d4); + s1 = _mm_add_epi32(s1, d4); + _mm_storeu_si128((__m128i*)(dst + i), _mm_packs_epi32(s0, s1)); + } + } + else + { + __m128 k0 = _mm_set1_ps(ky[0]), k1 = _mm_set1_ps(ky[1]); + for( ; i <= width - 8; i += 8 ) + { + __m128 s0, s1; + s0 = _mm_cvtepi32_ps(_mm_load_si128((__m128i*)(S1 + i))); + s1 = _mm_cvtepi32_ps(_mm_load_si128((__m128i*)(S1 + i + 4))); + s0 = _mm_add_ps(_mm_mul_ps(s0, k0), df4); + s1 = _mm_add_ps(_mm_mul_ps(s1, k0), df4); + __m128i x0, x1; + x0 = _mm_add_epi32(_mm_load_si128((__m128i*)(S0 + 
i)), + _mm_load_si128((__m128i*)(S2 + i))); + x1 = _mm_add_epi32(_mm_load_si128((__m128i*)(S0 + i + 4)), + _mm_load_si128((__m128i*)(S2 + i + 4))); + s0 = _mm_add_ps(s0, _mm_mul_ps(_mm_cvtepi32_ps(x0),k1)); + s1 = _mm_add_ps(s1, _mm_mul_ps(_mm_cvtepi32_ps(x1),k1)); + x0 = _mm_packs_epi32(_mm_cvtps_epi32(s0), _mm_cvtps_epi32(s1)); + _mm_storeu_si128((__m128i*)(dst + i), x0); + } + } + } + else + { + if( fabs(ky[1]) == 1 && ky[1] == -ky[-1] ) + { + if( ky[1] < 0 ) + std::swap(S0, S2); + for( ; i <= width - 8; i += 8 ) + { + __m128i s0, s1, s2, s3; + s0 = _mm_load_si128((__m128i*)(S2 + i)); + s1 = _mm_load_si128((__m128i*)(S2 + i + 4)); + s2 = _mm_load_si128((__m128i*)(S0 + i)); + s3 = _mm_load_si128((__m128i*)(S0 + i + 4)); + s0 = _mm_add_epi32(_mm_sub_epi32(s0, s2), d4); + s1 = _mm_add_epi32(_mm_sub_epi32(s1, s3), d4); + _mm_storeu_si128((__m128i*)(dst + i), _mm_packs_epi32(s0, s1)); + } + } + else + { + __m128 k1 = _mm_set1_ps(ky[1]); + for( ; i <= width - 8; i += 8 ) + { + __m128 s0 = df4, s1 = df4; + __m128i x0, x1; + x0 = _mm_sub_epi32(_mm_load_si128((__m128i*)(S0 + i)), + _mm_load_si128((__m128i*)(S2 + i))); + x1 = _mm_sub_epi32(_mm_load_si128((__m128i*)(S0 + i + 4)), + _mm_load_si128((__m128i*)(S2 + i + 4))); + s0 = _mm_add_ps(s0, _mm_mul_ps(_mm_cvtepi32_ps(x0),k1)); + s1 = _mm_add_ps(s1, _mm_mul_ps(_mm_cvtepi32_ps(x1),k1)); + x0 = _mm_packs_epi32(_mm_cvtps_epi32(s0), _mm_cvtps_epi32(s1)); + _mm_storeu_si128((__m128i*)(dst + i), x0); + } + } + } + + return i; + } + + int symmetryType; + float delta; + Mat kernel; +}; + + +/////////////////////////////////////// 32f ////////////////////////////////// + +struct RowVec_32f +{ + RowVec_32f() {} + RowVec_32f( const Mat& _kernel ) + { + kernel = _kernel; + } + + int operator()(const uchar* _src, uchar* _dst, int width, int cn) const + { + if( !checkHardwareSupport(CV_CPU_SSE) ) + return 0; + + int i = 0, k, _ksize = kernel.rows + kernel.cols - 1; + float* dst = (float*)_dst; + const float* _kx = (const 
float*)kernel.data; + width *= cn; + + for( ; i <= width - 8; i += 8 ) + { + const float* src = (const float*)_src + i; + __m128 f, s0 = _mm_setzero_ps(), s1 = s0, x0, x1; + for( k = 0; k < _ksize; k++, src += cn ) + { + f = _mm_load_ss(_kx+k); + f = _mm_shuffle_ps(f, f, 0); + + x0 = _mm_loadu_ps(src); + x1 = _mm_loadu_ps(src + 4); + s0 = _mm_add_ps(s0, _mm_mul_ps(x0, f)); + s1 = _mm_add_ps(s1, _mm_mul_ps(x1, f)); + } + _mm_store_ps(dst + i, s0); + _mm_store_ps(dst + i + 4, s1); + } + return i; + } + + Mat kernel; +}; + + +struct SymmRowSmallVec_32f +{ + SymmRowSmallVec_32f() {} + SymmRowSmallVec_32f( const Mat& _kernel, int _symmetryType ) + { + kernel = _kernel; + symmetryType = _symmetryType; + } + + int operator()(const uchar* _src, uchar* _dst, int width, int cn) const + { + if( !checkHardwareSupport(CV_CPU_SSE) ) + return 0; + + int i = 0, _ksize = kernel.rows + kernel.cols - 1; + float* dst = (float*)_dst; + const float* src = (const float*)_src + (_ksize/2)*cn; + bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0; + const float* kx = (const float*)kernel.data + _ksize/2; + width *= cn; + + if( symmetrical ) + { + if( _ksize == 1 ) + return 0; + if( _ksize == 3 ) + { + if( kx[0] == 2 && kx[1] == 1 ) + for( ; i <= width - 8; i += 8, src += 8 ) + { + __m128 x0, x1, x2, y0, y1, y2; + x0 = _mm_loadu_ps(src - cn); + x1 = _mm_loadu_ps(src); + x2 = _mm_loadu_ps(src + cn); + y0 = _mm_loadu_ps(src - cn + 4); + y1 = _mm_loadu_ps(src + 4); + y2 = _mm_loadu_ps(src + cn + 4); + x0 = _mm_add_ps(x0, _mm_add_ps(_mm_add_ps(x1, x1), x2)); + y0 = _mm_add_ps(y0, _mm_add_ps(_mm_add_ps(y1, y1), y2)); + _mm_store_ps(dst + i, x0); + _mm_store_ps(dst + i + 4, y0); + } + else if( kx[0] == -2 && kx[1] == 1 ) + for( ; i <= width - 8; i += 8, src += 8 ) + { + __m128 x0, x1, x2, y0, y1, y2; + x0 = _mm_loadu_ps(src - cn); + x1 = _mm_loadu_ps(src); + x2 = _mm_loadu_ps(src + cn); + y0 = _mm_loadu_ps(src - cn + 4); + y1 = _mm_loadu_ps(src + 4); + y2 = _mm_loadu_ps(src + cn + 4); + 
x0 = _mm_add_ps(x0, _mm_sub_ps(x2, _mm_add_ps(x1, x1))); + y0 = _mm_add_ps(y0, _mm_sub_ps(y2, _mm_add_ps(y1, y1))); + _mm_store_ps(dst + i, x0); + _mm_store_ps(dst + i + 4, y0); + } + else + { + __m128 k0 = _mm_set1_ps(kx[0]), k1 = _mm_set1_ps(kx[1]); + for( ; i <= width - 8; i += 8, src += 8 ) + { + __m128 x0, x1, x2, y0, y1, y2; + x0 = _mm_loadu_ps(src - cn); + x1 = _mm_loadu_ps(src); + x2 = _mm_loadu_ps(src + cn); + y0 = _mm_loadu_ps(src - cn + 4); + y1 = _mm_loadu_ps(src + 4); + y2 = _mm_loadu_ps(src + cn + 4); + + x0 = _mm_mul_ps(_mm_add_ps(x0, x2), k1); + y0 = _mm_mul_ps(_mm_add_ps(y0, y2), k1); + x0 = _mm_add_ps(x0, _mm_mul_ps(x1, k0)); + y0 = _mm_add_ps(y0, _mm_mul_ps(y1, k0)); + _mm_store_ps(dst + i, x0); + _mm_store_ps(dst + i + 4, y0); + } + } + } + else if( _ksize == 5 ) + { + if( kx[0] == -2 && kx[1] == 0 && kx[2] == 1 ) + for( ; i <= width - 8; i += 8, src += 8 ) + { + __m128 x0, x1, x2, y0, y1, y2; + x0 = _mm_loadu_ps(src - cn*2); + x1 = _mm_loadu_ps(src); + x2 = _mm_loadu_ps(src + cn*2); + y0 = _mm_loadu_ps(src - cn*2 + 4); + y1 = _mm_loadu_ps(src + 4); + y2 = _mm_loadu_ps(src + cn*2 + 4); + x0 = _mm_add_ps(x0, _mm_sub_ps(x2, _mm_add_ps(x1, x1))); + y0 = _mm_add_ps(y0, _mm_sub_ps(y2, _mm_add_ps(y1, y1))); + _mm_store_ps(dst + i, x0); + _mm_store_ps(dst + i + 4, y0); + } + else + { + __m128 k0 = _mm_set1_ps(kx[0]), k1 = _mm_set1_ps(kx[1]), k2 = _mm_set1_ps(kx[2]); + for( ; i <= width - 8; i += 8, src += 8 ) + { + __m128 x0, x1, x2, y0, y1, y2; + x0 = _mm_loadu_ps(src - cn); + x1 = _mm_loadu_ps(src); + x2 = _mm_loadu_ps(src + cn); + y0 = _mm_loadu_ps(src - cn + 4); + y1 = _mm_loadu_ps(src + 4); + y2 = _mm_loadu_ps(src + cn + 4); + + x0 = _mm_mul_ps(_mm_add_ps(x0, x2), k1); + y0 = _mm_mul_ps(_mm_add_ps(y0, y2), k1); + x0 = _mm_add_ps(x0, _mm_mul_ps(x1, k0)); + y0 = _mm_add_ps(y0, _mm_mul_ps(y1, k0)); + + x2 = _mm_add_ps(_mm_loadu_ps(src + cn*2), _mm_loadu_ps(src - cn*2)); + y2 = _mm_add_ps(_mm_loadu_ps(src + cn*2 + 4), _mm_loadu_ps(src - cn*2 + 4)); + 
x0 = _mm_add_ps(x0, _mm_mul_ps(x2, k2)); + y0 = _mm_add_ps(y0, _mm_mul_ps(y2, k2)); + + _mm_store_ps(dst + i, x0); + _mm_store_ps(dst + i + 4, y0); + } + } + } + } + else + { + if( _ksize == 3 ) + { + if( kx[0] == 0 && kx[1] == 1 ) + for( ; i <= width - 8; i += 8, src += 8 ) + { + __m128 x0, x2, y0, y2; + x0 = _mm_loadu_ps(src + cn); + x2 = _mm_loadu_ps(src - cn); + y0 = _mm_loadu_ps(src + cn + 4); + y2 = _mm_loadu_ps(src - cn + 4); + x0 = _mm_sub_ps(x0, x2); + y0 = _mm_sub_ps(y0, y2); + _mm_store_ps(dst + i, x0); + _mm_store_ps(dst + i + 4, y0); + } + else + { + __m128 k1 = _mm_set1_ps(kx[1]); + for( ; i <= width - 8; i += 8, src += 8 ) + { + __m128 x0, x2, y0, y2; + x0 = _mm_loadu_ps(src + cn); + x2 = _mm_loadu_ps(src - cn); + y0 = _mm_loadu_ps(src + cn + 4); + y2 = _mm_loadu_ps(src - cn + 4); + + x0 = _mm_mul_ps(_mm_sub_ps(x0, x2), k1); + y0 = _mm_mul_ps(_mm_sub_ps(y0, y2), k1); + _mm_store_ps(dst + i, x0); + _mm_store_ps(dst + i + 4, y0); + } + } + } + else if( _ksize == 5 ) + { + __m128 k1 = _mm_set1_ps(kx[1]), k2 = _mm_set1_ps(kx[2]); + for( ; i <= width - 8; i += 8, src += 8 ) + { + __m128 x0, x2, y0, y2; + x0 = _mm_loadu_ps(src + cn); + x2 = _mm_loadu_ps(src - cn); + y0 = _mm_loadu_ps(src + cn + 4); + y2 = _mm_loadu_ps(src - cn + 4); + + x0 = _mm_mul_ps(_mm_sub_ps(x0, x2), k1); + y0 = _mm_mul_ps(_mm_sub_ps(y0, y2), k1); + + x2 = _mm_sub_ps(_mm_loadu_ps(src + cn*2), _mm_loadu_ps(src - cn*2)); + y2 = _mm_sub_ps(_mm_loadu_ps(src + cn*2 + 4), _mm_loadu_ps(src - cn*2 + 4)); + x0 = _mm_add_ps(x0, _mm_mul_ps(x2, k2)); + y0 = _mm_add_ps(y0, _mm_mul_ps(y2, k2)); + + _mm_store_ps(dst + i, x0); + _mm_store_ps(dst + i + 4, y0); + } + } + } + + return i; + } + + Mat kernel; + int symmetryType; +}; + + +struct SymmColumnVec_32f +{ + SymmColumnVec_32f() { symmetryType=0; } + SymmColumnVec_32f(const Mat& _kernel, int _symmetryType, int, double _delta) + { + symmetryType = _symmetryType; + kernel = _kernel; + delta = (float)_delta; + CV_Assert( (symmetryType & 
(KERNEL_SYMMETRICAL | KERNEL_ASYMMETRICAL)) != 0 ); + } + + int operator()(const uchar** _src, uchar* _dst, int width) const + { + if( !checkHardwareSupport(CV_CPU_SSE) ) + return 0; + + int ksize2 = (kernel.rows + kernel.cols - 1)/2; + const float* ky = (const float*)kernel.data + ksize2; + int i = 0, k; + bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0; + const float** src = (const float**)_src; + const float *S, *S2; + float* dst = (float*)_dst; + __m128 d4 = _mm_set1_ps(delta); + + if( symmetrical ) + { + for( ; i <= width - 16; i += 16 ) + { + __m128 f = _mm_load_ss(ky); + f = _mm_shuffle_ps(f, f, 0); + __m128 s0, s1, s2, s3; + __m128 x0, x1; + S = src[0] + i; + s0 = _mm_load_ps(S); + s1 = _mm_load_ps(S+4); + s0 = _mm_add_ps(_mm_mul_ps(s0, f), d4); + s1 = _mm_add_ps(_mm_mul_ps(s1, f), d4); + s2 = _mm_load_ps(S+8); + s3 = _mm_load_ps(S+12); + s2 = _mm_add_ps(_mm_mul_ps(s2, f), d4); + s3 = _mm_add_ps(_mm_mul_ps(s3, f), d4); + + for( k = 1; k <= ksize2; k++ ) + { + S = src[k] + i; + S2 = src[-k] + i; + f = _mm_load_ss(ky+k); + f = _mm_shuffle_ps(f, f, 0); + x0 = _mm_add_ps(_mm_load_ps(S), _mm_load_ps(S2)); + x1 = _mm_add_ps(_mm_load_ps(S+4), _mm_load_ps(S2+4)); + s0 = _mm_add_ps(s0, _mm_mul_ps(x0, f)); + s1 = _mm_add_ps(s1, _mm_mul_ps(x1, f)); + x0 = _mm_add_ps(_mm_load_ps(S+8), _mm_load_ps(S2+8)); + x1 = _mm_add_ps(_mm_load_ps(S+12), _mm_load_ps(S2+12)); + s2 = _mm_add_ps(s2, _mm_mul_ps(x0, f)); + s3 = _mm_add_ps(s3, _mm_mul_ps(x1, f)); + } + + _mm_storeu_ps(dst + i, s0); + _mm_storeu_ps(dst + i + 4, s1); + _mm_storeu_ps(dst + i + 8, s2); + _mm_storeu_ps(dst + i + 12, s3); + } + + for( ; i <= width - 4; i += 4 ) + { + __m128 f = _mm_load_ss(ky); + f = _mm_shuffle_ps(f, f, 0); + __m128 x0, s0 = _mm_load_ps(src[0] + i); + s0 = _mm_add_ps(_mm_mul_ps(s0, f), d4); + + for( k = 1; k <= ksize2; k++ ) + { + f = _mm_load_ss(ky+k); + f = _mm_shuffle_ps(f, f, 0); + S = src[k] + i; + S2 = src[-k] + i; + x0 = _mm_add_ps(_mm_load_ps(src[k]+i), _mm_load_ps(src[-k] 
+ i)); + s0 = _mm_add_ps(s0, _mm_mul_ps(x0, f)); + } + + _mm_storeu_ps(dst + i, s0); + } + } + else + { + for( ; i <= width - 16; i += 16 ) + { + __m128 f, s0 = d4, s1 = d4, s2 = d4, s3 = d4; + __m128 x0, x1; + S = src[0] + i; + + for( k = 1; k <= ksize2; k++ ) + { + S = src[k] + i; + S2 = src[-k] + i; + f = _mm_load_ss(ky+k); + f = _mm_shuffle_ps(f, f, 0); + x0 = _mm_sub_ps(_mm_load_ps(S), _mm_load_ps(S2)); + x1 = _mm_sub_ps(_mm_load_ps(S+4), _mm_load_ps(S2+4)); + s0 = _mm_add_ps(s0, _mm_mul_ps(x0, f)); + s1 = _mm_add_ps(s1, _mm_mul_ps(x1, f)); + x0 = _mm_sub_ps(_mm_load_ps(S+8), _mm_load_ps(S2+8)); + x1 = _mm_sub_ps(_mm_load_ps(S+12), _mm_load_ps(S2+12)); + s2 = _mm_add_ps(s2, _mm_mul_ps(x0, f)); + s3 = _mm_add_ps(s3, _mm_mul_ps(x1, f)); + } + + _mm_storeu_ps(dst + i, s0); + _mm_storeu_ps(dst + i + 4, s1); + _mm_storeu_ps(dst + i + 8, s2); + _mm_storeu_ps(dst + i + 12, s3); + } + + for( ; i <= width - 4; i += 4 ) + { + __m128 f, x0, s0 = d4; + + for( k = 1; k <= ksize2; k++ ) + { + f = _mm_load_ss(ky+k); + f = _mm_shuffle_ps(f, f, 0); + x0 = _mm_sub_ps(_mm_load_ps(src[k]+i), _mm_load_ps(src[-k] + i)); + s0 = _mm_add_ps(s0, _mm_mul_ps(x0, f)); + } + + _mm_storeu_ps(dst + i, s0); + } + } + + return i; + } + + int symmetryType; + float delta; + Mat kernel; +}; + + +struct SymmColumnSmallVec_32f +{ + SymmColumnSmallVec_32f() { symmetryType=0; } + SymmColumnSmallVec_32f(const Mat& _kernel, int _symmetryType, int, double _delta) + { + symmetryType = _symmetryType; + kernel = _kernel; + delta = (float)_delta; + CV_Assert( (symmetryType & (KERNEL_SYMMETRICAL | KERNEL_ASYMMETRICAL)) != 0 ); + } + + int operator()(const uchar** _src, uchar* _dst, int width) const + { + if( !checkHardwareSupport(CV_CPU_SSE) ) + return 0; + + int ksize2 = (kernel.rows + kernel.cols - 1)/2; + const float* ky = (const float*)kernel.data + ksize2; + int i = 0; + bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0; + const float** src = (const float**)_src; + const float *S0 = src[-1], 
*S1 = src[0], *S2 = src[1]; + float* dst = (float*)_dst; + __m128 d4 = _mm_set1_ps(delta); + + if( symmetrical ) + { + if( ky[0] == 2 && ky[1] == 1 ) + { + for( ; i <= width - 8; i += 8 ) + { + __m128 s0, s1, s2, s3, s4, s5; + s0 = _mm_load_ps(S0 + i); + s1 = _mm_load_ps(S0 + i + 4); + s2 = _mm_load_ps(S1 + i); + s3 = _mm_load_ps(S1 + i + 4); + s4 = _mm_load_ps(S2 + i); + s5 = _mm_load_ps(S2 + i + 4); + s0 = _mm_add_ps(s0, _mm_add_ps(s4, _mm_add_ps(s2, s2))); + s1 = _mm_add_ps(s1, _mm_add_ps(s5, _mm_add_ps(s3, s3))); + s0 = _mm_add_ps(s0, d4); + s1 = _mm_add_ps(s1, d4); + _mm_storeu_ps(dst + i, s0); + _mm_storeu_ps(dst + i + 4, s1); + } + } + else if( ky[0] == -2 && ky[1] == 1 ) + { + for( ; i <= width - 8; i += 8 ) + { + __m128 s0, s1, s2, s3, s4, s5; + s0 = _mm_load_ps(S0 + i); + s1 = _mm_load_ps(S0 + i + 4); + s2 = _mm_load_ps(S1 + i); + s3 = _mm_load_ps(S1 + i + 4); + s4 = _mm_load_ps(S2 + i); + s5 = _mm_load_ps(S2 + i + 4); + s0 = _mm_add_ps(s0, _mm_sub_ps(s4, _mm_add_ps(s2, s2))); + s1 = _mm_add_ps(s1, _mm_sub_ps(s5, _mm_add_ps(s3, s3))); + s0 = _mm_add_ps(s0, d4); + s1 = _mm_add_ps(s1, d4); + _mm_storeu_ps(dst + i, s0); + _mm_storeu_ps(dst + i + 4, s1); + } + } + else + { + __m128 k0 = _mm_set1_ps(ky[0]), k1 = _mm_set1_ps(ky[1]); + for( ; i <= width - 8; i += 8 ) + { + __m128 s0, s1, x0, x1; + s0 = _mm_load_ps(S1 + i); + s1 = _mm_load_ps(S1 + i + 4); + s0 = _mm_add_ps(_mm_mul_ps(s0, k0), d4); + s1 = _mm_add_ps(_mm_mul_ps(s1, k0), d4); + x0 = _mm_add_ps(_mm_load_ps(S0 + i), _mm_load_ps(S2 + i)); + x1 = _mm_add_ps(_mm_load_ps(S0 + i + 4), _mm_load_ps(S2 + i + 4)); + s0 = _mm_add_ps(s0, _mm_mul_ps(x0,k1)); + s1 = _mm_add_ps(s1, _mm_mul_ps(x1,k1)); + _mm_storeu_ps(dst + i, s0); + _mm_storeu_ps(dst + i + 4, s1); + } + } + } + else + { + if( fabs(ky[1]) == 1 && ky[1] == -ky[-1] ) + { + if( ky[1] < 0 ) + std::swap(S0, S2); + for( ; i <= width - 8; i += 8 ) + { + __m128 s0, s1, s2, s3; + s0 = _mm_load_ps(S2 + i); + s1 = _mm_load_ps(S2 + i + 4); + s2 = _mm_load_ps(S0 
+ i); + s3 = _mm_load_ps(S0 + i + 4); + s0 = _mm_add_ps(_mm_sub_ps(s0, s2), d4); + s1 = _mm_add_ps(_mm_sub_ps(s1, s3), d4); + _mm_storeu_ps(dst + i, s0); + _mm_storeu_ps(dst + i + 4, s1); + } + } + else + { + __m128 k1 = _mm_set1_ps(ky[1]); + for( ; i <= width - 8; i += 8 ) + { + __m128 s0 = d4, s1 = d4, x0, x1; + x0 = _mm_sub_ps(_mm_load_ps(S2 + i), _mm_load_ps(S0 + i)); + x1 = _mm_sub_ps(_mm_load_ps(S2 + i + 4), _mm_load_ps(S0 + i + 4)); + s0 = _mm_add_ps(s0, _mm_mul_ps(x0,k1)); + s1 = _mm_add_ps(s1, _mm_mul_ps(x1,k1)); + _mm_storeu_ps(dst + i, s0); + _mm_storeu_ps(dst + i + 4, s1); + } + } + } + + return i; + } + + int symmetryType; + float delta; + Mat kernel; +}; + + +/////////////////////////////// non-separable filters /////////////////////////////// + +///////////////////////////////// 8u<->8u, 8u<->16s ///////////////////////////////// + +struct FilterVec_8u +{ + FilterVec_8u() {} + FilterVec_8u(const Mat& _kernel, int _bits, double _delta) + { + Mat kernel; + _kernel.convertTo(kernel, CV_32F, 1./(1 << _bits), 0); + delta = (float)(_delta/(1 << _bits)); + vector coords; + preprocess2DKernel(kernel, coords, coeffs); + _nz = (int)coords.size(); + } + + int operator()(const uchar** src, uchar* dst, int width) const + { + if( !checkHardwareSupport(CV_CPU_SSE2) ) + return 0; + + const float* kf = (const float*)&coeffs[0]; + int i = 0, k, nz = _nz; + __m128 d4 = _mm_set1_ps(delta); + + for( ; i <= width - 16; i += 16 ) + { + __m128 s0 = d4, s1 = d4, s2 = d4, s3 = d4; + __m128i x0, x1, z = _mm_setzero_si128(); + + for( k = 0; k < nz; k++ ) + { + __m128 f = _mm_load_ss(kf+k), t0, t1; + f = _mm_shuffle_ps(f, f, 0); + + x0 = _mm_loadu_si128((const __m128i*)(src[k] + i)); + x1 = _mm_unpackhi_epi8(x0, z); + x0 = _mm_unpacklo_epi8(x0, z); + + t0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(x0, z)); + t1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(x0, z)); + s0 = _mm_add_ps(s0, _mm_mul_ps(t0, f)); + s1 = _mm_add_ps(s1, _mm_mul_ps(t1, f)); + + t0 = 
_mm_cvtepi32_ps(_mm_unpacklo_epi16(x1, z)); + t1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(x1, z)); + s2 = _mm_add_ps(s2, _mm_mul_ps(t0, f)); + s3 = _mm_add_ps(s3, _mm_mul_ps(t1, f)); + } + + x0 = _mm_packs_epi32(_mm_cvtps_epi32(s0), _mm_cvtps_epi32(s1)); + x1 = _mm_packs_epi32(_mm_cvtps_epi32(s2), _mm_cvtps_epi32(s3)); + x0 = _mm_packus_epi16(x0, x1); + _mm_storeu_si128((__m128i*)(dst + i), x0); + } + + for( ; i <= width - 4; i += 4 ) + { + __m128 s0 = d4; + __m128i x0, z = _mm_setzero_si128(); + + for( k = 0; k < nz; k++ ) + { + __m128 f = _mm_load_ss(kf+k), t0; + f = _mm_shuffle_ps(f, f, 0); + + x0 = _mm_cvtsi32_si128(*(const int*)(src[k] + i)); + x0 = _mm_unpacklo_epi8(x0, z); + t0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(x0, z)); + s0 = _mm_add_ps(s0, _mm_mul_ps(t0, f)); + } + + x0 = _mm_packs_epi32(_mm_cvtps_epi32(s0), z); + x0 = _mm_packus_epi16(x0, x0); + *(int*)(dst + i) = _mm_cvtsi128_si32(x0); + } + + return i; + } + + int _nz; + vector coeffs; + float delta; +}; + + +struct FilterVec_8u16s +{ + FilterVec_8u16s() {} + FilterVec_8u16s(const Mat& _kernel, int _bits, double _delta) + { + Mat kernel; + _kernel.convertTo(kernel, CV_32F, 1./(1 << _bits), 0); + delta = (float)(_delta/(1 << _bits)); + vector coords; + preprocess2DKernel(kernel, coords, coeffs); + _nz = (int)coords.size(); + } + + int operator()(const uchar** src, uchar* _dst, int width) const + { + if( !checkHardwareSupport(CV_CPU_SSE2) ) + return 0; + + const float* kf = (const float*)&coeffs[0]; + short* dst = (short*)_dst; + int i = 0, k, nz = _nz; + __m128 d4 = _mm_set1_ps(delta); + + for( ; i <= width - 16; i += 16 ) + { + __m128 s0 = d4, s1 = d4, s2 = d4, s3 = d4; + __m128i x0, x1, z = _mm_setzero_si128(); + + for( k = 0; k < nz; k++ ) + { + __m128 f = _mm_load_ss(kf+k), t0, t1; + f = _mm_shuffle_ps(f, f, 0); + + x0 = _mm_loadu_si128((const __m128i*)(src[k] + i)); + x1 = _mm_unpackhi_epi8(x0, z); + x0 = _mm_unpacklo_epi8(x0, z); + + t0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(x0, z)); + t1 = 
_mm_cvtepi32_ps(_mm_unpackhi_epi16(x0, z)); + s0 = _mm_add_ps(s0, _mm_mul_ps(t0, f)); + s1 = _mm_add_ps(s1, _mm_mul_ps(t1, f)); + + t0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(x1, z)); + t1 = _mm_cvtepi32_ps(_mm_unpackhi_epi16(x1, z)); + s2 = _mm_add_ps(s2, _mm_mul_ps(t0, f)); + s3 = _mm_add_ps(s3, _mm_mul_ps(t1, f)); + } + + x0 = _mm_packs_epi32(_mm_cvtps_epi32(s0), _mm_cvtps_epi32(s1)); + x1 = _mm_packs_epi32(_mm_cvtps_epi32(s2), _mm_cvtps_epi32(s3)); + _mm_storeu_si128((__m128i*)(dst + i), x0); + _mm_storeu_si128((__m128i*)(dst + i + 8), x1); + } + + for( ; i <= width - 4; i += 4 ) + { + __m128 s0 = d4; + __m128i x0, z = _mm_setzero_si128(); + + for( k = 0; k < nz; k++ ) + { + __m128 f = _mm_load_ss(kf+k), t0; + f = _mm_shuffle_ps(f, f, 0); + + x0 = _mm_cvtsi32_si128(*(const int*)(src[k] + i)); + x0 = _mm_unpacklo_epi8(x0, z); + t0 = _mm_cvtepi32_ps(_mm_unpacklo_epi16(x0, z)); + s0 = _mm_add_ps(s0, _mm_mul_ps(t0, f)); + } + + x0 = _mm_packs_epi32(_mm_cvtps_epi32(s0), z); + _mm_storel_epi64((__m128i*)(dst + i), x0); + } + + return i; + } + + int _nz; + vector coeffs; + float delta; +}; + + +struct FilterVec_32f +{ + FilterVec_32f() {} + FilterVec_32f(const Mat& _kernel, int, double _delta) + { + delta = (float)_delta; + vector coords; + preprocess2DKernel(_kernel, coords, coeffs); + _nz = (int)coords.size(); + } + + int operator()(const uchar** _src, uchar* _dst, int width) const + { + if( !checkHardwareSupport(CV_CPU_SSE) ) + return 0; + + const float* kf = (const float*)&coeffs[0]; + const float** src = (const float**)_src; + float* dst = (float*)_dst; + int i = 0, k, nz = _nz; + __m128 d4 = _mm_set1_ps(delta); + + for( ; i <= width - 16; i += 16 ) + { + __m128 s0 = d4, s1 = d4, s2 = d4, s3 = d4; + + for( k = 0; k < nz; k++ ) + { + __m128 f = _mm_load_ss(kf+k), t0, t1; + f = _mm_shuffle_ps(f, f, 0); + const float* S = src[k] + i; + + t0 = _mm_loadu_ps(S); + t1 = _mm_loadu_ps(S + 4); + s0 = _mm_add_ps(s0, _mm_mul_ps(t0, f)); + s1 = _mm_add_ps(s1, _mm_mul_ps(t1, f)); 
+ + t0 = _mm_loadu_ps(S + 8); + t1 = _mm_loadu_ps(S + 12); + s2 = _mm_add_ps(s2, _mm_mul_ps(t0, f)); + s3 = _mm_add_ps(s3, _mm_mul_ps(t1, f)); + } + + _mm_storeu_ps(dst + i, s0); + _mm_storeu_ps(dst + i + 4, s1); + _mm_storeu_ps(dst + i + 8, s2); + _mm_storeu_ps(dst + i + 12, s3); + } + + for( ; i <= width - 4; i += 4 ) + { + __m128 s0 = d4; + + for( k = 0; k < nz; k++ ) + { + __m128 f = _mm_load_ss(kf+k), t0; + f = _mm_shuffle_ps(f, f, 0); + t0 = _mm_loadu_ps(src[k] + i); + s0 = _mm_add_ps(s0, _mm_mul_ps(t0, f)); + } + _mm_storeu_ps(dst + i, s0); + } + + return i; + } + + int _nz; + vector coeffs; + float delta; +}; + + +#else + +typedef RowNoVec RowVec_8u32s; +typedef RowNoVec RowVec_32f; +typedef SymmRowSmallNoVec SymmRowSmallVec_8u32s; +typedef SymmRowSmallNoVec SymmRowSmallVec_32f; +typedef ColumnNoVec SymmColumnVec_32s8u; +typedef ColumnNoVec SymmColumnVec_32f; +typedef SymmColumnSmallNoVec SymmColumnSmallVec_32s16s; +typedef SymmColumnSmallNoVec SymmColumnSmallVec_32f; +typedef FilterNoVec FilterVec_8u; +typedef FilterNoVec FilterVec_8u16s; +typedef FilterNoVec FilterVec_32f; + +#endif + + +template struct RowFilter : public BaseRowFilter +{ + RowFilter( const Mat& _kernel, int _anchor, const VecOp& _vecOp=VecOp() ) + { + if( _kernel.isContinuous() ) + kernel = _kernel; + else + _kernel.copyTo(kernel); + anchor = _anchor; + ksize = kernel.rows + kernel.cols - 1; + CV_Assert( kernel.type() == DataType
::type && + (kernel.rows == 1 || kernel.cols == 1)); + vecOp = _vecOp; + } + + void operator()(const uchar* src, uchar* dst, int width, int cn) + { + int _ksize = ksize; + const DT* kx = (const DT*)kernel.data; + const ST* S; + DT* D = (DT*)dst; + int i, k; + + i = vecOp(src, dst, width, cn); + width *= cn; + + for( ; i <= width - 4; i += 4 ) + { + S = (const ST*)src + i; + DT f = kx[0]; + DT s0 = f*S[0], s1 = f*S[1], s2 = f*S[2], s3 = f*S[3]; + + for( k = 1; k < _ksize; k++ ) + { + S += cn; + f = kx[k]; + s0 += f*S[0]; s1 += f*S[1]; + s2 += f*S[2]; s3 += f*S[3]; + } + + D[i] = s0; D[i+1] = s1; + D[i+2] = s2; D[i+3] = s3; + } + + for( ; i < width; i++ ) + { + S = (const ST*)src + i; + DT s0 = kx[0]*S[0]; + for( k = 1; k < _ksize; k++ ) + { + S += cn; + s0 += kx[k]*S[0]; + } + D[i] = s0; + } + } + + Mat kernel; + VecOp vecOp; +}; + + +template struct SymmRowSmallFilter : + public RowFilter +{ + SymmRowSmallFilter( const Mat& _kernel, int _anchor, int _symmetryType, + const VecOp& _vecOp = VecOp()) + : RowFilter( _kernel, _anchor, _vecOp ) + { + symmetryType = _symmetryType; + CV_Assert( (symmetryType & (KERNEL_SYMMETRICAL | KERNEL_ASYMMETRICAL)) != 0 && this->ksize <= 5 ); + } + + void operator()(const uchar* src, uchar* dst, int width, int cn) + { + int ksize2 = this->ksize/2, ksize2n = ksize2*cn; + const DT* kx = (const DT*)this->kernel.data + ksize2; + bool symmetrical = (this->symmetryType & KERNEL_SYMMETRICAL) != 0; + DT* D = (DT*)dst; + int i = this->vecOp(src, dst, width, cn), j, k; + const ST* S = (const ST*)src + i + ksize2n; + width *= cn; + + if( symmetrical ) + { + if( this->ksize == 1 && kx[0] == 1 ) + { + for( ; i <= width - 2; i += 2 ) + { + DT s0 = S[i], s1 = S[i+1]; + D[i] = s0; D[i+1] = s1; + } + S += i; + } + else if( this->ksize == 3 ) + { + if( kx[0] == 2 && kx[1] == 1 ) + for( ; i <= width - 2; i += 2, S += 2 ) + { + DT s0 = S[-cn] + S[0]*2 + S[cn], s1 = S[1-cn] + S[1]*2 + S[1+cn]; + D[i] = s0; D[i+1] = s1; + } + else if( kx[0] == -2 && kx[1] 
== 1 ) + for( ; i <= width - 2; i += 2, S += 2 ) + { + DT s0 = S[-cn] - S[0]*2 + S[cn], s1 = S[1-cn] - S[1]*2 + S[1+cn]; + D[i] = s0; D[i+1] = s1; + } + else + { + DT k0 = kx[0], k1 = kx[1]; + for( ; i <= width - 2; i += 2, S += 2 ) + { + DT s0 = S[0]*k0 + (S[-cn] + S[cn])*k1, s1 = S[1]*k0 + (S[1-cn] + S[1+cn])*k1; + D[i] = s0; D[i+1] = s1; + } + } + } + else if( this->ksize == 5 ) + { + DT k0 = kx[0], k1 = kx[1], k2 = kx[2]; + if( k0 == -2 && k1 == 0 && k2 == 1 ) + for( ; i <= width - 2; i += 2, S += 2 ) + { + DT s0 = -2*S[0] + S[-cn*2] + S[cn*2]; + DT s1 = -2*S[1] + S[1-cn*2] + S[1+cn*2]; + D[i] = s0; D[i+1] = s1; + } + else + for( ; i <= width - 2; i += 2, S += 2 ) + { + DT s0 = S[0]*k0 + (S[-cn] + S[cn])*k1 + (S[-cn*2] + S[cn*2])*k2; + DT s1 = S[1]*k0 + (S[1-cn] + S[1+cn])*k1 + (S[1-cn*2] + S[1+cn*2])*k2; + D[i] = s0; D[i+1] = s1; + } + } + + for( ; i < width; i++, S++ ) + { + DT s0 = kx[0]*S[0]; + for( k = 1, j = cn; k <= ksize2; k++, j += cn ) + s0 += kx[k]*(S[j] + S[-j]); + D[i] = s0; + } + } + else + { + if( this->ksize == 3 ) + { + if( kx[0] == 0 && kx[1] == 1 ) + for( ; i <= width - 2; i += 2, S += 2 ) + { + DT s0 = S[cn] - S[-cn], s1 = S[1+cn] - S[1-cn]; + D[i] = s0; D[i+1] = s1; + } + else + { + DT k1 = kx[1]; + for( ; i <= width - 2; i += 2, S += 2 ) + { + DT s0 = (S[cn] - S[-cn])*k1, s1 = (S[1+cn] - S[1-cn])*k1; + D[i] = s0; D[i+1] = s1; + } + } + } + else if( this->ksize == 5 ) + { + DT k1 = kx[1], k2 = kx[2]; + for( ; i <= width - 2; i += 2, S += 2 ) + { + DT s0 = (S[cn] - S[-cn])*k1 + (S[cn*2] - S[-cn*2])*k2; + DT s1 = (S[1+cn] - S[1-cn])*k1 + (S[1+cn*2] - S[1-cn*2])*k2; + D[i] = s0; D[i+1] = s1; + } + } + + for( ; i < width; i++, S++ ) + { + DT s0 = kx[0]*S[0]; + for( k = 1, j = cn; k <= ksize2; k++, j += cn ) + s0 += kx[k]*(S[j] - S[-j]); + D[i] = s0; + } + } + } + + int symmetryType; +}; + + +template struct ColumnFilter : public BaseColumnFilter +{ + typedef typename CastOp::type1 ST; + typedef typename CastOp::rtype DT; + + ColumnFilter( const 
Mat& _kernel, int _anchor, + double _delta, const CastOp& _castOp=CastOp(), + const VecOp& _vecOp=VecOp() ) + { + if( _kernel.isContinuous() ) + kernel = _kernel; + else + _kernel.copyTo(kernel); + anchor = _anchor; + ksize = kernel.rows + kernel.cols - 1; + delta = saturate_cast(_delta); + castOp0 = _castOp; + vecOp = _vecOp; + CV_Assert( kernel.type() == DataType::type && + (kernel.rows == 1 || kernel.cols == 1)); + } + + void operator()(const uchar** src, uchar* dst, int dststep, int count, int width) + { + const ST* ky = (const ST*)kernel.data; + ST _delta = delta; + int _ksize = ksize; + int i, k; + CastOp castOp = castOp0; + + for( ; count--; dst += dststep, src++ ) + { + DT* D = (DT*)dst; + i = vecOp(src, dst, width); + for( ; i <= width - 4; i += 4 ) + { + ST f = ky[0]; + const ST* S = (const ST*)src[0] + i; + ST s0 = f*S[0] + _delta, s1 = f*S[1] + _delta, + s2 = f*S[2] + _delta, s3 = f*S[3] + _delta; + + for( k = 1; k < _ksize; k++ ) + { + S = (const ST*)src[k] + i; f = ky[k]; + s0 += f*S[0]; s1 += f*S[1]; + s2 += f*S[2]; s3 += f*S[3]; + } + + D[i] = castOp(s0); D[i+1] = castOp(s1); + D[i+2] = castOp(s2); D[i+3] = castOp(s3); + } + + for( ; i < width; i++ ) + { + ST s0 = ky[0]*((const ST*)src[0])[i] + _delta; + for( k = 1; k < _ksize; k++ ) + s0 += ky[k]*((const ST*)src[k])[i]; + D[i] = castOp(s0); + } + } + } + + Mat kernel; + CastOp castOp0; + VecOp vecOp; + ST delta; +}; + + +template struct SymmColumnFilter : public ColumnFilter +{ + typedef typename CastOp::type1 ST; + typedef typename CastOp::rtype DT; + + SymmColumnFilter( const Mat& _kernel, int _anchor, + double _delta, int _symmetryType, + const CastOp& _castOp=CastOp(), + const VecOp& _vecOp=VecOp()) + : ColumnFilter( _kernel, _anchor, _delta, _castOp, _vecOp ) + { + symmetryType = _symmetryType; + CV_Assert( (symmetryType & (KERNEL_SYMMETRICAL | KERNEL_ASYMMETRICAL)) != 0 ); + } + + void operator()(const uchar** src, uchar* dst, int dststep, int count, int width) + { + int ksize2 = 
this->ksize/2; + const ST* ky = (const ST*)this->kernel.data + ksize2; + int i, k; + bool symmetrical = (symmetryType & KERNEL_SYMMETRICAL) != 0; + ST _delta = this->delta; + CastOp castOp = this->castOp0; + src += ksize2; + + if( symmetrical ) + { + for( ; count--; dst += dststep, src++ ) + { + DT* D = (DT*)dst; + i = (this->vecOp)(src, dst, width); + + for( ; i <= width - 4; i += 4 ) + { + ST f = ky[0]; + const ST* S = (const ST*)src[0] + i, *S2; + ST s0 = f*S[0] + _delta, s1 = f*S[1] + _delta, + s2 = f*S[2] + _delta, s3 = f*S[3] + _delta; + + for( k = 1; k <= ksize2; k++ ) + { + S = (const ST*)src[k] + i; + S2 = (const ST*)src[-k] + i; + f = ky[k]; + s0 += f*(S[0] + S2[0]); + s1 += f*(S[1] + S2[1]); + s2 += f*(S[2] + S2[2]); + s3 += f*(S[3] + S2[3]); + } + + D[i] = castOp(s0); D[i+1] = castOp(s1); + D[i+2] = castOp(s2); D[i+3] = castOp(s3); + } + + for( ; i < width; i++ ) + { + ST s0 = ky[0]*((const ST*)src[0])[i] + _delta; + for( k = 1; k <= ksize2; k++ ) + s0 += ky[k]*(((const ST*)src[k])[i] + ((const ST*)src[-k])[i]); + D[i] = castOp(s0); + } + } + } + else + { + for( ; count--; dst += dststep, src++ ) + { + DT* D = (DT*)dst; + i = this->vecOp(src, dst, width); + + for( ; i <= width - 4; i += 4 ) + { + ST f = ky[0]; + const ST *S, *S2; + ST s0 = _delta, s1 = _delta, s2 = _delta, s3 = _delta; + + for( k = 1; k <= ksize2; k++ ) + { + S = (const ST*)src[k] + i; + S2 = (const ST*)src[-k] + i; + f = ky[k]; + s0 += f*(S[0] - S2[0]); + s1 += f*(S[1] - S2[1]); + s2 += f*(S[2] - S2[2]); + s3 += f*(S[3] - S2[3]); + } + + D[i] = castOp(s0); D[i+1] = castOp(s1); + D[i+2] = castOp(s2); D[i+3] = castOp(s3); + } + + for( ; i < width; i++ ) + { + ST s0 = _delta; + for( k = 1; k <= ksize2; k++ ) + s0 += ky[k]*(((const ST*)src[k])[i] - ((const ST*)src[-k])[i]); + D[i] = castOp(s0); + } + } + } + } + + int symmetryType; +}; + + +template +struct SymmColumnSmallFilter : public SymmColumnFilter +{ + typedef typename CastOp::type1 ST; + typedef typename CastOp::rtype DT; + + 
SymmColumnSmallFilter( const Mat& _kernel, int _anchor, + double _delta, int _symmetryType, + const CastOp& _castOp=CastOp(), + const VecOp& _vecOp=VecOp()) + : SymmColumnFilter( _kernel, _anchor, _delta, _symmetryType, _castOp, _vecOp ) + { + CV_Assert( this->ksize == 3 ); + } + + void operator()(const uchar** src, uchar* dst, int dststep, int count, int width) + { + int ksize2 = this->ksize/2; + const ST* ky = (const ST*)this->kernel.data + ksize2; + int i; + bool symmetrical = (this->symmetryType & KERNEL_SYMMETRICAL) != 0; + bool is_1_2_1 = ky[0] == 1 && ky[1] == 2; + bool is_1_m2_1 = ky[0] == 1 && ky[1] == -2; + bool is_m1_0_1 = ky[1] == 1 || ky[1] == -1; + ST f0 = ky[0], f1 = ky[1]; + ST _delta = this->delta; + CastOp castOp = this->castOp0; + src += ksize2; + + for( ; count--; dst += dststep, src++ ) + { + DT* D = (DT*)dst; + i = (this->vecOp)(src, dst, width); + const ST* S0 = (const ST*)src[-1]; + const ST* S1 = (const ST*)src[0]; + const ST* S2 = (const ST*)src[1]; + + if( symmetrical ) + { + if( is_1_2_1 ) + { + for( ; i <= width - 4; i += 4 ) + { + ST s0 = S0[i] + S1[i]*2 + S2[i] + _delta; + ST s1 = S0[i+1] + S1[i+1]*2 + S2[i+1] + _delta; + D[i] = castOp(s0); + D[i+1] = castOp(s1); + + s0 = S0[i+2] + S1[i+2]*2 + S2[i+2] + _delta; + s1 = S0[i+3] + S1[i+3]*2 + S2[i+3] + _delta; + D[i+2] = castOp(s0); + D[i+3] = castOp(s1); + } + } + else if( is_1_m2_1 ) + { + for( ; i <= width - 4; i += 4 ) + { + ST s0 = S0[i] - S1[i]*2 + S2[i] + _delta; + ST s1 = S0[i+1] - S1[i+1]*2 + S2[i+1] + _delta; + D[i] = castOp(s0); + D[i+1] = castOp(s1); + + s0 = S0[i+2] - S1[i+2]*2 + S2[i+2] + _delta; + s1 = S0[i+3] - S1[i+3]*2 + S2[i+3] + _delta; + D[i+2] = castOp(s0); + D[i+3] = castOp(s1); + } + } + else + { + for( ; i <= width - 4; i += 4 ) + { + ST s0 = (S0[i] + S2[i])*f1 + S1[i]*f0 + _delta; + ST s1 = (S0[i+1] + S2[i+1])*f1 + S1[i+1]*f0 + _delta; + D[i] = castOp(s0); + D[i+1] = castOp(s1); + + s0 = (S0[i+2] + S2[i+2])*f1 + S1[i+2]*f0 + _delta; + s1 = (S0[i+3] + S2[i+3])*f1 
+ S1[i+3]*f0 + _delta; + D[i+2] = castOp(s0); + D[i+3] = castOp(s1); + } + } + + for( ; i < width; i++ ) + D[i] = castOp((S0[i] + S2[i])*f1 + S1[i]*f0 + _delta); + } + else + { + if( is_m1_0_1 ) + { + if( f1 < 0 ) + std::swap(S0, S2); + + for( ; i <= width - 4; i += 4 ) + { + ST s0 = S2[i] - S0[i] + _delta; + ST s1 = S2[i+1] - S0[i+1] + _delta; + D[i] = castOp(s0); + D[i+1] = castOp(s1); + + s0 = S2[i+2] - S0[i+2] + _delta; + s1 = S2[i+3] - S0[i+3] + _delta; + D[i+2] = castOp(s0); + D[i+3] = castOp(s1); + } + + if( f1 < 0 ) + std::swap(S0, S2); + } + else + { + for( ; i <= width - 4; i += 4 ) + { + ST s0 = (S2[i] - S0[i])*f1 + _delta; + ST s1 = (S2[i+1] - S0[i+1])*f1 + _delta; + D[i] = castOp(s0); + D[i+1] = castOp(s1); + + s0 = (S2[i+2] - S0[i+2])*f1 + _delta; + s1 = (S2[i+3] - S0[i+3])*f1 + _delta; + D[i+2] = castOp(s0); + D[i+3] = castOp(s1); + } + } + + for( ; i < width; i++ ) + D[i] = castOp((S2[i] - S0[i])*f1 + _delta); + } + } + } +}; + +template struct Cast +{ + typedef ST type1; + typedef DT rtype; + + DT operator()(ST val) const { return saturate_cast
(val); } +}; + +template struct FixedPtCast +{ + typedef ST type1; + typedef DT rtype; + enum { SHIFT = bits, DELTA = 1 << (bits-1) }; + + DT operator()(ST val) const { return saturate_cast
((val + DELTA)>>SHIFT); } +}; + +template struct FixedPtCastEx +{ + typedef ST type1; + typedef DT rtype; + + FixedPtCastEx() : SHIFT(0), DELTA(0) {} + FixedPtCastEx(int bits) : SHIFT(bits), DELTA(bits ? 1 << (bits-1) : 0) {} + DT operator()(ST val) const { return saturate_cast
((val + DELTA)>>SHIFT); } + int SHIFT, DELTA; +}; + +} + +cv::Ptr cv::getLinearRowFilter( int srcType, int bufType, + InputArray _kernel, int anchor, + int symmetryType ) +{ + Mat kernel = _kernel.getMat(); + int sdepth = CV_MAT_DEPTH(srcType), ddepth = CV_MAT_DEPTH(bufType); + int cn = CV_MAT_CN(srcType); + CV_Assert( cn == CV_MAT_CN(bufType) && + ddepth >= std::max(sdepth, CV_32S) && + kernel.type() == ddepth ); + int ksize = kernel.rows + kernel.cols - 1; + + if( (symmetryType & (KERNEL_SYMMETRICAL|KERNEL_ASYMMETRICAL)) != 0 && ksize <= 5 ) + { + if( sdepth == CV_8U && ddepth == CV_32S ) + return Ptr(new SymmRowSmallFilter + (kernel, anchor, symmetryType, SymmRowSmallVec_8u32s(kernel, symmetryType))); + if( sdepth == CV_32F && ddepth == CV_32F ) + return Ptr(new SymmRowSmallFilter + (kernel, anchor, symmetryType, SymmRowSmallVec_32f(kernel, symmetryType))); + } + + if( sdepth == CV_8U && ddepth == CV_32S ) + return Ptr(new RowFilter + (kernel, anchor, RowVec_8u32s(kernel))); + if( sdepth == CV_8U && ddepth == CV_32F ) + return Ptr(new RowFilter(kernel, anchor)); + if( sdepth == CV_8U && ddepth == CV_64F ) + return Ptr(new RowFilter(kernel, anchor)); + if( sdepth == CV_16U && ddepth == CV_32F ) + return Ptr(new RowFilter(kernel, anchor)); + if( sdepth == CV_16U && ddepth == CV_64F ) + return Ptr(new RowFilter(kernel, anchor)); + if( sdepth == CV_16S && ddepth == CV_32F ) + return Ptr(new RowFilter(kernel, anchor)); + if( sdepth == CV_16S && ddepth == CV_64F ) + return Ptr(new RowFilter(kernel, anchor)); + if( sdepth == CV_32F && ddepth == CV_32F ) + return Ptr(new RowFilter + (kernel, anchor, RowVec_32f(kernel))); + if( sdepth == CV_64F && ddepth == CV_64F ) + return Ptr(new RowFilter(kernel, anchor)); + + CV_Error_( CV_StsNotImplemented, + ("Unsupported combination of source format (=%d), and buffer format (=%d)", + srcType, bufType)); + + return Ptr(0); +} + + +cv::Ptr cv::getLinearColumnFilter( int bufType, int dstType, + InputArray _kernel, int anchor, + int 
symmetryType, double delta, + int bits ) +{ + Mat kernel = _kernel.getMat(); + int sdepth = CV_MAT_DEPTH(bufType), ddepth = CV_MAT_DEPTH(dstType); + int cn = CV_MAT_CN(dstType); + CV_Assert( cn == CV_MAT_CN(bufType) && + sdepth >= std::max(ddepth, CV_32S) && + kernel.type() == sdepth ); + + if( !(symmetryType & (KERNEL_SYMMETRICAL|KERNEL_ASYMMETRICAL)) ) + { + if( ddepth == CV_8U && sdepth == CV_32S ) + return Ptr(new ColumnFilter, ColumnNoVec> + (kernel, anchor, delta, FixedPtCastEx(bits))); + if( ddepth == CV_8U && sdepth == CV_32F ) + return Ptr(new ColumnFilter, ColumnNoVec>(kernel, anchor, delta)); + if( ddepth == CV_8U && sdepth == CV_64F ) + return Ptr(new ColumnFilter, ColumnNoVec>(kernel, anchor, delta)); + if( ddepth == CV_16U && sdepth == CV_32F ) + return Ptr(new ColumnFilter, ColumnNoVec>(kernel, anchor, delta)); + if( ddepth == CV_16U && sdepth == CV_64F ) + return Ptr(new ColumnFilter, ColumnNoVec>(kernel, anchor, delta)); + if( ddepth == CV_16S && sdepth == CV_32F ) + return Ptr(new ColumnFilter, ColumnNoVec>(kernel, anchor, delta)); + if( ddepth == CV_16S && sdepth == CV_64F ) + return Ptr(new ColumnFilter, ColumnNoVec>(kernel, anchor, delta)); + if( ddepth == CV_32F && sdepth == CV_32F ) + return Ptr(new ColumnFilter, ColumnNoVec>(kernel, anchor, delta)); + if( ddepth == CV_64F && sdepth == CV_64F ) + return Ptr(new ColumnFilter, ColumnNoVec>(kernel, anchor, delta)); + } + else + { + int ksize = kernel.rows + kernel.cols - 1; + if( ksize == 3 ) + { + if( ddepth == CV_8U && sdepth == CV_32S ) + return Ptr(new SymmColumnSmallFilter< + FixedPtCastEx, SymmColumnVec_32s8u> + (kernel, anchor, delta, symmetryType, FixedPtCastEx(bits), + SymmColumnVec_32s8u(kernel, symmetryType, bits, delta))); + if( ddepth == CV_16S && sdepth == CV_32S && bits == 0 ) + return Ptr(new SymmColumnSmallFilter, + SymmColumnSmallVec_32s16s>(kernel, anchor, delta, symmetryType, + Cast(), SymmColumnSmallVec_32s16s(kernel, symmetryType, bits, delta))); + if( ddepth == CV_32F && 
sdepth == CV_32F ) + return Ptr(new SymmColumnSmallFilter< + Cast,SymmColumnSmallVec_32f> + (kernel, anchor, delta, symmetryType, Cast(), + SymmColumnSmallVec_32f(kernel, symmetryType, 0, delta))); + } + if( ddepth == CV_8U && sdepth == CV_32S ) + return Ptr(new SymmColumnFilter, SymmColumnVec_32s8u> + (kernel, anchor, delta, symmetryType, FixedPtCastEx(bits), + SymmColumnVec_32s8u(kernel, symmetryType, bits, delta))); + if( ddepth == CV_8U && sdepth == CV_32F ) + return Ptr(new SymmColumnFilter, ColumnNoVec> + (kernel, anchor, delta, symmetryType)); + if( ddepth == CV_8U && sdepth == CV_64F ) + return Ptr(new SymmColumnFilter, ColumnNoVec> + (kernel, anchor, delta, symmetryType)); + if( ddepth == CV_16U && sdepth == CV_32F ) + return Ptr(new SymmColumnFilter, ColumnNoVec> + (kernel, anchor, delta, symmetryType)); + if( ddepth == CV_16U && sdepth == CV_64F ) + return Ptr(new SymmColumnFilter, ColumnNoVec> + (kernel, anchor, delta, symmetryType)); + if( ddepth == CV_16S && sdepth == CV_32S ) + return Ptr(new SymmColumnFilter, ColumnNoVec> + (kernel, anchor, delta, symmetryType)); + if( ddepth == CV_16S && sdepth == CV_32F ) + return Ptr(new SymmColumnFilter, ColumnNoVec> + (kernel, anchor, delta, symmetryType)); + if( ddepth == CV_16S && sdepth == CV_64F ) + return Ptr(new SymmColumnFilter, ColumnNoVec> + (kernel, anchor, delta, symmetryType)); + if( ddepth == CV_32F && sdepth == CV_32F ) + return Ptr(new SymmColumnFilter, SymmColumnVec_32f> + (kernel, anchor, delta, symmetryType, Cast(), + SymmColumnVec_32f(kernel, symmetryType, 0, delta))); + if( ddepth == CV_64F && sdepth == CV_64F ) + return Ptr(new SymmColumnFilter, ColumnNoVec> + (kernel, anchor, delta, symmetryType)); + } + + CV_Error_( CV_StsNotImplemented, + ("Unsupported combination of buffer format (=%d), and destination format (=%d)", + bufType, dstType)); + + return Ptr(0); +} + + +cv::Ptr cv::createSeparableLinearFilter( + int _srcType, int _dstType, + InputArray __rowKernel, InputArray __columnKernel, 
+ Point _anchor, double _delta, + int _rowBorderType, int _columnBorderType, + const Scalar& _borderValue ) +{ + Mat _rowKernel = __rowKernel.getMat(), _columnKernel = __columnKernel.getMat(); + _srcType = CV_MAT_TYPE(_srcType); + _dstType = CV_MAT_TYPE(_dstType); + int sdepth = CV_MAT_DEPTH(_srcType), ddepth = CV_MAT_DEPTH(_dstType); + int cn = CV_MAT_CN(_srcType); + CV_Assert( cn == CV_MAT_CN(_dstType) ); + int rsize = _rowKernel.rows + _rowKernel.cols - 1; + int csize = _columnKernel.rows + _columnKernel.cols - 1; + if( _anchor.x < 0 ) + _anchor.x = rsize/2; + if( _anchor.y < 0 ) + _anchor.y = csize/2; + int rtype = getKernelType(_rowKernel, + _rowKernel.rows == 1 ? Point(_anchor.x, 0) : Point(0, _anchor.x)); + int ctype = getKernelType(_columnKernel, + _columnKernel.rows == 1 ? Point(_anchor.y, 0) : Point(0, _anchor.y)); + Mat rowKernel, columnKernel; + + int bdepth = std::max(CV_32F,std::max(sdepth, ddepth)); + int bits = 0; + + if( sdepth == CV_8U && + ((rtype == KERNEL_SMOOTH+KERNEL_SYMMETRICAL && + ctype == KERNEL_SMOOTH+KERNEL_SYMMETRICAL && + ddepth == CV_8U) || + ((rtype & (KERNEL_SYMMETRICAL+KERNEL_ASYMMETRICAL)) && + (ctype & (KERNEL_SYMMETRICAL+KERNEL_ASYMMETRICAL)) && + (rtype & ctype & KERNEL_INTEGER) && + ddepth == CV_16S)) ) + { + bdepth = CV_32S; + bits = ddepth == CV_8U ? 
8 : 0; + _rowKernel.convertTo( rowKernel, CV_32S, 1 << bits ); + _columnKernel.convertTo( columnKernel, CV_32S, 1 << bits ); + bits *= 2; + _delta *= (1 << bits); + } + else + { + if( _rowKernel.type() != bdepth ) + _rowKernel.convertTo( rowKernel, bdepth ); + else + rowKernel = _rowKernel; + if( _columnKernel.type() != bdepth ) + _columnKernel.convertTo( columnKernel, bdepth ); + else + columnKernel = _columnKernel; + } + + int _bufType = CV_MAKETYPE(bdepth, cn); + Ptr _rowFilter = getLinearRowFilter( + _srcType, _bufType, rowKernel, _anchor.x, rtype); + Ptr _columnFilter = getLinearColumnFilter( + _bufType, _dstType, columnKernel, _anchor.y, ctype, _delta, bits ); + + return Ptr( new FilterEngine(Ptr(0), _rowFilter, _columnFilter, + _srcType, _dstType, _bufType, _rowBorderType, _columnBorderType, _borderValue )); +} + + +/****************************************************************************************\ +* Non-separable linear filter * +\****************************************************************************************/ + +namespace cv +{ + +void preprocess2DKernel( const Mat& kernel, vector& coords, vector& coeffs ) +{ + int i, j, k, nz = countNonZero(kernel), ktype = kernel.type(); + if(nz == 0) + nz = 1; + CV_Assert( ktype == CV_8U || ktype == CV_32S || ktype == CV_32F || ktype == CV_64F ); + coords.resize(nz); + coeffs.resize(nz*getElemSize(ktype)); + uchar* _coeffs = &coeffs[0]; + + for( i = k = 0; i < kernel.rows; i++ ) + { + const uchar* krow = kernel.data + kernel.step*i; + for( j = 0; j < kernel.cols; j++ ) + { + if( ktype == CV_8U ) + { + uchar val = krow[j]; + if( val == 0 ) + continue; + coords[k] = Point(j,i); + _coeffs[k++] = val; + } + else if( ktype == CV_32S ) + { + int val = ((const int*)krow)[j]; + if( val == 0 ) + continue; + coords[k] = Point(j,i); + ((int*)_coeffs)[k++] = val; + } + else if( ktype == CV_32F ) + { + float val = ((const float*)krow)[j]; + if( val == 0 ) + continue; + coords[k] = Point(j,i); + 
((float*)_coeffs)[k++] = val; + } + else + { + double val = ((const double*)krow)[j]; + if( val == 0 ) + continue; + coords[k] = Point(j,i); + ((double*)_coeffs)[k++] = val; + } + } + } +} + + +template struct Filter2D : public BaseFilter +{ + typedef typename CastOp::type1 KT; + typedef typename CastOp::rtype DT; + + Filter2D( const Mat& _kernel, Point _anchor, + double _delta, const CastOp& _castOp=CastOp(), + const VecOp& _vecOp=VecOp() ) + { + anchor = _anchor; + ksize = _kernel.size(); + delta = saturate_cast(_delta); + castOp0 = _castOp; + vecOp = _vecOp; + CV_Assert( _kernel.type() == DataType::type ); + preprocess2DKernel( _kernel, coords, coeffs ); + ptrs.resize( coords.size() ); + } + + void operator()(const uchar** src, uchar* dst, int dststep, int count, int width, int cn) + { + KT _delta = delta; + const Point* pt = &coords[0]; + const KT* kf = (const KT*)&coeffs[0]; + const ST** kp = (const ST**)&ptrs[0]; + int i, k, nz = (int)coords.size(); + CastOp castOp = castOp0; + + width *= cn; + for( ; count > 0; count--, dst += dststep, src++ ) + { + DT* D = (DT*)dst; + + for( k = 0; k < nz; k++ ) + kp[k] = (const ST*)src[pt[k].y] + pt[k].x*cn; + + i = vecOp((const uchar**)kp, dst, width); + + for( ; i <= width - 4; i += 4 ) + { + KT s0 = _delta, s1 = _delta, s2 = _delta, s3 = _delta; + + for( k = 0; k < nz; k++ ) + { + const ST* sptr = kp[k] + i; + KT f = kf[k]; + s0 += f*sptr[0]; + s1 += f*sptr[1]; + s2 += f*sptr[2]; + s3 += f*sptr[3]; + } + + D[i] = castOp(s0); D[i+1] = castOp(s1); + D[i+2] = castOp(s2); D[i+3] = castOp(s3); + } + + for( ; i < width; i++ ) + { + KT s0 = _delta; + for( k = 0; k < nz; k++ ) + s0 += kf[k]*kp[k][i]; + D[i] = castOp(s0); + } + } + } + + vector coords; + vector coeffs; + vector ptrs; + KT delta; + CastOp castOp0; + VecOp vecOp; +}; + +} + +cv::Ptr cv::getLinearFilter(int srcType, int dstType, + InputArray filter_kernel, Point anchor, + double delta, int bits) +{ + Mat _kernel = filter_kernel.getMat(); + int sdepth = 
CV_MAT_DEPTH(srcType), ddepth = CV_MAT_DEPTH(dstType); + int cn = CV_MAT_CN(srcType), kdepth = _kernel.depth(); + CV_Assert( cn == CV_MAT_CN(dstType) && ddepth >= sdepth ); + + anchor = normalizeAnchor(anchor, _kernel.size()); + + /*if( sdepth == CV_8U && ddepth == CV_8U && kdepth == CV_32S ) + return Ptr(new Filter2D, FilterVec_8u> + (_kernel, anchor, delta, FixedPtCastEx(bits), + FilterVec_8u(_kernel, bits, delta))); + if( sdepth == CV_8U && ddepth == CV_16S && kdepth == CV_32S ) + return Ptr(new Filter2D, FilterVec_8u16s> + (_kernel, anchor, delta, FixedPtCastEx(bits), + FilterVec_8u16s(_kernel, bits, delta)));*/ + + kdepth = sdepth == CV_64F || ddepth == CV_64F ? CV_64F : CV_32F; + Mat kernel; + if( _kernel.type() == kdepth ) + kernel = _kernel; + else + _kernel.convertTo(kernel, kdepth, _kernel.type() == CV_32S ? 1./(1 << bits) : 1.); + + if( sdepth == CV_8U && ddepth == CV_8U ) + return Ptr(new Filter2D, FilterVec_8u> + (kernel, anchor, delta, Cast(), FilterVec_8u(kernel, 0, delta))); + if( sdepth == CV_8U && ddepth == CV_16U ) + return Ptr(new Filter2D, FilterNoVec>(kernel, anchor, delta)); + if( sdepth == CV_8U && ddepth == CV_16S ) + return Ptr(new Filter2D, FilterVec_8u16s> + (kernel, anchor, delta, Cast(), FilterVec_8u16s(kernel, 0, delta))); + if( sdepth == CV_8U && ddepth == CV_32F ) + return Ptr(new Filter2D, FilterNoVec>(kernel, anchor, delta)); + if( sdepth == CV_8U && ddepth == CV_64F ) + return Ptr(new Filter2D, FilterNoVec>(kernel, anchor, delta)); + + if( sdepth == CV_16U && ddepth == CV_16U ) + return Ptr(new Filter2D, FilterNoVec>(kernel, anchor, delta)); + if( sdepth == CV_16U && ddepth == CV_32F ) + return Ptr(new Filter2D, FilterNoVec>(kernel, anchor, delta)); + if( sdepth == CV_16U && ddepth == CV_64F ) + return Ptr(new Filter2D, FilterNoVec>(kernel, anchor, delta)); + + if( sdepth == CV_16S && ddepth == CV_16S ) + return Ptr(new Filter2D, FilterNoVec>(kernel, anchor, delta)); + if( sdepth == CV_16S && ddepth == CV_32F ) + return Ptr(new 
Filter2D, FilterNoVec>(kernel, anchor, delta)); + if( sdepth == CV_16S && ddepth == CV_64F ) + return Ptr(new Filter2D, FilterNoVec>(kernel, anchor, delta)); + + if( sdepth == CV_32F && ddepth == CV_32F ) + return Ptr(new Filter2D, FilterVec_32f> + (kernel, anchor, delta, Cast(), FilterVec_32f(kernel, 0, delta))); + if( sdepth == CV_64F && ddepth == CV_64F ) + return Ptr(new Filter2D, FilterNoVec>(kernel, anchor, delta)); + + CV_Error_( CV_StsNotImplemented, + ("Unsupported combination of source format (=%d), and destination format (=%d)", + srcType, dstType)); + + return Ptr(0); +} + + +cv::Ptr cv::createLinearFilter( int _srcType, int _dstType, + InputArray filter_kernel, + Point _anchor, double _delta, + int _rowBorderType, int _columnBorderType, + const Scalar& _borderValue ) +{ + Mat _kernel = filter_kernel.getMat(); + _srcType = CV_MAT_TYPE(_srcType); + _dstType = CV_MAT_TYPE(_dstType); + int cn = CV_MAT_CN(_srcType); + CV_Assert( cn == CV_MAT_CN(_dstType) ); + + Mat kernel = _kernel; + int bits = 0; + + /*int sdepth = CV_MAT_DEPTH(_srcType), ddepth = CV_MAT_DEPTH(_dstType); + int ktype = _kernel.depth() == CV_32S ? KERNEL_INTEGER : getKernelType(_kernel, _anchor); + if( sdepth == CV_8U && (ddepth == CV_8U || ddepth == CV_16S) && + _kernel.rows*_kernel.cols <= (1 << 10) ) + { + bits = (ktype & KERNEL_INTEGER) ? 
0 : 11; + _kernel.convertTo(kernel, CV_32S, 1 << bits); + }*/ + + Ptr _filter2D = getLinearFilter(_srcType, _dstType, + kernel, _anchor, _delta, bits); + + return Ptr(new FilterEngine(_filter2D, Ptr(0), + Ptr(0), _srcType, _dstType, _srcType, + _rowBorderType, _columnBorderType, _borderValue )); +} + + +void cv::filter2D( InputArray _src, OutputArray _dst, int ddepth, + InputArray _kernel, Point anchor, + double delta, int borderType ) +{ + Mat src = _src.getMat(), kernel = _kernel.getMat(); + + if( ddepth < 0 ) + ddepth = src.depth(); + +#if CV_SSE2 + int dft_filter_size = ((src.depth() == CV_8U && (ddepth == CV_8U || ddepth == CV_16S)) || + (src.depth() == CV_32F && ddepth == CV_32F)) && checkHardwareSupport(CV_CPU_SSE3)? 130 : 50; +#else + int dft_filter_size = 50; +#endif + + _dst.create( src.size(), CV_MAKETYPE(ddepth, src.channels()) ); + Mat dst = _dst.getMat(); + anchor = normalizeAnchor(anchor, kernel.size()); + + if( kernel.cols*kernel.rows >= dft_filter_size ) + { + Mat temp; + if( src.data != dst.data ) + temp = dst; + else + temp.create(dst.size(), dst.type()); + crossCorr( src, kernel, temp, src.size(), + CV_MAKETYPE(ddepth, src.channels()), + anchor, delta, borderType ); + if( temp.data != dst.data ) + temp.copyTo(dst); + return; + } + + Ptr f = createLinearFilter(src.type(), dst.type(), kernel, + anchor, delta, borderType ); + f->apply(src, dst); +} + + +void cv::sepFilter2D( InputArray _src, OutputArray _dst, int ddepth, + InputArray _kernelX, InputArray _kernelY, Point anchor, + double delta, int borderType ) +{ + Mat src = _src.getMat(), kernelX = _kernelX.getMat(), kernelY = _kernelY.getMat(); + + if( ddepth < 0 ) + ddepth = src.depth(); + + _dst.create( src.size(), CV_MAKETYPE(ddepth, src.channels()) ); + Mat dst = _dst.getMat(); + + Ptr f = createSeparableLinearFilter(src.type(), + dst.type(), kernelX, kernelY, anchor, delta, borderType & ~BORDER_ISOLATED ); + f->apply(src, dst, Rect(0,0,-1,-1), Point(), (borderType & BORDER_ISOLATED) != 0 ); 
+} + + +CV_IMPL void +cvFilter2D( const CvArr* srcarr, CvArr* dstarr, const CvMat* _kernel, CvPoint anchor ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + cv::Mat kernel = cv::cvarrToMat(_kernel); + + CV_Assert( src.size() == dst.size() && src.channels() == dst.channels() ); + + cv::filter2D( src, dst, dst.depth(), kernel, anchor, 0, cv::BORDER_REPLICATE ); +} + +/* End of file. */ diff --git a/opencv/imgproc/floodfill.cpp b/opencv/imgproc/floodfill.cpp new file mode 100644 index 0000000..25b6a01 --- /dev/null +++ b/opencv/imgproc/floodfill.cpp @@ -0,0 +1,1141 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" + +typedef struct CvFFillSegment +{ + ushort y; + ushort l; + ushort r; + ushort prevl; + ushort prevr; + short dir; +} +CvFFillSegment; + +#define UP 1 +#define DOWN -1 + +#define ICV_PUSH( Y, L, R, PREV_L, PREV_R, DIR )\ +{ \ + tail->y = (ushort)(Y); \ + tail->l = (ushort)(L); \ + tail->r = (ushort)(R); \ + tail->prevl = (ushort)(PREV_L); \ + tail->prevr = (ushort)(PREV_R); \ + tail->dir = (short)(DIR); \ + if( ++tail >= buffer_end ) \ + tail = buffer; \ +} + + +#define ICV_POP( Y, L, R, PREV_L, PREV_R, DIR ) \ +{ \ + Y = head->y; \ + L = head->l; \ + R = head->r; \ + PREV_L = head->prevl; \ + PREV_R = head->prevr; \ + DIR = head->dir; \ + if( ++head >= buffer_end ) \ + head = buffer; \ +} + + +#define ICV_EQ_C3( p1, p2 ) \ + ((p1)[0] == (p2)[0] && (p1)[1] == (p2)[1] && (p1)[2] == (p2)[2]) + +#define ICV_SET_C3( p, q ) \ + ((p)[0] = (q)[0], (p)[1] = (q)[1], (p)[2] = (q)[2]) + +/****************************************************************************************\ +* Simple Floodfill (repainting single-color connected component) * +\****************************************************************************************/ + +static void 
+icvFloodFill_8u_CnIR( uchar* pImage, int step, CvSize roi, CvPoint seed, + uchar* _newVal, CvConnectedComp* region, int flags, + CvFFillSegment* buffer, int buffer_size, int cn ) +{ + uchar* img = pImage + step * seed.y; + int i, L, R; + int area = 0; + int val0[] = {0,0,0}; + uchar newVal[] = {0,0,0}; + int XMin, XMax, YMin = seed.y, YMax = seed.y; + int _8_connectivity = (flags & 255) == 8; + CvFFillSegment* buffer_end = buffer + buffer_size, *head = buffer, *tail = buffer; + + L = R = XMin = XMax = seed.x; + + if( cn == 1 ) + { + val0[0] = img[L]; + newVal[0] = _newVal[0]; + + img[L] = newVal[0]; + + while( ++R < roi.width && img[R] == val0[0] ) + img[R] = newVal[0]; + + while( --L >= 0 && img[L] == val0[0] ) + img[L] = newVal[0]; + } + else + { + assert( cn == 3 ); + ICV_SET_C3( val0, img + L*3 ); + ICV_SET_C3( newVal, _newVal ); + + ICV_SET_C3( img + L*3, newVal ); + + while( --L >= 0 && ICV_EQ_C3( img + L*3, val0 )) + ICV_SET_C3( img + L*3, newVal ); + + while( ++R < roi.width && ICV_EQ_C3( img + R*3, val0 )) + ICV_SET_C3( img + R*3, newVal ); + } + + XMax = --R; + XMin = ++L; + ICV_PUSH( seed.y, L, R, R + 1, R, UP ); + + while( head != tail ) + { + int k, YC, PL, PR, dir; + ICV_POP( YC, L, R, PL, PR, dir ); + + int data[][3] = + { + {-dir, L - _8_connectivity, R + _8_connectivity}, + {dir, L - _8_connectivity, PL - 1}, + {dir, PR + 1, R + _8_connectivity} + }; + + if( region ) + { + area += R - L + 1; + + if( XMax < R ) XMax = R; + if( XMin > L ) XMin = L; + if( YMax < YC ) YMax = YC; + if( YMin > YC ) YMin = YC; + } + + for( k = 0/*(unsigned)(YC - dir) >= (unsigned)roi.height*/; k < 3; k++ ) + { + dir = data[k][0]; + img = pImage + (YC + dir) * step; + int left = data[k][1]; + int right = data[k][2]; + + if( (unsigned)(YC + dir) >= (unsigned)roi.height ) + continue; + + if( cn == 1 ) + for( i = left; i <= right; i++ ) + { + if( (unsigned)i < (unsigned)roi.width && img[i] == val0[0] ) + { + int j = i; + img[i] = newVal[0]; + while( --j >= 0 && img[j] == 
val0[0] ) + img[j] = newVal[0]; + + while( ++i < roi.width && img[i] == val0[0] ) + img[i] = newVal[0]; + + ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir ); + } + } + else + for( i = left; i <= right; i++ ) + { + if( (unsigned)i < (unsigned)roi.width && ICV_EQ_C3( img + i*3, val0 )) + { + int j = i; + ICV_SET_C3( img + i*3, newVal ); + while( --j >= 0 && ICV_EQ_C3( img + j*3, val0 )) + ICV_SET_C3( img + j*3, newVal ); + + while( ++i < roi.width && ICV_EQ_C3( img + i*3, val0 )) + ICV_SET_C3( img + i*3, newVal ); + + ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir ); + } + } + } + } + + if( region ) + { + region->area = area; + region->rect.x = XMin; + region->rect.y = YMin; + region->rect.width = XMax - XMin + 1; + region->rect.height = YMax - YMin + 1; + region->value = cvScalar(newVal[0], newVal[1], newVal[2], 0); + } +} + + +/* because all the operations on floats that are done during non-gradient floodfill + are just copying and comparison on equality, + we can do the whole op on 32-bit integers instead */ +static void +icvFloodFill_32f_CnIR( int* pImage, int step, CvSize roi, CvPoint seed, + int* _newVal, CvConnectedComp* region, int flags, + CvFFillSegment* buffer, int buffer_size, int cn ) +{ + int* img = pImage + (step /= sizeof(pImage[0])) * seed.y; + int i, L, R; + int area = 0; + int val0[] = {0,0,0}; + int newVal[] = {0,0,0}; + int XMin, XMax, YMin = seed.y, YMax = seed.y; + int _8_connectivity = (flags & 255) == 8; + CvFFillSegment* buffer_end = buffer + buffer_size, *head = buffer, *tail = buffer; + + L = R = XMin = XMax = seed.x; + + if( cn == 1 ) + { + val0[0] = img[L]; + newVal[0] = _newVal[0]; + + img[L] = newVal[0]; + + while( ++R < roi.width && img[R] == val0[0] ) + img[R] = newVal[0]; + + while( --L >= 0 && img[L] == val0[0] ) + img[L] = newVal[0]; + } + else + { + assert( cn == 3 ); + ICV_SET_C3( val0, img + L*3 ); + ICV_SET_C3( newVal, _newVal ); + + ICV_SET_C3( img + L*3, newVal ); + + while( --L >= 0 && ICV_EQ_C3( img + L*3, val0 )) + ICV_SET_C3( img + 
L*3, newVal ); + + while( ++R < roi.width && ICV_EQ_C3( img + R*3, val0 )) + ICV_SET_C3( img + R*3, newVal ); + } + + XMax = --R; + XMin = ++L; + ICV_PUSH( seed.y, L, R, R + 1, R, UP ); + + while( head != tail ) + { + int k, YC, PL, PR, dir; + ICV_POP( YC, L, R, PL, PR, dir ); + + int data[][3] = + { + {-dir, L - _8_connectivity, R + _8_connectivity}, + {dir, L - _8_connectivity, PL - 1}, + {dir, PR + 1, R + _8_connectivity} + }; + + if( region ) + { + area += R - L + 1; + + if( XMax < R ) XMax = R; + if( XMin > L ) XMin = L; + if( YMax < YC ) YMax = YC; + if( YMin > YC ) YMin = YC; + } + + for( k = 0/*(unsigned)(YC - dir) >= (unsigned)roi.height*/; k < 3; k++ ) + { + dir = data[k][0]; + img = pImage + (YC + dir) * step; + int left = data[k][1]; + int right = data[k][2]; + + if( (unsigned)(YC + dir) >= (unsigned)roi.height ) + continue; + + if( cn == 1 ) + for( i = left; i <= right; i++ ) + { + if( (unsigned)i < (unsigned)roi.width && img[i] == val0[0] ) + { + int j = i; + img[i] = newVal[0]; + while( --j >= 0 && img[j] == val0[0] ) + img[j] = newVal[0]; + + while( ++i < roi.width && img[i] == val0[0] ) + img[i] = newVal[0]; + + ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir ); + } + } + else + for( i = left; i <= right; i++ ) + { + if( (unsigned)i < (unsigned)roi.width && ICV_EQ_C3( img + i*3, val0 )) + { + int j = i; + ICV_SET_C3( img + i*3, newVal ); + while( --j >= 0 && ICV_EQ_C3( img + j*3, val0 )) + ICV_SET_C3( img + j*3, newVal ); + + while( ++i < roi.width && ICV_EQ_C3( img + i*3, val0 )) + ICV_SET_C3( img + i*3, newVal ); + + ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir ); + } + } + } + } + + if( region ) + { + Cv32suf v0, v1, v2; + region->area = area; + region->rect.x = XMin; + region->rect.y = YMin; + region->rect.width = XMax - XMin + 1; + region->rect.height = YMax - YMin + 1; + v0.i = newVal[0]; v1.i = newVal[1]; v2.i = newVal[2]; + region->value = cvScalar( v0.f, v1.f, v2.f ); + } +} + 
+/****************************************************************************************\ +* Gradient Floodfill * +\****************************************************************************************/ + +#define DIFF_INT_C1(p1,p2) ((unsigned)((p1)[0] - (p2)[0] + d_lw[0]) <= interval[0]) + +#define DIFF_INT_C3(p1,p2) ((unsigned)((p1)[0] - (p2)[0] + d_lw[0])<= interval[0] && \ + (unsigned)((p1)[1] - (p2)[1] + d_lw[1])<= interval[1] && \ + (unsigned)((p1)[2] - (p2)[2] + d_lw[2])<= interval[2]) + +#define DIFF_FLT_C1(p1,p2) (fabs((p1)[0] - (p2)[0] + d_lw[0]) <= interval[0]) + +#define DIFF_FLT_C3(p1,p2) (fabs((p1)[0] - (p2)[0] + d_lw[0]) <= interval[0] && \ + fabs((p1)[1] - (p2)[1] + d_lw[1]) <= interval[1] && \ + fabs((p1)[2] - (p2)[2] + d_lw[2]) <= interval[2]) + +static void +icvFloodFillGrad_8u_CnIR( uchar* pImage, int step, uchar* pMask, int maskStep, + CvSize /*roi*/, CvPoint seed, uchar* _newVal, uchar* _d_lw, + uchar* _d_up, CvConnectedComp* region, int flags, + CvFFillSegment* buffer, int buffer_size, int cn ) +{ + uchar* img = pImage + step*seed.y; + uchar* mask = (pMask += maskStep + 1) + maskStep*seed.y; + int i, L, R; + int area = 0; + int sum[] = {0,0,0}, val0[] = {0,0,0}; + uchar newVal[] = {0,0,0}; + int d_lw[] = {0,0,0}; + unsigned interval[] = {0,0,0}; + int XMin, XMax, YMin = seed.y, YMax = seed.y; + int _8_connectivity = (flags & 255) == 8; + int fixedRange = flags & CV_FLOODFILL_FIXED_RANGE; + int fillImage = (flags & CV_FLOODFILL_MASK_ONLY) == 0; + uchar newMaskVal = (uchar)(flags & 0xff00 ? 
flags >> 8 : 1); + CvFFillSegment* buffer_end = buffer + buffer_size, *head = buffer, *tail = buffer; + + L = R = seed.x; + if( mask[L] ) + return; + + mask[L] = newMaskVal; + + for( i = 0; i < cn; i++ ) + { + newVal[i] = _newVal[i]; + d_lw[i] = _d_lw[i]; + interval[i] = (unsigned)(_d_up[i] + _d_lw[i]); + if( fixedRange ) + val0[i] = img[L*cn+i]; + } + + if( cn == 1 ) + { + if( fixedRange ) + { + while( !mask[R + 1] && DIFF_INT_C1( img + (R+1), val0 )) + mask[++R] = newMaskVal; + + while( !mask[L - 1] && DIFF_INT_C1( img + (L-1), val0 )) + mask[--L] = newMaskVal; + } + else + { + while( !mask[R + 1] && DIFF_INT_C1( img + (R+1), img + R )) + mask[++R] = newMaskVal; + + while( !mask[L - 1] && DIFF_INT_C1( img + (L-1), img + L )) + mask[--L] = newMaskVal; + } + } + else + { + if( fixedRange ) + { + while( !mask[R + 1] && DIFF_INT_C3( img + (R+1)*3, val0 )) + mask[++R] = newMaskVal; + + while( !mask[L - 1] && DIFF_INT_C3( img + (L-1)*3, val0 )) + mask[--L] = newMaskVal; + } + else + { + while( !mask[R + 1] && DIFF_INT_C3( img + (R+1)*3, img + R*3 )) + mask[++R] = newMaskVal; + + while( !mask[L - 1] && DIFF_INT_C3( img + (L-1)*3, img + L*3 )) + mask[--L] = newMaskVal; + } + } + + XMax = R; + XMin = L; + ICV_PUSH( seed.y, L, R, R + 1, R, UP ); + + while( head != tail ) + { + int k, YC, PL, PR, dir, curstep; + ICV_POP( YC, L, R, PL, PR, dir ); + + int data[][3] = + { + {-dir, L - _8_connectivity, R + _8_connectivity}, + {dir, L - _8_connectivity, PL - 1}, + {dir, PR + 1, R + _8_connectivity} + }; + + unsigned length = (unsigned)(R-L); + + if( region ) + { + area += (int)length + 1; + + if( XMax < R ) XMax = R; + if( XMin > L ) XMin = L; + if( YMax < YC ) YMax = YC; + if( YMin > YC ) YMin = YC; + } + + if( cn == 1 ) + { + for( k = 0; k < 3; k++ ) + { + dir = data[k][0]; + curstep = dir * step; + img = pImage + (YC + dir) * step; + mask = pMask + (YC + dir) * maskStep; + int left = data[k][1]; + int right = data[k][2]; + + if( fixedRange ) + for( i = left; i <= right; i++ ) 
+ { + if( !mask[i] && DIFF_INT_C1( img + i, val0 )) + { + int j = i; + mask[i] = newMaskVal; + while( !mask[--j] && DIFF_INT_C1( img + j, val0 )) + mask[j] = newMaskVal; + + while( !mask[++i] && DIFF_INT_C1( img + i, val0 )) + mask[i] = newMaskVal; + + ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir ); + } + } + else if( !_8_connectivity ) + for( i = left; i <= right; i++ ) + { + if( !mask[i] && DIFF_INT_C1( img + i, img - curstep + i )) + { + int j = i; + mask[i] = newMaskVal; + while( !mask[--j] && DIFF_INT_C1( img + j, img + (j+1) )) + mask[j] = newMaskVal; + + while( !mask[++i] && + (DIFF_INT_C1( img + i, img + (i-1) ) || + (DIFF_INT_C1( img + i, img + i - curstep) && i <= R))) + mask[i] = newMaskVal; + + ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir ); + } + } + else + for( i = left; i <= right; i++ ) + { + int idx, val[1]; + + if( !mask[i] && + (((val[0] = img[i], + (unsigned)(idx = i-L-1) <= length) && + DIFF_INT_C1( val, img - curstep + (i-1))) || + ((unsigned)(++idx) <= length && + DIFF_INT_C1( val, img - curstep + i )) || + ((unsigned)(++idx) <= length && + DIFF_INT_C1( val, img - curstep + (i+1) )))) + { + int j = i; + mask[i] = newMaskVal; + while( !mask[--j] && DIFF_INT_C1( img + j, img + (j+1) )) + mask[j] = newMaskVal; + + while( !mask[++i] && + ((val[0] = img[i], + DIFF_INT_C1( val, img + (i-1) )) || + (((unsigned)(idx = i-L-1) <= length && + DIFF_INT_C1( val, img - curstep + (i-1) ))) || + ((unsigned)(++idx) <= length && + DIFF_INT_C1( val, img - curstep + i )) || + ((unsigned)(++idx) <= length && + DIFF_INT_C1( val, img - curstep + (i+1) )))) + mask[i] = newMaskVal; + + ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir ); + } + } + } + + img = pImage + YC * step; + if( fillImage ) + for( i = L; i <= R; i++ ) + img[i] = newVal[0]; + else if( region ) + for( i = L; i <= R; i++ ) + sum[0] += img[i]; + } + else + { + for( k = 0; k < 3; k++ ) + { + dir = data[k][0]; + curstep = dir * step; + img = pImage + (YC + dir) * step; + mask = pMask + (YC + dir) * maskStep; + int 
left = data[k][1]; + int right = data[k][2]; + + if( fixedRange ) + for( i = left; i <= right; i++ ) + { + if( !mask[i] && DIFF_INT_C3( img + i*3, val0 )) + { + int j = i; + mask[i] = newMaskVal; + while( !mask[--j] && DIFF_INT_C3( img + j*3, val0 )) + mask[j] = newMaskVal; + + while( !mask[++i] && DIFF_INT_C3( img + i*3, val0 )) + mask[i] = newMaskVal; + + ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir ); + } + } + else if( !_8_connectivity ) + for( i = left; i <= right; i++ ) + { + if( !mask[i] && DIFF_INT_C3( img + i*3, img - curstep + i*3 )) + { + int j = i; + mask[i] = newMaskVal; + while( !mask[--j] && DIFF_INT_C3( img + j*3, img + (j+1)*3 )) + mask[j] = newMaskVal; + + while( !mask[++i] && + (DIFF_INT_C3( img + i*3, img + (i-1)*3 ) || + (DIFF_INT_C3( img + i*3, img + i*3 - curstep) && i <= R))) + mask[i] = newMaskVal; + + ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir ); + } + } + else + for( i = left; i <= right; i++ ) + { + int idx, val[3]; + + if( !mask[i] && + (((ICV_SET_C3( val, img+i*3 ), + (unsigned)(idx = i-L-1) <= length) && + DIFF_INT_C3( val, img - curstep + (i-1)*3 )) || + ((unsigned)(++idx) <= length && + DIFF_INT_C3( val, img - curstep + i*3 )) || + ((unsigned)(++idx) <= length && + DIFF_INT_C3( val, img - curstep + (i+1)*3 )))) + { + int j = i; + mask[i] = newMaskVal; + while( !mask[--j] && DIFF_INT_C3( img + j*3, img + (j+1)*3 )) + mask[j] = newMaskVal; + + while( !mask[++i] && + ((ICV_SET_C3( val, img + i*3 ), + DIFF_INT_C3( val, img + (i-1)*3 )) || + (((unsigned)(idx = i-L-1) <= length && + DIFF_INT_C3( val, img - curstep + (i-1)*3 ))) || + ((unsigned)(++idx) <= length && + DIFF_INT_C3( val, img - curstep + i*3 )) || + ((unsigned)(++idx) <= length && + DIFF_INT_C3( val, img - curstep + (i+1)*3 )))) + mask[i] = newMaskVal; + + ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir ); + } + } + } + + img = pImage + YC * step; + if( fillImage ) + for( i = L; i <= R; i++ ) + ICV_SET_C3( img + i*3, newVal ); + else if( region ) + for( i = L; i <= R; i++ ) + { + sum[0] += 
img[i*3]; + sum[1] += img[i*3+1]; + sum[2] += img[i*3+2]; + } + } + } + + if( region ) + { + region->area = area; + region->rect.x = XMin; + region->rect.y = YMin; + region->rect.width = XMax - XMin + 1; + region->rect.height = YMax - YMin + 1; + + if( fillImage ) + region->value = cvScalar(newVal[0], newVal[1], newVal[2]); + else + { + double iarea = area ? 1./area : 0; + region->value = cvScalar(sum[0]*iarea, sum[1]*iarea, sum[2]*iarea); + } + } +} + + +static void +icvFloodFillGrad_32f_CnIR( float* pImage, int step, uchar* pMask, int maskStep, + CvSize /*roi*/, CvPoint seed, float* _newVal, float* _d_lw, + float* _d_up, CvConnectedComp* region, int flags, + CvFFillSegment* buffer, int buffer_size, int cn ) +{ + float* img = pImage + (step /= sizeof(float))*seed.y; + uchar* mask = (pMask += maskStep + 1) + maskStep*seed.y; + int i, L, R; + int area = 0; + double sum[] = {0,0,0}, val0[] = {0,0,0}; + float newVal[] = {0,0,0}; + float d_lw[] = {0,0,0}; + float interval[] = {0,0,0}; + int XMin, XMax, YMin = seed.y, YMax = seed.y; + int _8_connectivity = (flags & 255) == 8; + int fixedRange = flags & CV_FLOODFILL_FIXED_RANGE; + int fillImage = (flags & CV_FLOODFILL_MASK_ONLY) == 0; + uchar newMaskVal = (uchar)(flags & 0xff00 ? 
flags >> 8 : 1); + CvFFillSegment* buffer_end = buffer + buffer_size, *head = buffer, *tail = buffer; + + L = R = seed.x; + if( mask[L] ) + return; + + mask[L] = newMaskVal; + + for( i = 0; i < cn; i++ ) + { + newVal[i] = _newVal[i]; + d_lw[i] = 0.5f*(_d_lw[i] - _d_up[i]); + interval[i] = 0.5f*(_d_lw[i] + _d_up[i]); + if( fixedRange ) + val0[i] = img[L*cn+i]; + } + + if( cn == 1 ) + { + if( fixedRange ) + { + while( !mask[R + 1] && DIFF_FLT_C1( img + (R+1), val0 )) + mask[++R] = newMaskVal; + + while( !mask[L - 1] && DIFF_FLT_C1( img + (L-1), val0 )) + mask[--L] = newMaskVal; + } + else + { + while( !mask[R + 1] && DIFF_FLT_C1( img + (R+1), img + R )) + mask[++R] = newMaskVal; + + while( !mask[L - 1] && DIFF_FLT_C1( img + (L-1), img + L )) + mask[--L] = newMaskVal; + } + } + else + { + if( fixedRange ) + { + while( !mask[R + 1] && DIFF_FLT_C3( img + (R+1)*3, val0 )) + mask[++R] = newMaskVal; + + while( !mask[L - 1] && DIFF_FLT_C3( img + (L-1)*3, val0 )) + mask[--L] = newMaskVal; + } + else + { + while( !mask[R + 1] && DIFF_FLT_C3( img + (R+1)*3, img + R*3 )) + mask[++R] = newMaskVal; + + while( !mask[L - 1] && DIFF_FLT_C3( img + (L-1)*3, img + L*3 )) + mask[--L] = newMaskVal; + } + } + + XMax = R; + XMin = L; + ICV_PUSH( seed.y, L, R, R + 1, R, UP ); + + while( head != tail ) + { + int k, YC, PL, PR, dir, curstep; + ICV_POP( YC, L, R, PL, PR, dir ); + + int data[][3] = + { + {-dir, L - _8_connectivity, R + _8_connectivity}, + {dir, L - _8_connectivity, PL - 1}, + {dir, PR + 1, R + _8_connectivity} + }; + + unsigned length = (unsigned)(R-L); + + if( region ) + { + area += (int)length + 1; + + if( XMax < R ) XMax = R; + if( XMin > L ) XMin = L; + if( YMax < YC ) YMax = YC; + if( YMin > YC ) YMin = YC; + } + + if( cn == 1 ) + { + for( k = 0; k < 3; k++ ) + { + dir = data[k][0]; + curstep = dir * step; + img = pImage + (YC + dir) * step; + mask = pMask + (YC + dir) * maskStep; + int left = data[k][1]; + int right = data[k][2]; + + if( fixedRange ) + for( i = left; i <= 
right; i++ ) + { + if( !mask[i] && DIFF_FLT_C1( img + i, val0 )) + { + int j = i; + mask[i] = newMaskVal; + while( !mask[--j] && DIFF_FLT_C1( img + j, val0 )) + mask[j] = newMaskVal; + + while( !mask[++i] && DIFF_FLT_C1( img + i, val0 )) + mask[i] = newMaskVal; + + ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir ); + } + } + else if( !_8_connectivity ) + for( i = left; i <= right; i++ ) + { + if( !mask[i] && DIFF_FLT_C1( img + i, img - curstep + i )) + { + int j = i; + mask[i] = newMaskVal; + while( !mask[--j] && DIFF_FLT_C1( img + j, img + (j+1) )) + mask[j] = newMaskVal; + + while( !mask[++i] && + (DIFF_FLT_C1( img + i, img + (i-1) ) || + (DIFF_FLT_C1( img + i, img + i - curstep) && i <= R))) + mask[i] = newMaskVal; + + ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir ); + } + } + else + for( i = left; i <= right; i++ ) + { + int idx; + float val[1]; + + if( !mask[i] && + (((val[0] = img[i], + (unsigned)(idx = i-L-1) <= length) && + DIFF_FLT_C1( val, img - curstep + (i-1) )) || + ((unsigned)(++idx) <= length && + DIFF_FLT_C1( val, img - curstep + i )) || + ((unsigned)(++idx) <= length && + DIFF_FLT_C1( val, img - curstep + (i+1) )))) + { + int j = i; + mask[i] = newMaskVal; + while( !mask[--j] && DIFF_FLT_C1( img + j, img + (j+1) )) + mask[j] = newMaskVal; + + while( !mask[++i] && + ((val[0] = img[i], + DIFF_FLT_C1( val, img + (i-1) )) || + (((unsigned)(idx = i-L-1) <= length && + DIFF_FLT_C1( val, img - curstep + (i-1) ))) || + ((unsigned)(++idx) <= length && + DIFF_FLT_C1( val, img - curstep + i )) || + ((unsigned)(++idx) <= length && + DIFF_FLT_C1( val, img - curstep + (i+1) )))) + mask[i] = newMaskVal; + + ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir ); + } + } + } + + img = pImage + YC * step; + if( fillImage ) + for( i = L; i <= R; i++ ) + img[i] = newVal[0]; + else if( region ) + for( i = L; i <= R; i++ ) + sum[0] += img[i]; + } + else + { + for( k = 0; k < 3; k++ ) + { + dir = data[k][0]; + curstep = dir * step; + img = pImage + (YC + dir) * step; + mask = pMask + (YC + 
dir) * maskStep; + int left = data[k][1]; + int right = data[k][2]; + + if( fixedRange ) + for( i = left; i <= right; i++ ) + { + if( !mask[i] && DIFF_FLT_C3( img + i*3, val0 )) + { + int j = i; + mask[i] = newMaskVal; + while( !mask[--j] && DIFF_FLT_C3( img + j*3, val0 )) + mask[j] = newMaskVal; + + while( !mask[++i] && DIFF_FLT_C3( img + i*3, val0 )) + mask[i] = newMaskVal; + + ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir ); + } + } + else if( !_8_connectivity ) + for( i = left; i <= right; i++ ) + { + if( !mask[i] && DIFF_FLT_C3( img + i*3, img - curstep + i*3 )) + { + int j = i; + mask[i] = newMaskVal; + while( !mask[--j] && DIFF_FLT_C3( img + j*3, img + (j+1)*3 )) + mask[j] = newMaskVal; + + while( !mask[++i] && + (DIFF_FLT_C3( img + i*3, img + (i-1)*3 ) || + (DIFF_FLT_C3( img + i*3, img + i*3 - curstep) && i <= R))) + mask[i] = newMaskVal; + + ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir ); + } + } + else + for( i = left; i <= right; i++ ) + { + int idx; + float val[3]; + + if( !mask[i] && + (((ICV_SET_C3( val, img+i*3 ), + (unsigned)(idx = i-L-1) <= length) && + DIFF_FLT_C3( val, img - curstep + (i-1)*3 )) || + ((unsigned)(++idx) <= length && + DIFF_FLT_C3( val, img - curstep + i*3 )) || + ((unsigned)(++idx) <= length && + DIFF_FLT_C3( val, img - curstep + (i+1)*3 )))) + { + int j = i; + mask[i] = newMaskVal; + while( !mask[--j] && DIFF_FLT_C3( img + j*3, img + (j+1)*3 )) + mask[j] = newMaskVal; + + while( !mask[++i] && + ((ICV_SET_C3( val, img + i*3 ), + DIFF_FLT_C3( val, img + (i-1)*3 )) || + (((unsigned)(idx = i-L-1) <= length && + DIFF_FLT_C3( val, img - curstep + (i-1)*3 ))) || + ((unsigned)(++idx) <= length && + DIFF_FLT_C3( val, img - curstep + i*3 )) || + ((unsigned)(++idx) <= length && + DIFF_FLT_C3( val, img - curstep + (i+1)*3 )))) + mask[i] = newMaskVal; + + ICV_PUSH( YC + dir, j+1, i-1, L, R, -dir ); + } + } + } + + img = pImage + YC * step; + if( fillImage ) + for( i = L; i <= R; i++ ) + ICV_SET_C3( img + i*3, newVal ); + else if( region ) + for( i = 
L; i <= R; i++ ) + { + sum[0] += img[i*3]; + sum[1] += img[i*3+1]; + sum[2] += img[i*3+2]; + } + } + } + + if( region ) + { + region->area = area; + region->rect.x = XMin; + region->rect.y = YMin; + region->rect.width = XMax - XMin + 1; + region->rect.height = YMax - YMin + 1; + + if( fillImage ) + region->value = cvScalar(newVal[0], newVal[1], newVal[2]); + else + { + double iarea = area ? 1./area : 0; + region->value = cvScalar(sum[0]*iarea, sum[1]*iarea, sum[2]*iarea); + } + } +} + + +/****************************************************************************************\ +* External Functions * +\****************************************************************************************/ + +typedef void (*CvFloodFillFunc)( + void* img, int step, CvSize size, CvPoint seed, void* newval, + CvConnectedComp* comp, int flags, void* buffer, int buffer_size, int cn ); + +typedef void (*CvFloodFillGradFunc)( + void* img, int step, uchar* mask, int maskStep, CvSize size, + CvPoint seed, void* newval, void* d_lw, void* d_up, void* ccomp, + int flags, void* buffer, int buffer_size, int cn ); + +CV_IMPL void +cvFloodFill( CvArr* arr, CvPoint seed_point, + CvScalar newVal, CvScalar lo_diff, CvScalar up_diff, + CvConnectedComp* comp, int flags, CvArr* maskarr ) +{ + cv::Ptr tempMask; + cv::AutoBuffer buffer; + + if( comp ) + memset( comp, 0, sizeof(*comp) ); + + int i, type, depth, cn, is_simple; + int buffer_size, connectivity = flags & 255; + double nv_buf[4] = {0,0,0,0}; + union { uchar b[4]; float f[4]; } ld_buf, ud_buf; + CvMat stub, *img = cvGetMat(arr, &stub); + CvMat maskstub, *mask = (CvMat*)maskarr; + CvSize size; + + type = CV_MAT_TYPE( img->type ); + depth = CV_MAT_DEPTH(type); + cn = CV_MAT_CN(type); + + if( connectivity == 0 ) + connectivity = 4; + else if( connectivity != 4 && connectivity != 8 ) + CV_Error( CV_StsBadFlag, "Connectivity must be 4, 0(=4) or 8" ); + + is_simple = mask == 0 && (flags & CV_FLOODFILL_MASK_ONLY) == 0; + + for( i = 0; i < cn; i++ ) + 
{ + if( lo_diff.val[i] < 0 || up_diff.val[i] < 0 ) + CV_Error( CV_StsBadArg, "lo_diff and up_diff must be non-negative" ); + is_simple &= fabs(lo_diff.val[i]) < DBL_EPSILON && fabs(up_diff.val[i]) < DBL_EPSILON; + } + + size = cvGetMatSize( img ); + + if( (unsigned)seed_point.x >= (unsigned)size.width || + (unsigned)seed_point.y >= (unsigned)size.height ) + CV_Error( CV_StsOutOfRange, "Seed point is outside of image" ); + + cvScalarToRawData( &newVal, &nv_buf, type, 0 ); + buffer_size = MAX( size.width, size.height )*2; + buffer.allocate( buffer_size ); + + if( is_simple ) + { + int elem_size = CV_ELEM_SIZE(type); + const uchar* seed_ptr = img->data.ptr + img->step*seed_point.y + elem_size*seed_point.x; + CvFloodFillFunc func = + type == CV_8UC1 || type == CV_8UC3 ? (CvFloodFillFunc)icvFloodFill_8u_CnIR : + type == CV_32FC1 || type == CV_32FC3 ? (CvFloodFillFunc)icvFloodFill_32f_CnIR : 0; + if( !func ) + CV_Error( CV_StsUnsupportedFormat, "" ); + // check if the new value is different from the current value at the seed point. + // if they are exactly the same, use the generic version with mask to avoid infinite loops. + for( i = 0; i < elem_size; i++ ) + if( seed_ptr[i] != ((uchar*)nv_buf)[i] ) + break; + if( i < elem_size ) + { + func( img->data.ptr, img->step, size, + seed_point, &nv_buf, comp, flags, + buffer, buffer_size, cn ); + return; + } + } + + CvFloodFillGradFunc func = + type == CV_8UC1 || type == CV_8UC3 ? (CvFloodFillGradFunc)icvFloodFillGrad_8u_CnIR : + type == CV_32FC1 || type == CV_32FC3 ? 
(CvFloodFillGradFunc)icvFloodFillGrad_32f_CnIR : 0; + if( !func ) + CV_Error( CV_StsUnsupportedFormat, "" ); + + if( !mask ) + { + /* created mask will be 8-byte aligned */ + tempMask = cvCreateMat( size.height + 2, (size.width + 9) & -8, CV_8UC1 ); + mask = tempMask; + } + else + { + mask = cvGetMat( mask, &maskstub ); + if( !CV_IS_MASK_ARR( mask )) + CV_Error( CV_StsBadMask, "" ); + + if( mask->width != size.width + 2 || mask->height != size.height + 2 ) + CV_Error( CV_StsUnmatchedSizes, "mask must be 2 pixel wider " + "and 2 pixel taller than filled image" ); + } + + int width = tempMask ? mask->step : size.width + 2; + uchar* mask_row = mask->data.ptr + mask->step; + memset( mask_row - mask->step, 1, width ); + + for( i = 1; i <= size.height; i++, mask_row += mask->step ) + { + if( tempMask ) + memset( mask_row, 0, width ); + mask_row[0] = mask_row[size.width+1] = (uchar)1; + } + memset( mask_row, 1, width ); + + if( depth == CV_8U ) + for( i = 0; i < cn; i++ ) + { + int t = cvFloor(lo_diff.val[i]); + ld_buf.b[i] = CV_CAST_8U(t); + t = cvFloor(up_diff.val[i]); + ud_buf.b[i] = CV_CAST_8U(t); + } + else + for( i = 0; i < cn; i++ ) + { + ld_buf.f[i] = (float)lo_diff.val[i]; + ud_buf.f[i] = (float)up_diff.val[i]; + } + + func( img->data.ptr, img->step, mask->data.ptr, mask->step, + size, seed_point, &nv_buf, ld_buf.f, ud_buf.f, + comp, flags, buffer, buffer_size, cn ); +} + + +int cv::floodFill( InputOutputArray _image, Point seedPoint, + Scalar newVal, Rect* rect, + Scalar loDiff, Scalar upDiff, int flags ) +{ + CvConnectedComp ccomp; + CvMat c_image = _image.getMat(); + cvFloodFill(&c_image, seedPoint, newVal, loDiff, upDiff, &ccomp, flags, 0); + if( rect ) + *rect = ccomp.rect; + return cvRound(ccomp.area); +} + +int cv::floodFill( InputOutputArray _image, InputOutputArray _mask, + Point seedPoint, Scalar newVal, Rect* rect, + Scalar loDiff, Scalar upDiff, int flags ) +{ + CvConnectedComp ccomp; + CvMat c_image = _image.getMat(), c_mask = _mask.getMat(); + 
cvFloodFill(&c_image, seedPoint, newVal, loDiff, upDiff, &ccomp, flags, c_mask.data.ptr ? &c_mask : 0); + if( rect ) + *rect = ccomp.rect; + return cvRound(ccomp.area); +} + +/* End of file. */ diff --git a/opencv/imgproc/gcgraph.hpp b/opencv/imgproc/gcgraph.hpp new file mode 100644 index 0000000..7f0f501 --- /dev/null +++ b/opencv/imgproc/gcgraph.hpp @@ -0,0 +1,385 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef _CV_GCGRAPH_H_ +#define _CV_GCGRAPH_H_ + +template class GCGraph +{ +public: + GCGraph(); + GCGraph( unsigned int vtxCount, unsigned int edgeCount ); + ~GCGraph(); + void create( unsigned int vtxCount, unsigned int edgeCount ); + int addVtx(); + void addEdges( int i, int j, TWeight w, TWeight revw ); + void addTermWeights( int i, TWeight sourceW, TWeight sinkW ); + TWeight maxFlow(); + bool inSourceSegment( int i ); +private: + class Vtx + { + public: + Vtx *next; // initialized and used in maxFlow() only + int parent; + int first; + int ts; + int dist; + TWeight weight; + uchar t; + }; + class Edge + { + public: + int dst; + int next; + TWeight weight; + }; + + std::vector vtcs; + std::vector edges; + TWeight flow; +}; + +template +GCGraph::GCGraph() +{ + flow = 0; +} +template +GCGraph::GCGraph( unsigned int vtxCount, unsigned int edgeCount ) +{ + create( vtxCount, edgeCount ); +} +template +GCGraph::~GCGraph() +{ +} +template +void GCGraph::create( unsigned int vtxCount, unsigned int edgeCount ) +{ + vtcs.reserve( vtxCount ); + edges.reserve( edgeCount + 2 ); + flow = 0; +} + +template +int GCGraph::addVtx() +{ + Vtx v; + memset( &v, 0, sizeof(Vtx)); + vtcs.push_back(v); + return (int)vtcs.size() - 1; +} + +template +void GCGraph::addEdges( int i, int j, TWeight w, TWeight revw ) +{ + CV_Assert( i>=0 && i<(int)vtcs.size() ); + CV_Assert( j>=0 && j<(int)vtcs.size() ); + CV_Assert( w>=0 && revw>=0 ); + 
CV_Assert( i != j ); + + if( !edges.size() ) + edges.resize( 2 ); + + Edge fromI, toI; + fromI.dst = j; + fromI.next = vtcs[i].first; + fromI.weight = w; + vtcs[i].first = (int)edges.size(); + edges.push_back( fromI ); + + toI.dst = i; + toI.next = vtcs[j].first; + toI.weight = revw; + vtcs[j].first = (int)edges.size(); + edges.push_back( toI ); +} + +template +void GCGraph::addTermWeights( int i, TWeight sourceW, TWeight sinkW ) +{ + CV_Assert( i>=0 && i<(int)vtcs.size() ); + + TWeight dw = vtcs[i].weight; + if( dw > 0 ) + sourceW += dw; + else + sinkW -= dw; + flow += (sourceW < sinkW) ? sourceW : sinkW; + vtcs[i].weight = sourceW - sinkW; +} + +template +TWeight GCGraph::maxFlow() +{ + const int TERMINAL = -1, ORPHAN = -2; + Vtx stub, *nilNode = &stub, *first = nilNode, *last = nilNode; + int curr_ts = 0; + stub.next = nilNode; + Vtx *vtxPtr = &vtcs[0]; + Edge *edgePtr = &edges[0]; + + std::vector orphans; + + // initialize the active queue and the graph vertices + for( int i = 0; i < (int)vtcs.size(); i++ ) + { + Vtx* v = vtxPtr + i; + v->ts = 0; + if( v->weight != 0 ) + { + last = last->next = v; + v->dist = 1; + v->parent = TERMINAL; + v->t = v->weight < 0; + } + else + v->parent = 0; + } + first = first->next; + last->next = nilNode; + nilNode->next = 0; + + // run the search-path -> augment-graph -> restore-trees loop + for(;;) + { + Vtx* v, *u; + int e0 = -1, ei = 0, ej = 0; + TWeight minWeight, weight; + uchar vt; + + // grow S & T search trees, find an edge connecting them + while( first != nilNode ) + { + v = first; + if( v->parent ) + { + vt = v->t; + for( ei = v->first; ei != 0; ei = edgePtr[ei].next ) + { + if( edgePtr[ei^vt].weight == 0 ) + continue; + u = vtxPtr+edgePtr[ei].dst; + if( !u->parent ) + { + u->t = vt; + u->parent = ei ^ 1; + u->ts = v->ts; + u->dist = v->dist + 1; + if( !u->next ) + { + u->next = nilNode; + last = last->next = u; + } + continue; + } + + if( u->t != vt ) + { + e0 = ei ^ vt; + break; + } + + if( u->dist > v->dist+1 && 
u->ts <= v->ts ) + { + // reassign the parent + u->parent = ei ^ 1; + u->ts = v->ts; + u->dist = v->dist + 1; + } + } + if( e0 > 0 ) + break; + } + // exclude the vertex from the active list + first = first->next; + v->next = 0; + } + + if( e0 <= 0 ) + break; + + // find the minimum edge weight along the path + minWeight = edgePtr[e0].weight; + assert( minWeight > 0 ); + // k = 1: source tree, k = 0: destination tree + for( int k = 1; k >= 0; k-- ) + { + for( v = vtxPtr+edgePtr[e0^k].dst;; v = vtxPtr+edgePtr[ei].dst ) + { + if( (ei = v->parent) < 0 ) + break; + weight = edgePtr[ei^k].weight; + minWeight = MIN(minWeight, weight); + assert( minWeight > 0 ); + } + weight = fabs(v->weight); + minWeight = MIN(minWeight, weight); + assert( minWeight > 0 ); + } + + // modify weights of the edges along the path and collect orphans + edgePtr[e0].weight -= minWeight; + edgePtr[e0^1].weight += minWeight; + flow += minWeight; + + // k = 1: source tree, k = 0: destination tree + for( int k = 1; k >= 0; k-- ) + { + for( v = vtxPtr+edgePtr[e0^k].dst;; v = vtxPtr+edgePtr[ei].dst ) + { + if( (ei = v->parent) < 0 ) + break; + edgePtr[ei^(k^1)].weight += minWeight; + if( (edgePtr[ei^k].weight -= minWeight) == 0 ) + { + orphans.push_back(v); + v->parent = ORPHAN; + } + } + + v->weight = v->weight + minWeight*(1-k*2); + if( v->weight == 0 ) + { + orphans.push_back(v); + v->parent = ORPHAN; + } + } + + // restore the search trees by finding new parents for the orphans + curr_ts++; + while( !orphans.empty() ) + { + Vtx* v = orphans.back(); + orphans.pop_back(); + + int d, minDist = INT_MAX; + e0 = 0; + vt = v->t; + + for( ei = v->first; ei != 0; ei = edgePtr[ei].next ) + { + if( edgePtr[ei^(vt^1)].weight == 0 ) + continue; + u = vtxPtr+edgePtr[ei].dst; + if( u->t != vt || u->parent == 0 ) + continue; + // compute the distance to the tree root + for( d = 0;; ) + { + if( u->ts == curr_ts ) + { + d += u->dist; + break; + } + ej = u->parent; + d++; + if( ej < 0 ) + { + if( ej == ORPHAN ) + d 
= INT_MAX-1; + else + { + u->ts = curr_ts; + u->dist = 1; + } + break; + } + u = vtxPtr+edgePtr[ej].dst; + } + + // update the distance + if( ++d < INT_MAX ) + { + if( d < minDist ) + { + minDist = d; + e0 = ei; + } + for( u = vtxPtr+edgePtr[ei].dst; u->ts != curr_ts; u = vtxPtr+edgePtr[u->parent].dst ) + { + u->ts = curr_ts; + u->dist = --d; + } + } + } + + if( (v->parent = e0) > 0 ) + { + v->ts = curr_ts; + v->dist = minDist; + continue; + } + + /* no parent is found */ + v->ts = 0; + for( ei = v->first; ei != 0; ei = edgePtr[ei].next ) + { + u = vtxPtr+edgePtr[ei].dst; + ej = u->parent; + if( u->t != vt || !ej ) + continue; + if( edgePtr[ei^(vt^1)].weight && !u->next ) + { + u->next = nilNode; + last = last->next = u; + } + if( ej > 0 && vtxPtr+edgePtr[ej].dst == v ) + { + orphans.push_back(u); + u->parent = ORPHAN; + } + } + } + } + return flow; +} + +template +bool GCGraph::inSourceSegment( int i ) +{ + CV_Assert( i>=0 && i<(int)vtcs.size() ); + return vtcs[i].t == 0; +}; + +#endif diff --git a/opencv/imgproc/geometry.cpp b/opencv/imgproc/geometry.cpp new file mode 100644 index 0000000..ab0bc75 --- /dev/null +++ b/opencv/imgproc/geometry.cpp @@ -0,0 +1,331 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ +#include "precomp.hpp" + + +CV_IMPL CvRect +cvMaxRect( const CvRect* rect1, const CvRect* rect2 ) +{ + if( rect1 && rect2 ) + { + CvRect max_rect; + int a, b; + + max_rect.x = a = rect1->x; + b = rect2->x; + if( max_rect.x > b ) + max_rect.x = b; + + max_rect.width = a += rect1->width; + b += rect2->width; + + if( max_rect.width < b ) + max_rect.width = b; + max_rect.width -= max_rect.x; + + max_rect.y = a = rect1->y; + b = rect2->y; + if( max_rect.y > b ) + max_rect.y = b; + + max_rect.height = a += rect1->height; + b += rect2->height; + + if( max_rect.height < b ) + max_rect.height = b; + max_rect.height -= max_rect.y; + return max_rect; + } + else if( rect1 ) + return *rect1; + else if( rect2 ) + return *rect2; + else + return cvRect(0,0,0,0); +} + + +CV_IMPL void +cvBoxPoints( CvBox2D box, CvPoint2D32f pt[4] ) +{ + if( !pt ) + CV_Error( CV_StsNullPtr, "NULL vertex array pointer" ); + cv::RotatedRect(box).points((cv::Point2f*)pt); +} + + +int +icvIntersectLines( double x1, double dx1, double y1, double dy1, + double x2, double dx2, double y2, double dy2, double *t2 ) +{ + double d = dx1 * dy2 - dx2 * dy1; + int result = -1; + + if( d != 0 ) + { + *t2 = ((x2 - x1) * dy1 - (y2 - y1) * dx1) / d; + result = 0; + } + return result; +} + + +void +icvCreateCenterNormalLine( CvSubdiv2DEdge edge, double *_a, double *_b, double *_c ) +{ + CvPoint2D32f org = cvSubdiv2DEdgeOrg( edge )->pt; + CvPoint2D32f dst = cvSubdiv2DEdgeDst( edge )->pt; + + double a = dst.x - org.x; + double b = dst.y - org.y; + double c = -(a * (dst.x + org.x) + b * (dst.y + org.y)); + + *_a = a + a; + *_b = b + b; + *_c = c; +} + + +void +icvIntersectLines3( double *a0, double *b0, double *c0, + double *a1, double *b1, double *c1, CvPoint2D32f * point ) +{ + double det = a0[0] * b1[0] - a1[0] * b0[0]; + + if( det != 0 ) + { + det = 1. 
/ det; + point->x = (float) ((b0[0] * c1[0] - b1[0] * c0[0]) * det); + point->y = (float) ((a1[0] * c0[0] - a0[0] * c1[0]) * det); + } + else + { + point->x = point->y = FLT_MAX; + } +} + + +CV_IMPL double +cvPointPolygonTest( const CvArr* _contour, CvPoint2D32f pt, int measure_dist ) +{ + double result = 0; + + CvSeqBlock block; + CvContour header; + CvSeq* contour = (CvSeq*)_contour; + CvSeqReader reader; + int i, total, counter = 0; + int is_float; + double min_dist_num = FLT_MAX, min_dist_denom = 1; + CvPoint ip = {0,0}; + + if( !CV_IS_SEQ(contour) ) + { + contour = cvPointSeqFromMat( CV_SEQ_KIND_CURVE + CV_SEQ_FLAG_CLOSED, + _contour, &header, &block ); + } + else if( CV_IS_SEQ_POINT_SET(contour) ) + { + if( contour->header_size == sizeof(CvContour) && !measure_dist ) + { + CvRect r = ((CvContour*)contour)->rect; + if( pt.x < r.x || pt.y < r.y || + pt.x >= r.x + r.width || pt.y >= r.y + r.height ) + return -1; + } + } + else if( CV_IS_SEQ_CHAIN(contour) ) + { + CV_Error( CV_StsBadArg, + "Chains are not supported. 
Convert them to polygonal representation using cvApproxChains()" ); + } + else + CV_Error( CV_StsBadArg, "Input contour is neither a valid sequence nor a matrix" ); + + total = contour->total; + is_float = CV_SEQ_ELTYPE(contour) == CV_32FC2; + cvStartReadSeq( contour, &reader, -1 ); + + if( !is_float && !measure_dist && (ip.x = cvRound(pt.x)) == pt.x && (ip.y = cvRound(pt.y)) == pt.y ) + { + // the fastest "pure integer" branch + CvPoint v0, v; + CV_READ_SEQ_ELEM( v, reader ); + + for( i = 0; i < total; i++ ) + { + int dist; + v0 = v; + CV_READ_SEQ_ELEM( v, reader ); + + if( (v0.y <= ip.y && v.y <= ip.y) || + (v0.y > ip.y && v.y > ip.y) || + (v0.x < ip.x && v.x < ip.x) ) + { + if( ip.y == v.y && (ip.x == v.x || (ip.y == v0.y && + ((v0.x <= ip.x && ip.x <= v.x) || (v.x <= ip.x && ip.x <= v0.x)))) ) + return 0; + continue; + } + + dist = (ip.y - v0.y)*(v.x - v0.x) - (ip.x - v0.x)*(v.y - v0.y); + if( dist == 0 ) + return 0; + if( v.y < v0.y ) + dist = -dist; + counter += dist > 0; + } + + result = counter % 2 == 0 ? -1 : 1; + } + else + { + CvPoint2D32f v0, v; + CvPoint iv; + + if( is_float ) + { + CV_READ_SEQ_ELEM( v, reader ); + } + else + { + CV_READ_SEQ_ELEM( iv, reader ); + v = cvPointTo32f( iv ); + } + + if( !measure_dist ) + { + for( i = 0; i < total; i++ ) + { + double dist; + v0 = v; + if( is_float ) + { + CV_READ_SEQ_ELEM( v, reader ); + } + else + { + CV_READ_SEQ_ELEM( iv, reader ); + v = cvPointTo32f( iv ); + } + + if( (v0.y <= pt.y && v.y <= pt.y) || + (v0.y > pt.y && v.y > pt.y) || + (v0.x < pt.x && v.x < pt.x) ) + { + if( pt.y == v.y && (pt.x == v.x || (pt.y == v0.y && + ((v0.x <= pt.x && pt.x <= v.x) || (v.x <= pt.x && pt.x <= v0.x)))) ) + return 0; + continue; + } + + dist = (double)(pt.y - v0.y)*(v.x - v0.x) - (double)(pt.x - v0.x)*(v.y - v0.y); + if( dist == 0 ) + return 0; + if( v.y < v0.y ) + dist = -dist; + counter += dist > 0; + } + + result = counter % 2 == 0 ? 
-1 : 1; + } + else + { + for( i = 0; i < total; i++ ) + { + double dx, dy, dx1, dy1, dx2, dy2, dist_num, dist_denom = 1; + + v0 = v; + if( is_float ) + { + CV_READ_SEQ_ELEM( v, reader ); + } + else + { + CV_READ_SEQ_ELEM( iv, reader ); + v = cvPointTo32f( iv ); + } + + dx = v.x - v0.x; dy = v.y - v0.y; + dx1 = pt.x - v0.x; dy1 = pt.y - v0.y; + dx2 = pt.x - v.x; dy2 = pt.y - v.y; + + if( dx1*dx + dy1*dy <= 0 ) + dist_num = dx1*dx1 + dy1*dy1; + else if( dx2*dx + dy2*dy >= 0 ) + dist_num = dx2*dx2 + dy2*dy2; + else + { + dist_num = (dy1*dx - dx1*dy); + dist_num *= dist_num; + dist_denom = dx*dx + dy*dy; + } + + if( dist_num*min_dist_denom < min_dist_num*dist_denom ) + { + min_dist_num = dist_num; + min_dist_denom = dist_denom; + if( min_dist_num == 0 ) + break; + } + + if( (v0.y <= pt.y && v.y <= pt.y) || + (v0.y > pt.y && v.y > pt.y) || + (v0.x < pt.x && v.x < pt.x) ) + continue; + + dist_num = dy1*dx - dx1*dy; + if( dy < 0 ) + dist_num = -dist_num; + counter += dist_num > 0; + } + + result = sqrt(min_dist_num/min_dist_denom); + if( counter % 2 == 0 ) + result = -result; + } + } + + return result; +} + + +/* End of file. */ diff --git a/opencv/imgproc/grabcut.cpp b/opencv/imgproc/grabcut.cpp new file mode 100644 index 0000000..27a535c --- /dev/null +++ b/opencv/imgproc/grabcut.cpp @@ -0,0 +1,575 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" +#include "gcgraph.hpp" +#include + +using namespace cv; + +/* +This is implementation of image segmentation algorithm GrabCut described in +"GrabCut — Interactive Foreground Extraction using Iterated Graph Cuts". +Carsten Rother, Vladimir Kolmogorov, Andrew Blake. 
+ */ + +/* + GMM - Gaussian Mixture Model +*/ +class GMM +{ +public: + static const int componentsCount = 5; + + GMM( Mat& _model ); + double operator()( const Vec3d color ) const; + double operator()( int ci, const Vec3d color ) const; + int whichComponent( const Vec3d color ) const; + + void initLearning(); + void addSample( int ci, const Vec3d color ); + void endLearning(); + +private: + void calcInverseCovAndDeterm( int ci ); + Mat model; + double* coefs; + double* mean; + double* cov; + + double inverseCovs[componentsCount][3][3]; + double covDeterms[componentsCount]; + + double sums[componentsCount][3]; + double prods[componentsCount][3][3]; + int sampleCounts[componentsCount]; + int totalSampleCount; +}; + +GMM::GMM( Mat& _model ) +{ + const int modelSize = 3/*mean*/ + 9/*covariance*/ + 1/*component weight*/; + if( _model.empty() ) + { + _model.create( 1, modelSize*componentsCount, CV_64FC1 ); + _model.setTo(Scalar(0)); + } + else if( (_model.type() != CV_64FC1) || (_model.rows != 1) || (_model.cols != modelSize*componentsCount) ) + CV_Error( CV_StsBadArg, "_model must have CV_64FC1 type, rows == 1 and cols == 13*componentsCount" ); + + model = _model; + + coefs = model.ptr(0); + mean = coefs + componentsCount; + cov = mean + 3*componentsCount; + + for( int ci = 0; ci < componentsCount; ci++ ) + if( coefs[ci] > 0 ) + calcInverseCovAndDeterm( ci ); +} + +double GMM::operator()( const Vec3d color ) const +{ + double res = 0; + for( int ci = 0; ci < componentsCount; ci++ ) + res += coefs[ci] * (*this)(ci, color ); + return res; +} + +double GMM::operator()( int ci, const Vec3d color ) const +{ + double res = 0; + if( coefs[ci] > 0 ) + { + CV_Assert( covDeterms[ci] > std::numeric_limits::epsilon() ); + Vec3d diff = color; + double* m = mean + 3*ci; + diff[0] -= m[0]; diff[1] -= m[1]; diff[2] -= m[2]; + double mult = diff[0]*(diff[0]*inverseCovs[ci][0][0] + diff[1]*inverseCovs[ci][1][0] + diff[2]*inverseCovs[ci][2][0]) + + diff[1]*(diff[0]*inverseCovs[ci][0][1] + 
diff[1]*inverseCovs[ci][1][1] + diff[2]*inverseCovs[ci][2][1]) + + diff[2]*(diff[0]*inverseCovs[ci][0][2] + diff[1]*inverseCovs[ci][1][2] + diff[2]*inverseCovs[ci][2][2]); + res = 1.0f/sqrt(covDeterms[ci]) * exp(-0.5f*mult); + } + return res; +} + +int GMM::whichComponent( const Vec3d color ) const +{ + int k = 0; + double max = 0; + + for( int ci = 0; ci < componentsCount; ci++ ) + { + double p = (*this)( ci, color ); + if( p > max ) + { + k = ci; + max = p; + } + } + return k; +} + +void GMM::initLearning() +{ + for( int ci = 0; ci < componentsCount; ci++) + { + sums[ci][0] = sums[ci][1] = sums[ci][2] = 0; + prods[ci][0][0] = prods[ci][0][1] = prods[ci][0][2] = 0; + prods[ci][1][0] = prods[ci][1][1] = prods[ci][1][2] = 0; + prods[ci][2][0] = prods[ci][2][1] = prods[ci][2][2] = 0; + sampleCounts[ci] = 0; + } + totalSampleCount = 0; +} + +void GMM::addSample( int ci, const Vec3d color ) +{ + sums[ci][0] += color[0]; sums[ci][1] += color[1]; sums[ci][2] += color[2]; + prods[ci][0][0] += color[0]*color[0]; prods[ci][0][1] += color[0]*color[1]; prods[ci][0][2] += color[0]*color[2]; + prods[ci][1][0] += color[1]*color[0]; prods[ci][1][1] += color[1]*color[1]; prods[ci][1][2] += color[1]*color[2]; + prods[ci][2][0] += color[2]*color[0]; prods[ci][2][1] += color[2]*color[1]; prods[ci][2][2] += color[2]*color[2]; + sampleCounts[ci]++; + totalSampleCount++; +} + +void GMM::endLearning() +{ + const double variance = 0.01; + for( int ci = 0; ci < componentsCount; ci++ ) + { + int n = sampleCounts[ci]; + if( n == 0 ) + coefs[ci] = 0; + else + { + coefs[ci] = (double)n/totalSampleCount; + + double* m = mean + 3*ci; + m[0] = sums[ci][0]/n; m[1] = sums[ci][1]/n; m[2] = sums[ci][2]/n; + + double* c = cov + 9*ci; + c[0] = prods[ci][0][0]/n - m[0]*m[0]; c[1] = prods[ci][0][1]/n - m[0]*m[1]; c[2] = prods[ci][0][2]/n - m[0]*m[2]; + c[3] = prods[ci][1][0]/n - m[1]*m[0]; c[4] = prods[ci][1][1]/n - m[1]*m[1]; c[5] = prods[ci][1][2]/n - m[1]*m[2]; + c[6] = prods[ci][2][0]/n - m[2]*m[0]; 
c[7] = prods[ci][2][1]/n - m[2]*m[1]; c[8] = prods[ci][2][2]/n - m[2]*m[2]; + + double dtrm = c[0]*(c[4]*c[8]-c[5]*c[7]) - c[1]*(c[3]*c[8]-c[5]*c[6]) + c[2]*(c[3]*c[7]-c[4]*c[6]); + if( dtrm <= std::numeric_limits::epsilon() ) + { + // Adds the white noise to avoid singular covariance matrix. + c[0] += variance; + c[4] += variance; + c[8] += variance; + } + + calcInverseCovAndDeterm(ci); + } + } +} + +void GMM::calcInverseCovAndDeterm( int ci ) +{ + if( coefs[ci] > 0 ) + { + double *c = cov + 9*ci; + double dtrm = + covDeterms[ci] = c[0]*(c[4]*c[8]-c[5]*c[7]) - c[1]*(c[3]*c[8]-c[5]*c[6]) + c[2]*(c[3]*c[7]-c[4]*c[6]); + + CV_Assert( dtrm > std::numeric_limits::epsilon() ); + inverseCovs[ci][0][0] = (c[4]*c[8] - c[5]*c[7]) / dtrm; + inverseCovs[ci][1][0] = -(c[3]*c[8] - c[5]*c[6]) / dtrm; + inverseCovs[ci][2][0] = (c[3]*c[7] - c[4]*c[6]) / dtrm; + inverseCovs[ci][0][1] = -(c[1]*c[8] - c[2]*c[7]) / dtrm; + inverseCovs[ci][1][1] = (c[0]*c[8] - c[2]*c[6]) / dtrm; + inverseCovs[ci][2][1] = -(c[0]*c[7] - c[1]*c[6]) / dtrm; + inverseCovs[ci][0][2] = (c[1]*c[5] - c[2]*c[4]) / dtrm; + inverseCovs[ci][1][2] = -(c[0]*c[5] - c[2]*c[3]) / dtrm; + inverseCovs[ci][2][2] = (c[0]*c[4] - c[1]*c[3]) / dtrm; + } +} + +/* + Calculate beta - parameter of GrabCut algorithm. 
+ beta = 1/(2*avg(sqr(||color[i] - color[j]||))) +*/ +double calcBeta( const Mat& img ) +{ + double beta = 0; + for( int y = 0; y < img.rows; y++ ) + { + for( int x = 0; x < img.cols; x++ ) + { + Vec3d color = img.at(y,x); + if( x>0 ) // left + { + Vec3d diff = color - (Vec3d)img.at(y,x-1); + beta += diff.dot(diff); + } + if( y>0 && x>0 ) // upleft + { + Vec3d diff = color - (Vec3d)img.at(y-1,x-1); + beta += diff.dot(diff); + } + if( y>0 ) // up + { + Vec3d diff = color - (Vec3d)img.at(y-1,x); + beta += diff.dot(diff); + } + if( y>0 && x(y-1,x+1); + beta += diff.dot(diff); + } + } + } + if( beta <= std::numeric_limits::epsilon() ) + beta = 0; + else + beta = 1.f / (2 * beta/(4*img.cols*img.rows - 3*img.cols - 3*img.rows + 2) ); + + return beta; +} + +/* + Calculate weights of noterminal vertices of graph. + beta and gamma - parameters of GrabCut algorithm. + */ +void calcNWeights( const Mat& img, Mat& leftW, Mat& upleftW, Mat& upW, Mat& uprightW, double beta, double gamma ) +{ + const double gammaDivSqrt2 = gamma / std::sqrt(2.0f); + leftW.create( img.rows, img.cols, CV_64FC1 ); + upleftW.create( img.rows, img.cols, CV_64FC1 ); + upW.create( img.rows, img.cols, CV_64FC1 ); + uprightW.create( img.rows, img.cols, CV_64FC1 ); + for( int y = 0; y < img.rows; y++ ) + { + for( int x = 0; x < img.cols; x++ ) + { + Vec3d color = img.at(y,x); + if( x-1>=0 ) // left + { + Vec3d diff = color - (Vec3d)img.at(y,x-1); + leftW.at(y,x) = gamma * exp(-beta*diff.dot(diff)); + } + else + leftW.at(y,x) = 0; + if( x-1>=0 && y-1>=0 ) // upleft + { + Vec3d diff = color - (Vec3d)img.at(y-1,x-1); + upleftW.at(y,x) = gammaDivSqrt2 * exp(-beta*diff.dot(diff)); + } + else + upleftW.at(y,x) = 0; + if( y-1>=0 ) // up + { + Vec3d diff = color - (Vec3d)img.at(y-1,x); + upW.at(y,x) = gamma * exp(-beta*diff.dot(diff)); + } + else + upW.at(y,x) = 0; + if( x+1=0 ) // upright + { + Vec3d diff = color - (Vec3d)img.at(y-1,x+1); + uprightW.at(y,x) = gammaDivSqrt2 * exp(-beta*diff.dot(diff)); + } + else + 
uprightW.at(y,x) = 0; + } + } +} + +/* + Check size, type and element values of mask matrix. + */ +void checkMask( const Mat& img, const Mat& mask ) +{ + if( mask.empty() ) + CV_Error( CV_StsBadArg, "mask is empty" ); + if( mask.type() != CV_8UC1 ) + CV_Error( CV_StsBadArg, "mask must have CV_8UC1 type" ); + if( mask.cols != img.cols || mask.rows != img.rows ) + CV_Error( CV_StsBadArg, "mask must have as many rows and cols as img" ); + for( int y = 0; y < mask.rows; y++ ) + { + for( int x = 0; x < mask.cols; x++ ) + { + uchar val = mask.at(y,x); + if( val!=GC_BGD && val!=GC_FGD && val!=GC_PR_BGD && val!=GC_PR_FGD ) + CV_Error( CV_StsBadArg, "mask element value must be equel" + "GC_BGD or GC_FGD or GC_PR_BGD or GC_PR_FGD" ); + } + } +} + +/* + Initialize mask using rectangular. +*/ +void initMaskWithRect( Mat& mask, Size imgSize, Rect rect ) +{ + mask.create( imgSize, CV_8UC1 ); + mask.setTo( GC_BGD ); + + rect.x = max(0, rect.x); + rect.y = max(0, rect.y); + rect.width = min(rect.width, imgSize.width-rect.x); + rect.height = min(rect.height, imgSize.height-rect.y); + + (mask(rect)).setTo( Scalar(GC_PR_FGD) ); +} + +/* + Initialize GMM background and foreground models using kmeans algorithm. 
+*/ +void initGMMs( const Mat& img, const Mat& mask, GMM& bgdGMM, GMM& fgdGMM ) +{ + const int kMeansItCount = 10; + const int kMeansType = KMEANS_PP_CENTERS; + + Mat bgdLabels, fgdLabels; + vector bgdSamples, fgdSamples; + Point p; + for( p.y = 0; p.y < img.rows; p.y++ ) + { + for( p.x = 0; p.x < img.cols; p.x++ ) + { + if( mask.at(p) == GC_BGD || mask.at(p) == GC_PR_BGD ) + bgdSamples.push_back( (Vec3f)img.at(p) ); + else // GC_FGD | GC_PR_FGD + fgdSamples.push_back( (Vec3f)img.at(p) ); + } + } + CV_Assert( !bgdSamples.empty() && !fgdSamples.empty() ); + Mat _bgdSamples( (int)bgdSamples.size(), 3, CV_32FC1, &bgdSamples[0][0] ); + kmeans( _bgdSamples, GMM::componentsCount, bgdLabels, + TermCriteria( CV_TERMCRIT_ITER, kMeansItCount, 0.0), 0, kMeansType ); + Mat _fgdSamples( (int)fgdSamples.size(), 3, CV_32FC1, &fgdSamples[0][0] ); + kmeans( _fgdSamples, GMM::componentsCount, fgdLabels, + TermCriteria( CV_TERMCRIT_ITER, kMeansItCount, 0.0), 0, kMeansType ); + + bgdGMM.initLearning(); + for( int i = 0; i < (int)bgdSamples.size(); i++ ) + bgdGMM.addSample( bgdLabels.at(i,0), bgdSamples[i] ); + bgdGMM.endLearning(); + + fgdGMM.initLearning(); + for( int i = 0; i < (int)fgdSamples.size(); i++ ) + fgdGMM.addSample( fgdLabels.at(i,0), fgdSamples[i] ); + fgdGMM.endLearning(); +} + +/* + Assign GMMs components for each pixel. +*/ +void assignGMMsComponents( const Mat& img, const Mat& mask, const GMM& bgdGMM, const GMM& fgdGMM, Mat& compIdxs ) +{ + Point p; + for( p.y = 0; p.y < img.rows; p.y++ ) + { + for( p.x = 0; p.x < img.cols; p.x++ ) + { + Vec3d color = img.at(p); + compIdxs.at(p) = mask.at(p) == GC_BGD || mask.at(p) == GC_PR_BGD ? + bgdGMM.whichComponent(color) : fgdGMM.whichComponent(color); + } + } +} + +/* + Learn GMMs parameters. 
+*/ +void learnGMMs( const Mat& img, const Mat& mask, const Mat& compIdxs, GMM& bgdGMM, GMM& fgdGMM ) +{ + bgdGMM.initLearning(); + fgdGMM.initLearning(); + Point p; + for( int ci = 0; ci < GMM::componentsCount; ci++ ) + { + for( p.y = 0; p.y < img.rows; p.y++ ) + { + for( p.x = 0; p.x < img.cols; p.x++ ) + { + if( compIdxs.at(p) == ci ) + { + if( mask.at(p) == GC_BGD || mask.at(p) == GC_PR_BGD ) + bgdGMM.addSample( ci, img.at(p) ); + else + fgdGMM.addSample( ci, img.at(p) ); + } + } + } + } + bgdGMM.endLearning(); + fgdGMM.endLearning(); +} + +/* + Construct GCGraph +*/ +void constructGCGraph( const Mat& img, const Mat& mask, const GMM& bgdGMM, const GMM& fgdGMM, double lambda, + const Mat& leftW, const Mat& upleftW, const Mat& upW, const Mat& uprightW, + GCGraph& graph ) +{ + int vtxCount = img.cols*img.rows, + edgeCount = 2*(4*img.cols*img.rows - 3*(img.cols + img.rows) + 2); + graph.create(vtxCount, edgeCount); + Point p; + for( p.y = 0; p.y < img.rows; p.y++ ) + { + for( p.x = 0; p.x < img.cols; p.x++) + { + // add node + int vtxIdx = graph.addVtx(); + Vec3b color = img.at(p); + + // set t-weights + double fromSource, toSink; + if( mask.at(p) == GC_PR_BGD || mask.at(p) == GC_PR_FGD ) + { + fromSource = -log( bgdGMM(color) ); + toSink = -log( fgdGMM(color) ); + } + else if( mask.at(p) == GC_BGD ) + { + fromSource = 0; + toSink = lambda; + } + else // GC_FGD + { + fromSource = lambda; + toSink = 0; + } + graph.addTermWeights( vtxIdx, fromSource, toSink ); + + // set n-weights + if( p.x>0 ) + { + double w = leftW.at(p); + graph.addEdges( vtxIdx, vtxIdx-1, w, w ); + } + if( p.x>0 && p.y>0 ) + { + double w = upleftW.at(p); + graph.addEdges( vtxIdx, vtxIdx-img.cols-1, w, w ); + } + if( p.y>0 ) + { + double w = upW.at(p); + graph.addEdges( vtxIdx, vtxIdx-img.cols, w, w ); + } + if( p.x0 ) + { + double w = uprightW.at(p); + graph.addEdges( vtxIdx, vtxIdx-img.cols+1, w, w ); + } + } + } +} + +/* + Estimate segmentation using MaxFlow algorithm +*/ +void 
estimateSegmentation( GCGraph& graph, Mat& mask ) +{ + graph.maxFlow(); + Point p; + for( p.y = 0; p.y < mask.rows; p.y++ ) + { + for( p.x = 0; p.x < mask.cols; p.x++ ) + { + if( mask.at(p) == GC_PR_BGD || mask.at(p) == GC_PR_FGD ) + { + if( graph.inSourceSegment( p.y*mask.cols+p.x /*vertex index*/ ) ) + mask.at(p) = GC_PR_FGD; + else + mask.at(p) = GC_PR_BGD; + } + } + } +} + +void cv::grabCut( InputArray _img, InputOutputArray _mask, Rect rect, + InputOutputArray _bgdModel, InputOutputArray _fgdModel, + int iterCount, int mode ) +{ + Mat img = _img.getMat(); + Mat& mask = _mask.getMatRef(); + Mat& bgdModel = _bgdModel.getMatRef(); + Mat& fgdModel = _fgdModel.getMatRef(); + + if( img.empty() ) + CV_Error( CV_StsBadArg, "image is empty" ); + if( img.type() != CV_8UC3 ) + CV_Error( CV_StsBadArg, "image mush have CV_8UC3 type" ); + + GMM bgdGMM( bgdModel ), fgdGMM( fgdModel ); + Mat compIdxs( img.size(), CV_32SC1 ); + + if( mode == GC_INIT_WITH_RECT || mode == GC_INIT_WITH_MASK ) + { + if( mode == GC_INIT_WITH_RECT ) + initMaskWithRect( mask, img.size(), rect ); + else // flag == GC_INIT_WITH_MASK + checkMask( img, mask ); + initGMMs( img, mask, bgdGMM, fgdGMM ); + } + + if( iterCount <= 0) + return; + + if( mode == GC_EVAL ) + checkMask( img, mask ); + + const double gamma = 50; + const double lambda = 9*gamma; + const double beta = calcBeta( img ); + + Mat leftW, upleftW, upW, uprightW; + calcNWeights( img, leftW, upleftW, upW, uprightW, beta, gamma ); + + for( int i = 0; i < iterCount; i++ ) + { + GCGraph graph; + assignGMMsComponents( img, mask, bgdGMM, fgdGMM, compIdxs ); + learnGMMs( img, mask, compIdxs, bgdGMM, fgdGMM ); + constructGCGraph(img, mask, bgdGMM, fgdGMM, lambda, leftW, upleftW, upW, uprightW, graph ); + estimateSegmentation( graph, mask ); + } +} diff --git a/opencv/imgproc/histogram.cpp b/opencv/imgproc/histogram.cpp new file mode 100644 index 0000000..8d1dd6f --- /dev/null +++ b/opencv/imgproc/histogram.cpp @@ -0,0 +1,2631 @@ 
+/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ +#include "precomp.hpp" + +namespace cv +{ + +template<> void Ptr::delete_obj() +{ cvReleaseHist(&obj); } + + +////////////////// Helper functions ////////////////////// + +static const size_t OUT_OF_RANGE = (size_t)1 << (sizeof(size_t)*8 - 2); + +static void +calcHistLookupTables_8u( const Mat& hist, const SparseMat& shist, + int dims, const float** ranges, const double* uniranges, + bool uniform, bool issparse, vector& _tab ) +{ + const int low = 0, high = 256; + int i, j; + _tab.resize((high-low)*dims); + size_t* tab = &_tab[0]; + + if( uniform ) + { + for( i = 0; i < dims; i++ ) + { + double a = uniranges[i*2]; + double b = uniranges[i*2+1]; + int sz = !issparse ? hist.size[i] : shist.size(i); + size_t step = !issparse ? hist.step[i] : 1; + + for( j = low; j < high; j++ ) + { + int idx = cvFloor(j*a + b); + size_t written_idx; + if( (unsigned)idx < (unsigned)sz ) + written_idx = idx*step; + else + written_idx = OUT_OF_RANGE; + + tab[i*(high - low) + j - low] = written_idx; + } + } + } + else + { + for( i = 0; i < dims; i++ ) + { + int limit = std::min(cvCeil(ranges[i][0]), high); + int idx = -1, sz = !issparse ? hist.size[i] : shist.size(i); + size_t written_idx = OUT_OF_RANGE; + size_t step = !issparse ? 
hist.step[i] : 1; + + for(j = low;;) + { + for( ; j < limit; j++ ) + tab[i*(high - low) + j - low] = written_idx; + + if( (unsigned)(++idx) < (unsigned)sz ) + { + limit = std::min(cvCeil(ranges[i][idx+1]), high); + written_idx = idx*step; + } + else + { + for( ; j < high; j++ ) + tab[i*(high - low) + j - low] = OUT_OF_RANGE; + break; + } + } + } + } +} + + +static void histPrepareImages( const Mat* images, int nimages, const int* channels, + const Mat& mask, int dims, const int* histSize, + const float** ranges, bool uniform, + vector& ptrs, vector& deltas, + Size& imsize, vector& uniranges ) +{ + int i, j, c; + CV_Assert( channels != 0 || nimages == dims ); + + imsize = images[0].size(); + int depth = images[0].depth(), esz1 = (int)images[0].elemSize1(); + bool isContinuous = true; + + ptrs.resize(dims + 1); + deltas.resize((dims + 1)*2); + + for( i = 0; i < dims; i++ ) + { + if(!channels) + { + j = i; + c = 0; + CV_Assert( images[j].channels() == 1 ); + } + else + { + c = channels[i]; + CV_Assert( c >= 0 ); + for( j = 0; j < nimages; c -= images[j].channels(), j++ ) + if( c < images[j].channels() ) + break; + CV_Assert( j < nimages ); + } + + CV_Assert( images[j].size() == imsize && images[j].depth() == depth ); + if( !images[j].isContinuous() ) + isContinuous = false; + ptrs[i] = images[j].data + c*esz1; + deltas[i*2] = images[j].channels(); + deltas[i*2+1] = (int)(images[j].step/esz1 - imsize.width*deltas[i*2]); + } + + if( mask.data ) + { + CV_Assert( mask.size() == imsize && mask.channels() == 1 ); + isContinuous = isContinuous && mask.isContinuous(); + ptrs[dims] = mask.data; + deltas[dims*2] = 1; + deltas[dims*2 + 1] = (int)(mask.step/mask.elemSize1()); + } + + if( isContinuous ) + { + imsize.width *= imsize.height; + imsize.height = 1; + } + + if( !ranges ) + { + CV_Assert( depth == CV_8U ); + + uniranges.resize( dims*2 ); + for( i = 0; i < dims; i++ ) + { + uniranges[i*2] = histSize[i]/256.; + uniranges[i*2+1] = 0; + } + } + else if( uniform ) + { + 
uniranges.resize( dims*2 ); + for( i = 0; i < dims; i++ ) + { + CV_Assert( ranges[i] && ranges[i][0] < ranges[i][1] ); + double low = ranges[i][0], high = ranges[i][1]; + double t = histSize[i]/(high - low); + uniranges[i*2] = t; + uniranges[i*2+1] = -t*low; + } + } + else + { + for( i = 0; i < dims; i++ ) + { + size_t j, n = histSize[i]; + for( j = 0; j < n; j++ ) + CV_Assert( ranges[i][j] < ranges[i][j+1] ); + } + } +} + + +////////////////////////////////// C A L C U L A T E H I S T O G R A M //////////////////////////////////// + +template static void +calcHist_( vector& _ptrs, const vector& _deltas, + Size imsize, Mat& hist, int dims, const float** _ranges, + const double* _uniranges, bool uniform ) +{ + T** ptrs = (T**)&_ptrs[0]; + const int* deltas = &_deltas[0]; + uchar* H = hist.data; + int i, x; + const uchar* mask = _ptrs[dims]; + int mstep = _deltas[dims*2 + 1]; + int size[CV_MAX_DIM]; + size_t hstep[CV_MAX_DIM]; + + for( i = 0; i < dims; i++ ) + { + size[i] = hist.size[i]; + hstep[i] = hist.step[i]; + } + + if( uniform ) + { + const double* uniranges = &_uniranges[0]; + + if( dims == 1 ) + { + double a = uniranges[0], b = uniranges[1]; + int sz = size[0], d0 = deltas[0], step0 = deltas[1]; + const T* p0 = (const T*)ptrs[0]; + + for( ; imsize.height--; p0 += step0, mask += mstep ) + { + if( !mask ) + for( x = 0; x < imsize.width; x++, p0 += d0 ) + { + int idx = cvFloor(*p0*a + b); + if( (unsigned)idx < (unsigned)sz ) + ((int*)H)[idx]++; + } + else + for( x = 0; x < imsize.width; x++, p0 += d0 ) + if( mask[x] ) + { + int idx = cvFloor(*p0*a + b); + if( (unsigned)idx < (unsigned)sz ) + ((int*)H)[idx]++; + } + } + } + else if( dims == 2 ) + { + double a0 = uniranges[0], b0 = uniranges[1], a1 = uniranges[2], b1 = uniranges[3]; + int sz0 = size[0], sz1 = size[1]; + int d0 = deltas[0], step0 = deltas[1], + d1 = deltas[2], step1 = deltas[3]; + size_t hstep0 = hstep[0]; + const T* p0 = (const T*)ptrs[0]; + const T* p1 = (const T*)ptrs[1]; + + for( ; 
imsize.height--; p0 += step0, p1 += step1, mask += mstep ) + { + if( !mask ) + for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 ) + { + int idx0 = cvFloor(*p0*a0 + b0); + int idx1 = cvFloor(*p1*a1 + b1); + if( (unsigned)idx0 < (unsigned)sz0 && (unsigned)idx1 < (unsigned)sz1 ) + ((int*)(H + hstep0*idx0))[idx1]++; + } + else + for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 ) + if( mask[x] ) + { + int idx0 = cvFloor(*p0*a0 + b0); + int idx1 = cvFloor(*p1*a1 + b1); + if( (unsigned)idx0 < (unsigned)sz0 && (unsigned)idx1 < (unsigned)sz1 ) + ((int*)(H + hstep0*idx0))[idx1]++; + } + } + } + else if( dims == 3 ) + { + double a0 = uniranges[0], b0 = uniranges[1], + a1 = uniranges[2], b1 = uniranges[3], + a2 = uniranges[4], b2 = uniranges[5]; + int sz0 = size[0], sz1 = size[1], sz2 = size[2]; + int d0 = deltas[0], step0 = deltas[1], + d1 = deltas[2], step1 = deltas[3], + d2 = deltas[4], step2 = deltas[5]; + size_t hstep0 = hstep[0], hstep1 = hstep[1]; + const T* p0 = (const T*)ptrs[0]; + const T* p1 = (const T*)ptrs[1]; + const T* p2 = (const T*)ptrs[2]; + + for( ; imsize.height--; p0 += step0, p1 += step1, p2 += step2, mask += mstep ) + { + if( !mask ) + for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 ) + { + int idx0 = cvFloor(*p0*a0 + b0); + int idx1 = cvFloor(*p1*a1 + b1); + int idx2 = cvFloor(*p2*a2 + b2); + if( (unsigned)idx0 < (unsigned)sz0 && + (unsigned)idx1 < (unsigned)sz1 && + (unsigned)idx2 < (unsigned)sz2 ) + ((int*)(H + hstep0*idx0 + hstep1*idx1))[idx2]++; + } + else + for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 ) + if( mask[x] ) + { + int idx0 = cvFloor(*p0*a0 + b0); + int idx1 = cvFloor(*p1*a1 + b1); + int idx2 = cvFloor(*p2*a2 + b2); + if( (unsigned)idx0 < (unsigned)sz0 && + (unsigned)idx1 < (unsigned)sz1 && + (unsigned)idx2 < (unsigned)sz2 ) + ((int*)(H + hstep0*idx0 + hstep1*idx1))[idx2]++; + } + } + } + else + { + for( ; imsize.height--; mask += mstep ) + { + if( !mask ) + for( x = 0; x < imsize.width; x++ ) 
+ { + uchar* Hptr = H; + for( i = 0; i < dims; i++ ) + { + int idx = cvFloor(*ptrs[i]*uniranges[i*2] + uniranges[i*2+1]); + if( (unsigned)idx >= (unsigned)size[i] ) + break; + ptrs[i] += deltas[i*2]; + Hptr += idx*hstep[i]; + } + + if( i == dims ) + ++*((int*)Hptr); + else + for( ; i < dims; i++ ) + ptrs[i] += deltas[i*2]; + } + else + for( x = 0; x < imsize.width; x++ ) + { + uchar* Hptr = H; + i = 0; + if( mask[x] ) + for( ; i < dims; i++ ) + { + int idx = cvFloor(*ptrs[i]*uniranges[i*2] + uniranges[i*2+1]); + if( (unsigned)idx >= (unsigned)size[i] ) + break; + ptrs[i] += deltas[i*2]; + Hptr += idx*hstep[i]; + } + + if( i == dims ) + ++*((int*)Hptr); + else + for( ; i < dims; i++ ) + ptrs[i] += deltas[i*2]; + } + for( i = 0; i < dims; i++ ) + ptrs[i] += deltas[i*2 + 1]; + } + } + } + else + { + // non-uniform histogram + const float* ranges[CV_MAX_DIM]; + for( i = 0; i < dims; i++ ) + ranges[i] = &_ranges[i][0]; + + for( ; imsize.height--; mask += mstep ) + { + for( x = 0; x < imsize.width; x++ ) + { + uchar* Hptr = H; + i = 0; + + if( !mask || mask[x] ) + for( ; i < dims; i++ ) + { + float v = (float)*ptrs[i]; + const float* R = ranges[i]; + int idx = -1, sz = size[i]; + + while( v >= R[idx+1] && ++idx < sz ) + ; // nop + + if( (unsigned)idx >= (unsigned)sz ) + break; + + ptrs[i] += deltas[i*2]; + Hptr += idx*hstep[i]; + } + + if( i == dims ) + ++*((int*)Hptr); + else + for( ; i < dims; i++ ) + ptrs[i] += deltas[i*2]; + } + + for( i = 0; i < dims; i++ ) + ptrs[i] += deltas[i*2 + 1]; + } + } +} + + +static void +calcHist_8u( vector& _ptrs, const vector& _deltas, + Size imsize, Mat& hist, int dims, const float** _ranges, + const double* _uniranges, bool uniform ) +{ + uchar** ptrs = &_ptrs[0]; + const int* deltas = &_deltas[0]; + uchar* H = hist.data; + int i, x; + const uchar* mask = _ptrs[dims]; + int mstep = _deltas[dims*2 + 1]; + vector _tab; + + calcHistLookupTables_8u( hist, SparseMat(), dims, _ranges, _uniranges, uniform, false, _tab ); + const size_t* tab 
= &_tab[0]; + + if( dims == 1 ) + { + int d0 = deltas[0], step0 = deltas[1]; + int matH[256] = {0}; + const uchar* p0 = (const uchar*)ptrs[0]; + + for( ; imsize.height--; p0 += step0, mask += mstep ) + { + if( !mask ) + { + if( d0 == 1 ) + { + for( x = 0; x <= imsize.width - 4; x += 4 ) + { + int t0 = p0[x], t1 = p0[x+1]; + matH[t0]++; matH[t1]++; + t0 = p0[x+2]; t1 = p0[x+3]; + matH[t0]++; matH[t1]++; + } + p0 += x; + } + else + for( x = 0; x <= imsize.width - 4; x += 4 ) + { + int t0 = p0[0], t1 = p0[d0]; + matH[t0]++; matH[t1]++; + p0 += d0*2; + t0 = p0[0]; t1 = p0[d0]; + matH[t0]++; matH[t1]++; + p0 += d0*2; + } + + for( ; x < imsize.width; x++, p0 += d0 ) + matH[*p0]++; + } + else + for( x = 0; x < imsize.width; x++, p0 += d0 ) + if( mask[x] ) + matH[*p0]++; + } + + for( i = 0; i < 256; i++ ) + { + size_t hidx = tab[i]; + if( hidx < OUT_OF_RANGE ) + *(int*)(H + hidx) += matH[i]; + } + } + else if( dims == 2 ) + { + int d0 = deltas[0], step0 = deltas[1], + d1 = deltas[2], step1 = deltas[3]; + const uchar* p0 = (const uchar*)ptrs[0]; + const uchar* p1 = (const uchar*)ptrs[1]; + + for( ; imsize.height--; p0 += step0, p1 += step1, mask += mstep ) + { + if( !mask ) + for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 ) + { + size_t idx = tab[*p0] + tab[*p1 + 256]; + if( idx < OUT_OF_RANGE ) + ++*(int*)(H + idx); + } + else + for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 ) + { + size_t idx; + if( mask[x] && (idx = tab[*p0] + tab[*p1 + 256]) < OUT_OF_RANGE ) + ++*(int*)(H + idx); + } + } + } + else if( dims == 3 ) + { + int d0 = deltas[0], step0 = deltas[1], + d1 = deltas[2], step1 = deltas[3], + d2 = deltas[4], step2 = deltas[5]; + + const uchar* p0 = (const uchar*)ptrs[0]; + const uchar* p1 = (const uchar*)ptrs[1]; + const uchar* p2 = (const uchar*)ptrs[2]; + + for( ; imsize.height--; p0 += step0, p1 += step1, p2 += step2, mask += mstep ) + { + if( !mask ) + for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 ) + { + size_t idx = tab[*p0] + 
tab[*p1 + 256] + tab[*p2 + 512]; + if( idx < OUT_OF_RANGE ) + ++*(int*)(H + idx); + } + else + for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 ) + { + size_t idx; + if( mask[x] && (idx = tab[*p0] + tab[*p1 + 256] + tab[*p2 + 512]) < OUT_OF_RANGE ) + ++*(int*)(H + idx); + } + } + } + else + { + for( ; imsize.height--; mask += mstep ) + { + if( !mask ) + for( x = 0; x < imsize.width; x++ ) + { + uchar* Hptr = H; + for( i = 0; i < dims; i++ ) + { + size_t idx = tab[*ptrs[i] + i*256]; + if( idx >= OUT_OF_RANGE ) + break; + Hptr += idx; + ptrs[i] += deltas[i*2]; + } + + if( i == dims ) + ++*((int*)Hptr); + else + for( ; i < dims; i++ ) + ptrs[i] += deltas[i*2]; + } + else + for( x = 0; x < imsize.width; x++ ) + { + uchar* Hptr = H; + int i = 0; + if( mask[x] ) + for( ; i < dims; i++ ) + { + size_t idx = tab[*ptrs[i] + i*256]; + if( idx >= OUT_OF_RANGE ) + break; + Hptr += idx; + ptrs[i] += deltas[i*2]; + } + + if( i == dims ) + ++*((int*)Hptr); + else + for( ; i < dims; i++ ) + ptrs[i] += deltas[i*2]; + } + for( i = 0; i < dims; i++ ) + ptrs[i] += deltas[i*2 + 1]; + } + } +} + +} + +void cv::calcHist( const Mat* images, int nimages, const int* channels, + InputArray _mask, OutputArray _hist, int dims, const int* histSize, + const float** ranges, bool uniform, bool accumulate ) +{ + Mat mask = _mask.getMat(); + + CV_Assert(dims > 0 && histSize); + + uchar* histdata = _hist.getMat().data; + _hist.create(dims, histSize, CV_32F); + Mat hist = _hist.getMat(), ihist = hist; + ihist.flags = (ihist.flags & ~CV_MAT_TYPE_MASK)|CV_32S; + + if( !accumulate || histdata != hist.data ) + hist = Scalar(0.); + else + hist.convertTo(ihist, CV_32S); + + vector ptrs; + vector deltas; + vector uniranges; + Size imsize; + + CV_Assert( !mask.data || mask.type() == CV_8UC1 ); + histPrepareImages( images, nimages, channels, mask, dims, hist.size, ranges, + uniform, ptrs, deltas, imsize, uniranges ); + const double* _uniranges = uniform ? 
&uniranges[0] : 0; + + int depth = images[0].depth(); + + if( depth == CV_8U ) + calcHist_8u(ptrs, deltas, imsize, ihist, dims, ranges, _uniranges, uniform ); + else if( depth == CV_16U ) + calcHist_(ptrs, deltas, imsize, ihist, dims, ranges, _uniranges, uniform ); + else if( depth == CV_32F ) + calcHist_(ptrs, deltas, imsize, ihist, dims, ranges, _uniranges, uniform ); + else + CV_Error(CV_StsUnsupportedFormat, ""); + + ihist.convertTo(hist, CV_32F); +} + + +namespace cv +{ + +template static void +calcSparseHist_( vector& _ptrs, const vector& _deltas, + Size imsize, SparseMat& hist, int dims, const float** _ranges, + const double* _uniranges, bool uniform ) +{ + T** ptrs = (T**)&_ptrs[0]; + const int* deltas = &_deltas[0]; + int i, x; + const uchar* mask = _ptrs[dims]; + int mstep = _deltas[dims*2 + 1]; + const int* size = hist.hdr->size; + int idx[CV_MAX_DIM]; + + if( uniform ) + { + const double* uniranges = &_uniranges[0]; + + for( ; imsize.height--; mask += mstep ) + { + for( x = 0; x < imsize.width; x++ ) + { + i = 0; + if( !mask || mask[x] ) + for( ; i < dims; i++ ) + { + idx[i] = cvFloor(*ptrs[i]*uniranges[i*2] + uniranges[i*2+1]); + if( (unsigned)idx[i] >= (unsigned)size[i] ) + break; + ptrs[i] += deltas[i*2]; + } + + if( i == dims ) + ++*(int*)hist.ptr(idx, true); + else + for( ; i < dims; i++ ) + ptrs[i] += deltas[i*2]; + } + for( i = 0; i < dims; i++ ) + ptrs[i] += deltas[i*2 + 1]; + } + } + else + { + // non-uniform histogram + const float* ranges[CV_MAX_DIM]; + for( i = 0; i < dims; i++ ) + ranges[i] = &_ranges[i][0]; + + for( ; imsize.height--; mask += mstep ) + { + for( x = 0; x < imsize.width; x++ ) + { + i = 0; + + if( !mask || mask[x] ) + for( ; i < dims; i++ ) + { + float v = (float)*ptrs[i]; + const float* R = ranges[i]; + int j = -1, sz = size[i]; + + while( v >= R[j+1] && ++j < sz ) + ; // nop + + if( (unsigned)j >= (unsigned)sz ) + break; + ptrs[i] += deltas[i*2]; + idx[i] = j; + } + + if( i == dims ) + ++*(int*)hist.ptr(idx, true); + else 
+ for( ; i < dims; i++ ) + ptrs[i] += deltas[i*2]; + } + + for( i = 0; i < dims; i++ ) + ptrs[i] += deltas[i*2 + 1]; + } + } +} + + +static void +calcSparseHist_8u( vector& _ptrs, const vector& _deltas, + Size imsize, SparseMat& hist, int dims, const float** _ranges, + const double* _uniranges, bool uniform ) +{ + uchar** ptrs = (uchar**)&_ptrs[0]; + const int* deltas = &_deltas[0]; + int i, x; + const uchar* mask = _ptrs[dims]; + int mstep = _deltas[dims*2 + 1]; + int idx[CV_MAX_DIM]; + vector _tab; + + calcHistLookupTables_8u( Mat(), hist, dims, _ranges, _uniranges, uniform, true, _tab ); + const size_t* tab = &_tab[0]; + + for( ; imsize.height--; mask += mstep ) + { + for( x = 0; x < imsize.width; x++ ) + { + int i = 0; + if( !mask || mask[x] ) + for( ; i < dims; i++ ) + { + size_t hidx = tab[*ptrs[i] + i*256]; + if( hidx >= OUT_OF_RANGE ) + break; + ptrs[i] += deltas[i*2]; + idx[i] = (int)hidx; + } + + if( i == dims ) + ++*(int*)hist.ptr(idx,true); + else + for( ; i < dims; i++ ) + ptrs[i] += deltas[i*2]; + } + for( i = 0; i < dims; i++ ) + ptrs[i] += deltas[i*2 + 1]; + } +} + + +static void calcHist( const Mat* images, int nimages, const int* channels, + const Mat& mask, SparseMat& hist, int dims, const int* histSize, + const float** ranges, bool uniform, bool accumulate, bool keepInt ) +{ + size_t i, N; + + if( !accumulate ) + hist.create(dims, histSize, CV_32F); + else + { + SparseMatIterator it = hist.begin(); + for( i = 0, N = hist.nzcount(); i < N; i++, ++it ) + { + Cv32suf* val = (Cv32suf*)it.ptr; + val->i = cvRound(val->f); + } + } + + vector ptrs; + vector deltas; + vector uniranges; + Size imsize; + + CV_Assert( !mask.data || mask.type() == CV_8UC1 ); + histPrepareImages( images, nimages, channels, mask, dims, hist.hdr->size, ranges, + uniform, ptrs, deltas, imsize, uniranges ); + const double* _uniranges = uniform ? 
&uniranges[0] : 0; + + int depth = images[0].depth(); + if( depth == CV_8U ) + calcSparseHist_8u(ptrs, deltas, imsize, hist, dims, ranges, _uniranges, uniform ); + else if( depth == CV_16U ) + calcSparseHist_(ptrs, deltas, imsize, hist, dims, ranges, _uniranges, uniform ); + else if( depth == CV_32F ) + calcSparseHist_(ptrs, deltas, imsize, hist, dims, ranges, _uniranges, uniform ); + else + CV_Error(CV_StsUnsupportedFormat, ""); + + if( !keepInt ) + { + SparseMatIterator it = hist.begin(); + for( i = 0, N = hist.nzcount(); i < N; i++, ++it ) + { + Cv32suf* val = (Cv32suf*)it.ptr; + val->f = (float)val->i; + } + } +} + +} + +void cv::calcHist( const Mat* images, int nimages, const int* channels, + InputArray _mask, SparseMat& hist, int dims, const int* histSize, + const float** ranges, bool uniform, bool accumulate ) +{ + Mat mask = _mask.getMat(); + calcHist( images, nimages, channels, mask, hist, dims, histSize, + ranges, uniform, accumulate, false ); +} + + +void cv::calcHist( InputArrayOfArrays images, const vector& channels, + InputArray mask, OutputArray hist, + const vector& histSize, + const vector& ranges, + bool accumulate ) +{ + int i, dims = (int)histSize.size(), rsz = (int)ranges.size(), csz = (int)channels.size(); + int nimages = (int)images.total(); + + CV_Assert(nimages > 0 && dims > 0); + CV_Assert(rsz == dims*2 || (rsz == 0 && images.depth(0) == CV_8U)); + CV_Assert(csz == 0 || csz == dims); + float* _ranges[CV_MAX_DIM]; + if( rsz > 0 ) + { + for( i = 0; i < rsz/2; i++ ) + _ranges[i] = (float*)&ranges[i*2]; + } + + AutoBuffer buf(nimages); + for( i = 0; i < nimages; i++ ) + buf[i] = images.getMat(i); + + calcHist(&buf[0], nimages, csz ? &channels[0] : 0, + mask, hist, dims, &histSize[0], rsz ? 
(const float**)_ranges : 0, + true, accumulate); +} + + +/////////////////////////////////////// B A C K P R O J E C T //////////////////////////////////// + +namespace cv +{ + +template static void +calcBackProj_( vector& _ptrs, const vector& _deltas, + Size imsize, const Mat& hist, int dims, const float** _ranges, + const double* _uniranges, float scale, bool uniform ) +{ + T** ptrs = (T**)&_ptrs[0]; + const int* deltas = &_deltas[0]; + uchar* H = hist.data; + int i, x; + BT* bproj = (BT*)_ptrs[dims]; + int bpstep = _deltas[dims*2 + 1]; + int size[CV_MAX_DIM]; + size_t hstep[CV_MAX_DIM]; + + for( i = 0; i < dims; i++ ) + { + size[i] = hist.size[i]; + hstep[i] = hist.step[i]; + } + + if( uniform ) + { + const double* uniranges = &_uniranges[0]; + + if( dims == 1 ) + { + double a = uniranges[0], b = uniranges[1]; + int sz = size[0], d0 = deltas[0], step0 = deltas[1]; + const T* p0 = (const T*)ptrs[0]; + + for( ; imsize.height--; p0 += step0, bproj += bpstep ) + { + for( x = 0; x < imsize.width; x++, p0 += d0 ) + { + int idx = cvFloor(*p0*a + b); + bproj[x] = (unsigned)idx < (unsigned)sz ? saturate_cast(((float*)H)[idx]*scale) : 0; + } + } + } + else if( dims == 2 ) + { + double a0 = uniranges[0], b0 = uniranges[1], + a1 = uniranges[2], b1 = uniranges[3]; + int sz0 = size[0], sz1 = size[1]; + int d0 = deltas[0], step0 = deltas[1], + d1 = deltas[2], step1 = deltas[3]; + size_t hstep0 = hstep[0]; + const T* p0 = (const T*)ptrs[0]; + const T* p1 = (const T*)ptrs[1]; + + for( ; imsize.height--; p0 += step0, p1 += step1, bproj += bpstep ) + { + for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 ) + { + int idx0 = cvFloor(*p0*a0 + b0); + int idx1 = cvFloor(*p1*a1 + b1); + bproj[x] = (unsigned)idx0 < (unsigned)sz0 && + (unsigned)idx1 < (unsigned)sz1 ? 
+ saturate_cast(((float*)(H + hstep0*idx0))[idx1]*scale) : 0; + } + } + } + else if( dims == 3 ) + { + double a0 = uniranges[0], b0 = uniranges[1], + a1 = uniranges[2], b1 = uniranges[3], + a2 = uniranges[4], b2 = uniranges[5]; + int sz0 = size[0], sz1 = size[1], sz2 = size[2]; + int d0 = deltas[0], step0 = deltas[1], + d1 = deltas[2], step1 = deltas[3], + d2 = deltas[4], step2 = deltas[5]; + size_t hstep0 = hstep[0], hstep1 = hstep[1]; + const T* p0 = (const T*)ptrs[0]; + const T* p1 = (const T*)ptrs[1]; + const T* p2 = (const T*)ptrs[2]; + + for( ; imsize.height--; p0 += step0, p1 += step1, p2 += step2, bproj += bpstep ) + { + for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 ) + { + int idx0 = cvFloor(*p0*a0 + b0); + int idx1 = cvFloor(*p1*a1 + b1); + int idx2 = cvFloor(*p2*a2 + b2); + bproj[x] = (unsigned)idx0 < (unsigned)sz0 && + (unsigned)idx1 < (unsigned)sz1 && + (unsigned)idx2 < (unsigned)sz2 ? + saturate_cast(((float*)(H + hstep0*idx0 + hstep1*idx1))[idx2]*scale) : 0; + } + } + } + else + { + for( ; imsize.height--; bproj += bpstep ) + { + for( x = 0; x < imsize.width; x++ ) + { + uchar* Hptr = H; + for( i = 0; i < dims; i++ ) + { + int idx = cvFloor(*ptrs[i]*uniranges[i*2] + uniranges[i*2+1]); + if( (unsigned)idx >= (unsigned)size[i] || (_ranges && *ptrs[i] >= _ranges[i][1])) + break; + ptrs[i] += deltas[i*2]; + Hptr += idx*hstep[i]; + } + + if( i == dims ) + bproj[x] = saturate_cast(*(float*)Hptr*scale); + else + { + bproj[x] = 0; + for( ; i < dims; i++ ) + ptrs[i] += deltas[i*2]; + } + } + for( i = 0; i < dims; i++ ) + ptrs[i] += deltas[i*2 + 1]; + } + } + } + else + { + // non-uniform histogram + const float* ranges[CV_MAX_DIM]; + for( i = 0; i < dims; i++ ) + ranges[i] = &_ranges[i][0]; + + for( ; imsize.height--; bproj += bpstep ) + { + for( x = 0; x < imsize.width; x++ ) + { + uchar* Hptr = H; + for( i = 0; i < dims; i++ ) + { + float v = (float)*ptrs[i]; + const float* R = ranges[i]; + int idx = -1, sz = size[i]; + + while( v >= 
R[idx+1] && ++idx < sz ) + ; // nop + + if( (unsigned)idx >= (unsigned)sz ) + break; + + ptrs[i] += deltas[i*2]; + Hptr += idx*hstep[i]; + } + + if( i == dims ) + bproj[x] = saturate_cast(*(float*)Hptr*scale); + else + { + bproj[x] = 0; + for( ; i < dims; i++ ) + ptrs[i] += deltas[i*2]; + } + } + + for( i = 0; i < dims; i++ ) + ptrs[i] += deltas[i*2 + 1]; + } + } +} + + +static void +calcBackProj_8u( vector& _ptrs, const vector& _deltas, + Size imsize, const Mat& hist, int dims, const float** _ranges, + const double* _uniranges, float scale, bool uniform ) +{ + uchar** ptrs = &_ptrs[0]; + const int* deltas = &_deltas[0]; + uchar* H = hist.data; + int i, x; + uchar* bproj = _ptrs[dims]; + int bpstep = _deltas[dims*2 + 1]; + vector _tab; + + calcHistLookupTables_8u( hist, SparseMat(), dims, _ranges, _uniranges, uniform, false, _tab ); + const size_t* tab = &_tab[0]; + + if( dims == 1 ) + { + int d0 = deltas[0], step0 = deltas[1]; + uchar matH[256] = {0}; + const uchar* p0 = (const uchar*)ptrs[0]; + + for( i = 0; i < 256; i++ ) + { + size_t hidx = tab[i]; + if( hidx < OUT_OF_RANGE ) + matH[i] = saturate_cast(*(float*)(H + hidx)*scale); + } + + for( ; imsize.height--; p0 += step0, bproj += bpstep ) + { + if( d0 == 1 ) + { + for( x = 0; x <= imsize.width - 4; x += 4 ) + { + uchar t0 = matH[p0[x]], t1 = matH[p0[x+1]]; + bproj[x] = t0; bproj[x+1] = t1; + t0 = matH[p0[x+2]]; t1 = matH[p0[x+3]]; + bproj[x+2] = t0; bproj[x+3] = t1; + } + p0 += x; + } + else + for( x = 0; x <= imsize.width - 4; x += 4 ) + { + uchar t0 = matH[p0[0]], t1 = matH[p0[d0]]; + bproj[x] = t0; bproj[x+1] = t1; + p0 += d0*2; + t0 = matH[p0[0]]; t1 = matH[p0[d0]]; + bproj[x+2] = t0; bproj[x+3] = t1; + p0 += d0*2; + } + + for( ; x < imsize.width; x++, p0 += d0 ) + bproj[x] = matH[*p0]; + } + } + else if( dims == 2 ) + { + int d0 = deltas[0], step0 = deltas[1], + d1 = deltas[2], step1 = deltas[3]; + const uchar* p0 = (const uchar*)ptrs[0]; + const uchar* p1 = (const uchar*)ptrs[1]; + + for( ; 
imsize.height--; p0 += step0, p1 += step1, bproj += bpstep ) + { + for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1 ) + { + size_t idx = tab[*p0] + tab[*p1 + 256]; + bproj[x] = idx < OUT_OF_RANGE ? saturate_cast(*(float*)(H + idx)*scale) : 0; + } + } + } + else if( dims == 3 ) + { + int d0 = deltas[0], step0 = deltas[1], + d1 = deltas[2], step1 = deltas[3], + d2 = deltas[4], step2 = deltas[5]; + const uchar* p0 = (const uchar*)ptrs[0]; + const uchar* p1 = (const uchar*)ptrs[1]; + const uchar* p2 = (const uchar*)ptrs[2]; + + for( ; imsize.height--; p0 += step0, p1 += step1, p2 += step2, bproj += bpstep ) + { + for( x = 0; x < imsize.width; x++, p0 += d0, p1 += d1, p2 += d2 ) + { + size_t idx = tab[*p0] + tab[*p1 + 256] + tab[*p2 + 512]; + bproj[x] = idx < OUT_OF_RANGE ? saturate_cast(*(float*)(H + idx)*scale) : 0; + } + } + } + else + { + for( ; imsize.height--; bproj += bpstep ) + { + for( x = 0; x < imsize.width; x++ ) + { + uchar* Hptr = H; + for( i = 0; i < dims; i++ ) + { + size_t idx = tab[*ptrs[i] + i*256]; + if( idx >= OUT_OF_RANGE ) + break; + ptrs[i] += deltas[i*2]; + Hptr += idx; + } + + if( i == dims ) + bproj[x] = saturate_cast(*(float*)Hptr*scale); + else + { + bproj[x] = 0; + for( ; i < dims; i++ ) + ptrs[i] += deltas[i*2]; + } + } + for( i = 0; i < dims; i++ ) + ptrs[i] += deltas[i*2 + 1]; + } + } +} + +} + +void cv::calcBackProject( const Mat* images, int nimages, const int* channels, + InputArray _hist, OutputArray _backProject, + const float** ranges, double scale, bool uniform ) +{ + Mat hist = _hist.getMat(); + vector ptrs; + vector deltas; + vector uniranges; + Size imsize; + int dims = hist.dims == 2 && hist.size[1] == 1 ? 
1 : hist.dims; + + CV_Assert( dims > 0 && hist.data ); + _backProject.create( images[0].size(), images[0].depth() ); + Mat backProject = _backProject.getMat(); + histPrepareImages( images, nimages, channels, backProject, dims, hist.size, ranges, + uniform, ptrs, deltas, imsize, uniranges ); + const double* _uniranges = uniform ? &uniranges[0] : 0; + + int depth = images[0].depth(); + if( depth == CV_8U ) + calcBackProj_8u(ptrs, deltas, imsize, hist, dims, ranges, _uniranges, (float)scale, uniform); + else if( depth == CV_16U ) + calcBackProj_(ptrs, deltas, imsize, hist, dims, ranges, _uniranges, (float)scale, uniform ); + else if( depth == CV_32F ) + calcBackProj_(ptrs, deltas, imsize, hist, dims, ranges, _uniranges, (float)scale, uniform ); + else + CV_Error(CV_StsUnsupportedFormat, ""); +} + + +namespace cv +{ + +template static void +calcSparseBackProj_( vector& _ptrs, const vector& _deltas, + Size imsize, const SparseMat& hist, int dims, const float** _ranges, + const double* _uniranges, float scale, bool uniform ) +{ + T** ptrs = (T**)&_ptrs[0]; + const int* deltas = &_deltas[0]; + int i, x; + BT* bproj = (BT*)_ptrs[dims]; + int bpstep = _deltas[dims*2 + 1]; + const int* size = hist.hdr->size; + int idx[CV_MAX_DIM]; + const SparseMat_& hist_ = (const SparseMat_&)hist; + + if( uniform ) + { + const double* uniranges = &_uniranges[0]; + for( ; imsize.height--; bproj += bpstep ) + { + for( x = 0; x < imsize.width; x++ ) + { + for( i = 0; i < dims; i++ ) + { + idx[i] = cvFloor(*ptrs[i]*uniranges[i*2] + uniranges[i*2+1]); + if( (unsigned)idx[i] >= (unsigned)size[i] ) + break; + ptrs[i] += deltas[i*2]; + } + + if( i == dims ) + bproj[x] = saturate_cast(hist_(idx)*scale); + else + { + bproj[x] = 0; + for( ; i < dims; i++ ) + ptrs[i] += deltas[i*2]; + } + } + for( i = 0; i < dims; i++ ) + ptrs[i] += deltas[i*2 + 1]; + } + } + else + { + // non-uniform histogram + const float* ranges[CV_MAX_DIM]; + for( i = 0; i < dims; i++ ) + ranges[i] = &_ranges[i][0]; + + for( ; 
imsize.height--; bproj += bpstep ) + { + for( x = 0; x < imsize.width; x++ ) + { + for( i = 0; i < dims; i++ ) + { + float v = (float)*ptrs[i]; + const float* R = ranges[i]; + int j = -1, sz = size[i]; + + while( v >= R[j+1] && ++j < sz ) + ; // nop + + if( (unsigned)j >= (unsigned)sz ) + break; + idx[i] = j; + ptrs[i] += deltas[i*2]; + } + + if( i == dims ) + bproj[x] = saturate_cast(hist_(idx)*scale); + else + { + bproj[x] = 0; + for( ; i < dims; i++ ) + ptrs[i] += deltas[i*2]; + } + } + + for( i = 0; i < dims; i++ ) + ptrs[i] += deltas[i*2 + 1]; + } + } +} + + +static void +calcSparseBackProj_8u( vector& _ptrs, const vector& _deltas, + Size imsize, const SparseMat& hist, int dims, const float** _ranges, + const double* _uniranges, float scale, bool uniform ) +{ + uchar** ptrs = &_ptrs[0]; + const int* deltas = &_deltas[0]; + int i, x; + uchar* bproj = _ptrs[dims]; + int bpstep = _deltas[dims*2 + 1]; + vector _tab; + int idx[CV_MAX_DIM]; + + calcHistLookupTables_8u( Mat(), hist, dims, _ranges, _uniranges, uniform, true, _tab ); + const size_t* tab = &_tab[0]; + + for( ; imsize.height--; bproj += bpstep ) + { + for( x = 0; x < imsize.width; x++ ) + { + for( i = 0; i < dims; i++ ) + { + size_t hidx = tab[*ptrs[i] + i*256]; + if( hidx >= OUT_OF_RANGE ) + break; + idx[i] = (int)hidx; + ptrs[i] += deltas[i*2]; + } + + if( i == dims ) + bproj[x] = saturate_cast(hist.value(idx)*scale); + else + { + bproj[x] = 0; + for( ; i < dims; i++ ) + ptrs[i] += deltas[i*2]; + } + } + for( i = 0; i < dims; i++ ) + ptrs[i] += deltas[i*2 + 1]; + } +} + +} + +void cv::calcBackProject( const Mat* images, int nimages, const int* channels, + const SparseMat& hist, OutputArray _backProject, + const float** ranges, double scale, bool uniform ) +{ + vector ptrs; + vector deltas; + vector uniranges; + Size imsize; + int dims = hist.dims(); + + CV_Assert( dims > 0 ); + _backProject.create( images[0].size(), images[0].depth() ); + Mat backProject = _backProject.getMat(); + histPrepareImages( 
images, nimages, channels, backProject, + dims, hist.hdr->size, ranges, + uniform, ptrs, deltas, imsize, uniranges ); + const double* _uniranges = uniform ? &uniranges[0] : 0; + + int depth = images[0].depth(); + if( depth == CV_8U ) + calcSparseBackProj_8u(ptrs, deltas, imsize, hist, dims, ranges, + _uniranges, (float)scale, uniform); + else if( depth == CV_16U ) + calcSparseBackProj_(ptrs, deltas, imsize, hist, dims, ranges, + _uniranges, (float)scale, uniform ); + else if( depth == CV_32F ) + calcSparseBackProj_(ptrs, deltas, imsize, hist, dims, ranges, + _uniranges, (float)scale, uniform ); + else + CV_Error(CV_StsUnsupportedFormat, ""); +} + + +void cv::calcBackProject( InputArrayOfArrays images, const vector& channels, + InputArray hist, OutputArray dst, + const vector& ranges, + double scale ) +{ + Mat H = hist.getMat(); + bool _1d = H.rows == 1 || H.cols == 1; + int i, dims = H.dims, rsz = (int)ranges.size(), csz = (int)channels.size(); + int nimages = (int)images.total(); + CV_Assert(nimages > 0); + CV_Assert(rsz == dims*2 || (rsz == 2 && _1d) || (rsz == 0 && images.depth(0) == CV_8U)); + CV_Assert(csz == 0 || csz == dims || (csz == 1 && _1d)); + float* _ranges[CV_MAX_DIM]; + if( rsz > 0 ) + { + for( i = 0; i < rsz/2; i++ ) + _ranges[i] = (float*)&ranges[i*2]; + } + + AutoBuffer buf(nimages); + for( i = 0; i < nimages; i++ ) + buf[i] = images.getMat(i); + + calcBackProject(&buf[0], nimages, csz ? &channels[0] : 0, + hist, dst, rsz ? 
(const float**)_ranges : 0, scale, true); +} + + +////////////////// C O M P A R E H I S T O G R A M S //////////////////////// + +double cv::compareHist( InputArray _H1, InputArray _H2, int method ) +{ + Mat H1 = _H1.getMat(), H2 = _H2.getMat(); + const Mat* arrays[] = {&H1, &H2, 0}; + Mat planes[2]; + NAryMatIterator it(arrays, planes); + double result = 0; + int j, len = (int)it.size; + + CV_Assert( H1.type() == H2.type() && H1.type() == CV_32F ); + + double s1 = 0, s2 = 0, s11 = 0, s12 = 0, s22 = 0; + + CV_Assert( it.planes[0].isContinuous() && it.planes[1].isContinuous() ); + + for( size_t i = 0; i < it.nplanes; i++, ++it ) + { + const float* h1 = (const float*)it.planes[0].data; + const float* h2 = (const float*)it.planes[1].data; + len = it.planes[0].rows*it.planes[0].cols; + + if( method == CV_COMP_CHISQR ) + { + for( j = 0; j < len; j++ ) + { + double a = h1[j] - h2[j]; + double b = h1[j] + h2[j]; + if( fabs(b) > FLT_EPSILON ) + result += a*a/b; + } + } + else if( method == CV_COMP_CORREL ) + { + for( j = 0; j < len; j++ ) + { + double a = h1[j]; + double b = h2[j]; + + s12 += a*b; + s1 += a; + s11 += a*a; + s2 += b; + s22 += b*b; + } + } + else if( method == CV_COMP_INTERSECT ) + { + for( j = 0; j < len; j++ ) + result += std::min(h1[j], h2[j]); + } + else if( method == CV_COMP_BHATTACHARYYA ) + { + for( j = 0; j < len; j++ ) + { + double a = h1[j]; + double b = h2[j]; + result += std::sqrt(a*b); + s1 += a; + s2 += b; + } + } + else + CV_Error( CV_StsBadArg, "Unknown comparison method" ); + } + + if( method == CV_COMP_CORREL ) + { + size_t total = H1.total(); + double scale = 1./total; + double num = s12 - s1*s2*scale; + double denom2 = (s11 - s1*s1*scale)*(s22 - s2*s2*scale); + result = std::abs(denom2) > DBL_EPSILON ? num/std::sqrt(denom2) : 1.; + } + else if( method == CV_COMP_BHATTACHARYYA ) + { + s1 *= s2; + s1 = fabs(s1) > FLT_EPSILON ? 1./std::sqrt(s1) : 1.; + result = std::sqrt(std::max(1. 
- result*s1, 0.)); + } + + return result; +} + + +double cv::compareHist( const SparseMat& H1, const SparseMat& H2, int method ) +{ + double result = 0; + int i, dims = H1.dims(); + + CV_Assert( dims > 0 && dims == H2.dims() && H1.type() == H2.type() && H1.type() == CV_32F ); + for( i = 0; i < dims; i++ ) + CV_Assert( H1.size(i) == H2.size(i) ); + + const SparseMat *PH1 = &H1, *PH2 = &H2; + if( PH1->nzcount() > PH2->nzcount() ) + std::swap(PH1, PH2); + + SparseMatConstIterator it = PH1->begin(); + int N1 = (int)PH1->nzcount(), N2 = (int)PH2->nzcount(); + + if( method == CV_COMP_CHISQR ) + { + for( i = 0; i < N1; i++, ++it ) + { + float v1 = it.value(); + const SparseMat::Node* node = it.node(); + float v2 = PH2->value(node->idx, (size_t*)&node->hashval); + if( !v2 ) + result += v1; + else + { + double a = v1 - v2; + double b = v1 + v2; + if( b > FLT_EPSILON ) + result += a*a/b; + } + } + + it = PH2->begin(); + for( i = 0; i < N2; i++, ++it ) + { + float v2 = it.value(); + const SparseMat::Node* node = it.node(); + if( !PH1->find(node->idx, (size_t*)&node->hashval) ) + result += v2; + } + } + else if( method == CV_COMP_CORREL ) + { + double s1 = 0, s2 = 0, s11 = 0, s12 = 0, s22 = 0; + + for( i = 0; i < N1; i++, ++it ) + { + double v1 = it.value(); + const SparseMat::Node* node = it.node(); + s12 += v1*PH2->value(node->idx, (size_t*)&node->hashval); + s1 += v1; + s11 += v1*v1; + } + + it = PH2->begin(); + for( i = 0; i < N2; i++, ++it ) + { + double v2 = it.value(); + s2 += v2; + s22 += v2*v2; + } + + size_t total = 1; + for( i = 0; i < H1.dims(); i++ ) + total *= H1.size(i); + double scale = 1./total; + double num = s12 - s1*s2*scale; + double denom2 = (s11 - s1*s1*scale)*(s22 - s2*s2*scale); + result = std::abs(denom2) > DBL_EPSILON ? 
num/std::sqrt(denom2) : 1.; + } + else if( method == CV_COMP_INTERSECT ) + { + for( i = 0; i < N1; i++, ++it ) + { + float v1 = it.value(); + const SparseMat::Node* node = it.node(); + float v2 = PH2->value(node->idx, (size_t*)&node->hashval); + if( v2 ) + result += std::min(v1, v2); + } + } + else if( method == CV_COMP_BHATTACHARYYA ) + { + double s1 = 0, s2 = 0; + + for( i = 0; i < N1; i++, ++it ) + { + double v1 = it.value(); + const SparseMat::Node* node = it.node(); + double v2 = PH2->value(node->idx, (size_t*)&node->hashval); + result += std::sqrt(v1*v2); + s1 += v1; + } + + it = PH2->begin(); + for( i = 0; i < N2; i++, ++it ) + s2 += it.value(); + + s1 *= s2; + s1 = fabs(s1) > FLT_EPSILON ? 1./std::sqrt(s1) : 1.; + result = std::sqrt(std::max(1. - result*s1, 0.)); + } + else + CV_Error( CV_StsBadArg, "Unknown comparison method" ); + + return result; +} + + +const int CV_HIST_DEFAULT_TYPE = CV_32F; + +/* Creates new histogram */ +CvHistogram * +cvCreateHist( int dims, int *sizes, CvHistType type, float** ranges, int uniform ) +{ + CvHistogram *hist = 0; + + if( (unsigned)dims > CV_MAX_DIM ) + CV_Error( CV_BadOrder, "Number of dimensions is out of range" ); + + if( !sizes ) + CV_Error( CV_HeaderIsNull, "Null pointer" ); + + hist = (CvHistogram *)cvAlloc( sizeof( CvHistogram )); + hist->type = CV_HIST_MAGIC_VAL + ((int)type & 1); + if (uniform) hist->type|= CV_HIST_UNIFORM_FLAG; + hist->thresh2 = 0; + hist->bins = 0; + if( type == CV_HIST_ARRAY ) + { + hist->bins = cvInitMatNDHeader( &hist->mat, dims, sizes, + CV_HIST_DEFAULT_TYPE ); + cvCreateData( hist->bins ); + } + else if( type == CV_HIST_SPARSE ) + hist->bins = cvCreateSparseMat( dims, sizes, CV_HIST_DEFAULT_TYPE ); + else + CV_Error( CV_StsBadArg, "Invalid histogram type" ); + + if( ranges ) + cvSetHistBinRanges( hist, ranges, uniform ); + + return hist; +} + + +/* Creates histogram wrapping header for given array */ +CV_IMPL CvHistogram* +cvMakeHistHeaderForArray( int dims, int *sizes, CvHistogram 
*hist, + float *data, float **ranges, int uniform ) +{ + if( !hist ) + CV_Error( CV_StsNullPtr, "Null histogram header pointer" ); + + if( !data ) + CV_Error( CV_StsNullPtr, "Null data pointer" ); + + hist->thresh2 = 0; + hist->type = CV_HIST_MAGIC_VAL; + hist->bins = cvInitMatNDHeader( &hist->mat, dims, sizes, CV_HIST_DEFAULT_TYPE, data ); + + if( ranges ) + { + if( !uniform ) + CV_Error( CV_StsBadArg, "Only uniform bin ranges can be used here " + "(to avoid memory allocation)" ); + cvSetHistBinRanges( hist, ranges, uniform ); + } + + return hist; +} + + +CV_IMPL void +cvReleaseHist( CvHistogram **hist ) +{ + if( !hist ) + CV_Error( CV_StsNullPtr, "" ); + + if( *hist ) + { + CvHistogram* temp = *hist; + + if( !CV_IS_HIST(temp)) + CV_Error( CV_StsBadArg, "Invalid histogram header" ); + *hist = 0; + + if( CV_IS_SPARSE_HIST( temp )) + cvReleaseSparseMat( (CvSparseMat**)&temp->bins ); + else + { + cvReleaseData( temp->bins ); + temp->bins = 0; + } + + if( temp->thresh2 ) + cvFree( &temp->thresh2 ); + cvFree( &temp ); + } +} + +CV_IMPL void +cvClearHist( CvHistogram *hist ) +{ + if( !CV_IS_HIST(hist) ) + CV_Error( CV_StsBadArg, "Invalid histogram header" ); + cvZero( hist->bins ); +} + + +// Clears histogram bins that are below than threshold +CV_IMPL void +cvThreshHist( CvHistogram* hist, double thresh ) +{ + if( !CV_IS_HIST(hist) ) + CV_Error( CV_StsBadArg, "Invalid histogram header" ); + + if( !CV_IS_SPARSE_MAT(hist->bins) ) + { + CvMat mat; + cvGetMat( hist->bins, &mat, 0, 1 ); + cvThreshold( &mat, &mat, thresh, 0, CV_THRESH_TOZERO ); + } + else + { + CvSparseMat* mat = (CvSparseMat*)hist->bins; + CvSparseMatIterator iterator; + CvSparseNode *node; + + for( node = cvInitSparseMatIterator( mat, &iterator ); + node != 0; node = cvGetNextSparseNode( &iterator )) + { + float* val = (float*)CV_NODE_VAL( mat, node ); + if( *val <= thresh ) + *val = 0; + } + } +} + + +// Normalizes histogram (make sum of the histogram bins == factor) +CV_IMPL void +cvNormalizeHist( 
CvHistogram* hist, double factor ) +{ + double sum = 0; + + if( !CV_IS_HIST(hist) ) + CV_Error( CV_StsBadArg, "Invalid histogram header" ); + + if( !CV_IS_SPARSE_HIST(hist) ) + { + CvMat mat; + cvGetMat( hist->bins, &mat, 0, 1 ); + sum = cvSum( &mat ).val[0]; + if( fabs(sum) < DBL_EPSILON ) + sum = 1; + cvScale( &mat, &mat, factor/sum, 0 ); + } + else + { + CvSparseMat* mat = (CvSparseMat*)hist->bins; + CvSparseMatIterator iterator; + CvSparseNode *node; + float scale; + + for( node = cvInitSparseMatIterator( mat, &iterator ); + node != 0; node = cvGetNextSparseNode( &iterator )) + { + sum += *(float*)CV_NODE_VAL(mat,node); + } + + if( fabs(sum) < DBL_EPSILON ) + sum = 1; + scale = (float)(factor/sum); + + for( node = cvInitSparseMatIterator( mat, &iterator ); + node != 0; node = cvGetNextSparseNode( &iterator )) + { + *(float*)CV_NODE_VAL(mat,node) *= scale; + } + } +} + + +// Retrieves histogram global min, max and their positions +CV_IMPL void +cvGetMinMaxHistValue( const CvHistogram* hist, + float *value_min, float* value_max, + int* idx_min, int* idx_max ) +{ + double minVal, maxVal; + int i, dims, size[CV_MAX_DIM]; + + if( !CV_IS_HIST(hist) ) + CV_Error( CV_StsBadArg, "Invalid histogram header" ); + + dims = cvGetDims( hist->bins, size ); + + if( !CV_IS_SPARSE_HIST(hist) ) + { + CvMat mat; + CvPoint minPt, maxPt; + + cvGetMat( hist->bins, &mat, 0, 1 ); + cvMinMaxLoc( &mat, &minVal, &maxVal, &minPt, &maxPt ); + + if( dims == 1 ) + { + if( idx_min ) + *idx_min = minPt.y + minPt.x; + if( idx_max ) + *idx_max = maxPt.y + maxPt.x; + } + else if( dims == 2 ) + { + if( idx_min ) + idx_min[0] = minPt.y, idx_min[1] = minPt.x; + if( idx_max ) + idx_max[0] = maxPt.y, idx_max[1] = maxPt.x; + } + else if( idx_min || idx_max ) + { + int imin = minPt.y*mat.cols + minPt.x; + int imax = maxPt.y*mat.cols + maxPt.x; + int i; + + for( i = dims - 1; i >= 0; i-- ) + { + if( idx_min ) + { + int t = imin / size[i]; + idx_min[i] = imin - t*size[i]; + imin = t; + } + + if( idx_max ) + 
{ + int t = imax / size[i]; + idx_max[i] = imax - t*size[i]; + imax = t; + } + } + } + } + else + { + CvSparseMat* mat = (CvSparseMat*)hist->bins; + CvSparseMatIterator iterator; + CvSparseNode *node; + int minv = INT_MAX; + int maxv = INT_MIN; + CvSparseNode* minNode = 0; + CvSparseNode* maxNode = 0; + const int *_idx_min = 0, *_idx_max = 0; + Cv32suf m; + + for( node = cvInitSparseMatIterator( mat, &iterator ); + node != 0; node = cvGetNextSparseNode( &iterator )) + { + int value = *(int*)CV_NODE_VAL(mat,node); + value = CV_TOGGLE_FLT(value); + if( value < minv ) + { + minv = value; + minNode = node; + } + + if( value > maxv ) + { + maxv = value; + maxNode = node; + } + } + + if( minNode ) + { + _idx_min = CV_NODE_IDX(mat,minNode); + _idx_max = CV_NODE_IDX(mat,maxNode); + m.i = CV_TOGGLE_FLT(minv); minVal = m.f; + m.i = CV_TOGGLE_FLT(maxv); maxVal = m.f; + } + else + { + minVal = maxVal = 0; + } + + for( i = 0; i < dims; i++ ) + { + if( idx_min ) + idx_min[i] = _idx_min ? _idx_min[i] : -1; + if( idx_max ) + idx_max[i] = _idx_max ? 
_idx_max[i] : -1; + } + } + + if( value_min ) + *value_min = (float)minVal; + + if( value_max ) + *value_max = (float)maxVal; +} + + +// Compares two histograms using one of a few methods +CV_IMPL double +cvCompareHist( const CvHistogram* hist1, + const CvHistogram* hist2, + int method ) +{ + int i; + int size1[CV_MAX_DIM], size2[CV_MAX_DIM], total = 1; + + if( !CV_IS_HIST(hist1) || !CV_IS_HIST(hist2) ) + CV_Error( CV_StsBadArg, "Invalid histogram header[s]" ); + + if( CV_IS_SPARSE_MAT(hist1->bins) != CV_IS_SPARSE_MAT(hist2->bins)) + CV_Error(CV_StsUnmatchedFormats, "One of histograms is sparse and other is not"); + + if( !CV_IS_SPARSE_MAT(hist1->bins) ) + { + cv::Mat H1((const CvMatND*)hist1->bins), H2((const CvMatND*)hist2->bins); + return cv::compareHist(H1, H2, method); + } + + int dims1 = cvGetDims( hist1->bins, size1 ); + int dims2 = cvGetDims( hist2->bins, size2 ); + + if( dims1 != dims2 ) + CV_Error( CV_StsUnmatchedSizes, + "The histograms have different numbers of dimensions" ); + + for( i = 0; i < dims1; i++ ) + { + if( size1[i] != size2[i] ) + CV_Error( CV_StsUnmatchedSizes, "The histograms have different sizes" ); + total *= size1[i]; + } + + double result = 0; + CvSparseMat* mat1 = (CvSparseMat*)(hist1->bins); + CvSparseMat* mat2 = (CvSparseMat*)(hist2->bins); + CvSparseMatIterator iterator; + CvSparseNode *node1, *node2; + + if( mat1->heap->active_count > mat2->heap->active_count ) + { + CvSparseMat* t; + CV_SWAP( mat1, mat2, t ); + } + + if( method == CV_COMP_CHISQR ) + { + for( node1 = cvInitSparseMatIterator( mat1, &iterator ); + node1 != 0; node1 = cvGetNextSparseNode( &iterator )) + { + double v1 = *(float*)CV_NODE_VAL(mat1,node1); + uchar* node2_data = cvPtrND( mat2, CV_NODE_IDX(mat1,node1), 0, 0, &node1->hashval ); + if( !node2_data ) + result += v1; + else + { + double v2 = *(float*)node2_data; + double a = v1 - v2; + double b = v1 + v2; + if( fabs(b) > DBL_EPSILON ) + result += a*a/b; + } + } + + for( node2 = cvInitSparseMatIterator( mat2, 
&iterator ); + node2 != 0; node2 = cvGetNextSparseNode( &iterator )) + { + double v2 = *(float*)CV_NODE_VAL(mat2,node2); + if( !cvPtrND( mat1, CV_NODE_IDX(mat2,node2), 0, 0, &node2->hashval )) + result += v2; + } + } + else if( method == CV_COMP_CORREL ) + { + double s1 = 0, s11 = 0; + double s2 = 0, s22 = 0; + double s12 = 0; + double num, denom2, scale = 1./total; + + for( node1 = cvInitSparseMatIterator( mat1, &iterator ); + node1 != 0; node1 = cvGetNextSparseNode( &iterator )) + { + double v1 = *(float*)CV_NODE_VAL(mat1,node1); + uchar* node2_data = cvPtrND( mat2, CV_NODE_IDX(mat1,node1), + 0, 0, &node1->hashval ); + if( node2_data ) + { + double v2 = *(float*)node2_data; + s12 += v1*v2; + } + s1 += v1; + s11 += v1*v1; + } + + for( node2 = cvInitSparseMatIterator( mat2, &iterator ); + node2 != 0; node2 = cvGetNextSparseNode( &iterator )) + { + double v2 = *(float*)CV_NODE_VAL(mat2,node2); + s2 += v2; + s22 += v2*v2; + } + + num = s12 - s1*s2*scale; + denom2 = (s11 - s1*s1*scale)*(s22 - s2*s2*scale); + result = fabs(denom2) > DBL_EPSILON ? 
num/sqrt(denom2) : 1; + } + else if( method == CV_COMP_INTERSECT ) + { + for( node1 = cvInitSparseMatIterator( mat1, &iterator ); + node1 != 0; node1 = cvGetNextSparseNode( &iterator )) + { + float v1 = *(float*)CV_NODE_VAL(mat1,node1); + uchar* node2_data = cvPtrND( mat2, CV_NODE_IDX(mat1,node1), + 0, 0, &node1->hashval ); + if( node2_data ) + { + float v2 = *(float*)node2_data; + if( v1 <= v2 ) + result += v1; + else + result += v2; + } + } + } + else if( method == CV_COMP_BHATTACHARYYA ) + { + double s1 = 0, s2 = 0; + + for( node1 = cvInitSparseMatIterator( mat1, &iterator ); + node1 != 0; node1 = cvGetNextSparseNode( &iterator )) + { + double v1 = *(float*)CV_NODE_VAL(mat1,node1); + uchar* node2_data = cvPtrND( mat2, CV_NODE_IDX(mat1,node1), + 0, 0, &node1->hashval ); + s1 += v1; + if( node2_data ) + { + double v2 = *(float*)node2_data; + result += sqrt(v1 * v2); + } + } + + for( node1 = cvInitSparseMatIterator( mat2, &iterator ); + node1 != 0; node1 = cvGetNextSparseNode( &iterator )) + { + double v2 = *(float*)CV_NODE_VAL(mat2,node1); + s2 += v2; + } + + s1 *= s2; + s1 = fabs(s1) > FLT_EPSILON ? 1./sqrt(s1) : 1.; + result = 1. 
- result*s1; + result = sqrt(MAX(result,0.)); + } + else + CV_Error( CV_StsBadArg, "Unknown comparison method" ); + + return result; +} + +// copies one histogram to another +CV_IMPL void +cvCopyHist( const CvHistogram* src, CvHistogram** _dst ) +{ + int eq = 0; + int is_sparse; + int i, dims1, dims2; + int size1[CV_MAX_DIM], size2[CV_MAX_DIM], total = 1; + float* ranges[CV_MAX_DIM]; + float** thresh = 0; + CvHistogram* dst; + + if( !_dst ) + CV_Error( CV_StsNullPtr, "Destination double pointer is NULL" ); + + dst = *_dst; + + if( !CV_IS_HIST(src) || (dst && !CV_IS_HIST(dst)) ) + CV_Error( CV_StsBadArg, "Invalid histogram header[s]" ); + + is_sparse = CV_IS_SPARSE_MAT(src->bins); + dims1 = cvGetDims( src->bins, size1 ); + for( i = 0; i < dims1; i++ ) + total *= size1[i]; + + if( dst && is_sparse == CV_IS_SPARSE_MAT(dst->bins)) + { + dims2 = cvGetDims( dst->bins, size2 ); + + if( dims1 == dims2 ) + { + for( i = 0; i < dims1; i++ ) + if( size1[i] != size2[i] ) + break; + } + + eq = i == dims1; + } + + if( !eq ) + { + cvReleaseHist( _dst ); + dst = cvCreateHist( dims1, size1, !is_sparse ? 
CV_HIST_ARRAY : CV_HIST_SPARSE, 0, 0 ); + *_dst = dst; + } + + if( CV_HIST_HAS_RANGES( src )) + { + if( CV_IS_UNIFORM_HIST( src )) + { + for( i = 0; i < dims1; i++ ) + ranges[i] = (float*)src->thresh[i]; + thresh = ranges; + } + else + thresh = src->thresh2; + cvSetHistBinRanges( dst, thresh, CV_IS_UNIFORM_HIST(src)); + } + + cvCopy( src->bins, dst->bins ); +} + + +// Sets a value range for every histogram bin +CV_IMPL void +cvSetHistBinRanges( CvHistogram* hist, float** ranges, int uniform ) +{ + int dims, size[CV_MAX_DIM], total = 0; + int i, j; + + if( !ranges ) + CV_Error( CV_StsNullPtr, "NULL ranges pointer" ); + + if( !CV_IS_HIST(hist) ) + CV_Error( CV_StsBadArg, "Invalid histogram header" ); + + dims = cvGetDims( hist->bins, size ); + for( i = 0; i < dims; i++ ) + total += size[i]+1; + + if( uniform ) + { + for( i = 0; i < dims; i++ ) + { + if( !ranges[i] ) + CV_Error( CV_StsNullPtr, "One of elements is NULL" ); + hist->thresh[i][0] = ranges[i][0]; + hist->thresh[i][1] = ranges[i][1]; + } + + hist->type |= CV_HIST_UNIFORM_FLAG + CV_HIST_RANGES_FLAG; + } + else + { + float* dim_ranges; + + if( !hist->thresh2 ) + { + hist->thresh2 = (float**)cvAlloc( + dims*sizeof(hist->thresh2[0])+ + total*sizeof(hist->thresh2[0][0])); + } + dim_ranges = (float*)(hist->thresh2 + dims); + + for( i = 0; i < dims; i++ ) + { + float val0 = -FLT_MAX; + + if( !ranges[i] ) + CV_Error( CV_StsNullPtr, "One of elements is NULL" ); + + for( j = 0; j <= size[i]; j++ ) + { + float val = ranges[i][j]; + if( val <= val0 ) + CV_Error(CV_StsOutOfRange, "Bin ranges should go in ascenting order"); + val0 = dim_ranges[j] = val; + } + + hist->thresh2[i] = dim_ranges; + dim_ranges += size[i] + 1; + } + + hist->type |= CV_HIST_RANGES_FLAG; + hist->type &= ~CV_HIST_UNIFORM_FLAG; + } +} + + +CV_IMPL void +cvCalcArrHist( CvArr** img, CvHistogram* hist, int accumulate, const CvArr* mask ) +{ + if( !CV_IS_HIST(hist)) + CV_Error( CV_StsBadArg, "Bad histogram pointer" ); + + if( !img ) + CV_Error( 
CV_StsNullPtr, "Null double array pointer" ); + + int size[CV_MAX_DIM]; + int i, dims = cvGetDims( hist->bins, size); + bool uniform = CV_IS_UNIFORM_HIST(hist); + + cv::vector images(dims); + for( i = 0; i < dims; i++ ) + images[i] = cv::cvarrToMat(img[i]); + + cv::Mat _mask; + if( mask ) + _mask = cv::cvarrToMat(mask); + + const float* uranges[CV_MAX_DIM] = {0}; + const float** ranges = 0; + + if( hist->type & CV_HIST_RANGES_FLAG ) + { + ranges = (const float**)hist->thresh2; + if( uniform ) + { + for( i = 0; i < dims; i++ ) + uranges[i] = &hist->thresh[i][0]; + ranges = uranges; + } + } + + if( !CV_IS_SPARSE_HIST(hist) ) + { + cv::Mat H((const CvMatND*)hist->bins); + cv::calcHist( &images[0], (int)images.size(), 0, _mask, + H, cvGetDims(hist->bins), H.size, ranges, uniform, accumulate != 0 ); + } + else + { + CvSparseMat* sparsemat = (CvSparseMat*)hist->bins; + + if( !accumulate ) + cvZero( hist->bins ); + cv::SparseMat sH(sparsemat); + cv::calcHist( &images[0], (int)images.size(), 0, _mask, sH, sH.dims(), + sH.dims() > 0 ? 
sH.hdr->size : 0, ranges, uniform, accumulate != 0, true ); + + if( accumulate ) + cvZero( sparsemat ); + + cv::SparseMatConstIterator it = sH.begin(); + int nz = (int)sH.nzcount(); + for( i = 0; i < nz; i++, ++it ) + *(float*)cvPtrND(sparsemat, it.node()->idx, 0, -2) = (float)*(const int*)it.ptr; + } +} + + +CV_IMPL void +cvCalcArrBackProject( CvArr** img, CvArr* dst, const CvHistogram* hist ) +{ + if( !CV_IS_HIST(hist)) + CV_Error( CV_StsBadArg, "Bad histogram pointer" ); + + if( !img ) + CV_Error( CV_StsNullPtr, "Null double array pointer" ); + + int size[CV_MAX_DIM]; + int i, dims = cvGetDims( hist->bins, size ); + + bool uniform = CV_IS_UNIFORM_HIST(hist); + const float* uranges[CV_MAX_DIM] = {0}; + const float** ranges = 0; + + if( hist->type & CV_HIST_RANGES_FLAG ) + { + ranges = (const float**)hist->thresh2; + if( uniform ) + { + for( i = 0; i < dims; i++ ) + uranges[i] = &hist->thresh[i][0]; + ranges = uranges; + } + } + + cv::vector images(dims); + for( i = 0; i < dims; i++ ) + images[i] = cv::cvarrToMat(img[i]); + + cv::Mat _dst = cv::cvarrToMat(dst); + + CV_Assert( _dst.size() == images[0].size() && _dst.depth() == images[0].depth() ); + + if( !CV_IS_SPARSE_HIST(hist) ) + { + cv::Mat H((const CvMatND*)hist->bins); + cv::calcBackProject( &images[0], (int)images.size(), + 0, H, _dst, ranges, 1, uniform ); + } + else + { + cv::SparseMat sH((const CvSparseMat*)hist->bins); + cv::calcBackProject( &images[0], (int)images.size(), + 0, sH, _dst, ranges, 1, uniform ); + } +} + + +////////////////////// B A C K P R O J E C T P A T C H ///////////////////////// + +CV_IMPL void +cvCalcArrBackProjectPatch( CvArr** arr, CvArr* dst, CvSize patch_size, CvHistogram* hist, + int method, double norm_factor ) +{ + CvHistogram* model = 0; + + IplImage imgstub[CV_MAX_DIM], *img[CV_MAX_DIM]; + IplROI roi; + CvMat dststub, *dstmat; + int i, dims; + int x, y; + CvSize size; + + if( !CV_IS_HIST(hist)) + CV_Error( CV_StsBadArg, "Bad histogram pointer" ); + + if( !arr ) + 
CV_Error( CV_StsNullPtr, "Null double array pointer" ); + + if( norm_factor <= 0 ) + CV_Error( CV_StsOutOfRange, + "Bad normalization factor (set it to 1.0 if unsure)" ); + + if( patch_size.width <= 0 || patch_size.height <= 0 ) + CV_Error( CV_StsBadSize, "The patch width and height must be positive" ); + + dims = cvGetDims( hist->bins ); + cvNormalizeHist( hist, norm_factor ); + + for( i = 0; i < dims; i++ ) + { + CvMat stub, *mat; + mat = cvGetMat( arr[i], &stub, 0, 0 ); + img[i] = cvGetImage( mat, &imgstub[i] ); + img[i]->roi = &roi; + } + + dstmat = cvGetMat( dst, &dststub, 0, 0 ); + if( CV_MAT_TYPE( dstmat->type ) != CV_32FC1 ) + CV_Error( CV_StsUnsupportedFormat, "Resultant image must have 32fC1 type" ); + + if( dstmat->cols != img[0]->width - patch_size.width + 1 || + dstmat->rows != img[0]->height - patch_size.height + 1 ) + CV_Error( CV_StsUnmatchedSizes, + "The output map must be (W-w+1 x H-h+1), " + "where the input images are (W x H) each and the patch is (w x h)" ); + + cvCopyHist( hist, &model ); + + size = cvGetMatSize(dstmat); + roi.coi = 0; + roi.width = patch_size.width; + roi.height = patch_size.height; + + for( y = 0; y < size.height; y++ ) + { + for( x = 0; x < size.width; x++ ) + { + double result; + roi.xOffset = x; + roi.yOffset = y; + + cvCalcHist( img, model ); + cvNormalizeHist( model, norm_factor ); + result = cvCompareHist( model, hist, method ); + CV_MAT_ELEM( *dstmat, float, y, x ) = (float)result; + } + } + + cvReleaseHist( &model ); +} + + +// Calculates Bayes probabilistic histograms +CV_IMPL void +cvCalcBayesianProb( CvHistogram** src, int count, CvHistogram** dst ) +{ + int i; + + if( !src || !dst ) + CV_Error( CV_StsNullPtr, "NULL histogram array pointer" ); + + if( count < 2 ) + CV_Error( CV_StsOutOfRange, "Too small number of histograms" ); + + for( i = 0; i < count; i++ ) + { + if( !CV_IS_HIST(src[i]) || !CV_IS_HIST(dst[i]) ) + CV_Error( CV_StsBadArg, "Invalid histogram header" ); + + if( !CV_IS_MATND(src[i]->bins) || 
!CV_IS_MATND(dst[i]->bins) ) + CV_Error( CV_StsBadArg, "The function supports dense histograms only" ); + } + + cvZero( dst[0]->bins ); + // dst[0] = src[0] + ... + src[count-1] + for( i = 0; i < count; i++ ) + cvAdd( src[i]->bins, dst[0]->bins, dst[0]->bins ); + + cvDiv( 0, dst[0]->bins, dst[0]->bins ); + + // dst[i] = src[i]*(1/dst[0]) + for( i = count - 1; i >= 0; i-- ) + cvMul( src[i]->bins, dst[0]->bins, dst[i]->bins ); +} + + +CV_IMPL void +cvCalcProbDensity( const CvHistogram* hist, const CvHistogram* hist_mask, + CvHistogram* hist_dens, double scale ) +{ + if( scale <= 0 ) + CV_Error( CV_StsOutOfRange, "scale must be positive" ); + + if( !CV_IS_HIST(hist) || !CV_IS_HIST(hist_mask) || !CV_IS_HIST(hist_dens) ) + CV_Error( CV_StsBadArg, "Invalid histogram pointer[s]" ); + + { + CvArr* arrs[] = { hist->bins, hist_mask->bins, hist_dens->bins }; + CvMatND stubs[3]; + CvNArrayIterator iterator; + + cvInitNArrayIterator( 3, arrs, 0, stubs, &iterator ); + + if( CV_MAT_TYPE(iterator.hdr[0]->type) != CV_32FC1 ) + CV_Error( CV_StsUnsupportedFormat, "All histograms must have 32fC1 type" ); + + do + { + const float* srcdata = (const float*)(iterator.ptr[0]); + const float* maskdata = (const float*)(iterator.ptr[1]); + float* dstdata = (float*)(iterator.ptr[2]); + int i; + + for( i = 0; i < iterator.size.width; i++ ) + { + float s = srcdata[i]; + float m = maskdata[i]; + if( s > FLT_EPSILON ) + if( m <= s ) + dstdata[i] = (float)(m*scale/s); + else + dstdata[i] = (float)scale; + else + dstdata[i] = (float)0; + } + } + while( cvNextNArraySlice( &iterator )); + } +} + + +CV_IMPL void cvEqualizeHist( const CvArr* srcarr, CvArr* dstarr ) +{ + CvMat sstub, *src = cvGetMat(srcarr, &sstub); + CvMat dstub, *dst = cvGetMat(dstarr, &dstub); + + CV_Assert( CV_ARE_SIZES_EQ(src, dst) && CV_ARE_TYPES_EQ(src, dst) && + CV_MAT_TYPE(src->type) == CV_8UC1 ); + CvSize size = cvGetMatSize(src); + if( CV_IS_MAT_CONT(src->type & dst->type) ) + { + size.width *= size.height; + size.height = 1; 
+ } + int x, y; + const int hist_sz = 256; + int hist[hist_sz]; + memset(hist, 0, sizeof(hist)); + + for( y = 0; y < size.height; y++ ) + { + const uchar* sptr = src->data.ptr + src->step*y; + for( x = 0; x < size.width; x++ ) + hist[sptr[x]]++; + } + + float scale = 255.f/(size.width*size.height); + int sum = 0; + uchar lut[hist_sz+1]; + + for( int i = 0; i < hist_sz; i++ ) + { + sum += hist[i]; + int val = cvRound(sum*scale); + lut[i] = CV_CAST_8U(val); + } + + lut[0] = 0; + for( y = 0; y < size.height; y++ ) + { + const uchar* sptr = src->data.ptr + src->step*y; + uchar* dptr = dst->data.ptr + dst->step*y; + for( x = 0; x < size.width; x++ ) + dptr[x] = lut[sptr[x]]; + } +} + + +void cv::equalizeHist( InputArray _src, OutputArray _dst ) +{ + Mat src = _src.getMat(); + _dst.create( src.size(), src.type() ); + Mat dst = _dst.getMat(); + CvMat _csrc = src, _cdst = dst; + cvEqualizeHist( &_csrc, &_cdst ); +} + +/* Implementation of RTTI and Generic Functions for CvHistogram */ +#define CV_TYPE_NAME_HIST "opencv-hist" + +static int icvIsHist( const void * ptr ) +{ + return CV_IS_HIST( ((CvHistogram*)ptr) ); +} + +static CvHistogram * icvCloneHist( const CvHistogram * src ) +{ + CvHistogram * dst=NULL; + cvCopyHist(src, &dst); + return dst; +} + +static void *icvReadHist( CvFileStorage * fs, CvFileNode * node ) +{ + CvHistogram * h = 0; + int type = 0; + int is_uniform = 0; + int have_ranges = 0; + + h = (CvHistogram *)cvAlloc( sizeof(CvHistogram) ); + + type = cvReadIntByName( fs, node, "type", 0 ); + is_uniform = cvReadIntByName( fs, node, "is_uniform", 0 ); + have_ranges = cvReadIntByName( fs, node, "have_ranges", 0 ); + h->type = CV_HIST_MAGIC_VAL | type | + (is_uniform ? CV_HIST_UNIFORM_FLAG : 0) | + (have_ranges ? 
CV_HIST_RANGES_FLAG : 0); + + if(type == CV_HIST_ARRAY) + { + // read histogram bins + CvMatND* mat = (CvMatND*)cvReadByName( fs, node, "mat" ); + int i, sizes[CV_MAX_DIM]; + + if(!CV_IS_MATND(mat)) + CV_Error( CV_StsError, "Expected CvMatND"); + + for(i=0; idims; i++) + sizes[i] = mat->dim[i].size; + + cvInitMatNDHeader( &(h->mat), mat->dims, sizes, mat->type, mat->data.ptr ); + h->bins = &(h->mat); + + // take ownership of refcount pointer as well + h->mat.refcount = mat->refcount; + + // increase refcount so freeing temp header doesn't free data + cvIncRefData( mat ); + + // free temporary header + cvReleaseMatND( &mat ); + } + else + { + h->bins = cvReadByName( fs, node, "bins" ); + if(!CV_IS_SPARSE_MAT(h->bins)){ + CV_Error( CV_StsError, "Unknown Histogram type"); + } + } + + // read thresholds + if(have_ranges) + { + int i, dims, size[CV_MAX_DIM], total = 0; + CvSeqReader reader; + CvFileNode * thresh_node; + + dims = cvGetDims( h->bins, size ); + for( i = 0; i < dims; i++ ) + total += size[i]+1; + + thresh_node = cvGetFileNodeByName( fs, node, "thresh" ); + if(!thresh_node) + CV_Error( CV_StsError, "'thresh' node is missing"); + cvStartReadRawData( fs, thresh_node, &reader ); + + if(is_uniform) + { + for(i=0; ithresh[i], "f" ); + h->thresh2 = NULL; + } + else + { + float* dim_ranges; + h->thresh2 = (float**)cvAlloc( + dims*sizeof(h->thresh2[0])+ + total*sizeof(h->thresh2[0][0])); + dim_ranges = (float*)(h->thresh2 + dims); + for(i=0; i < dims; i++) + { + h->thresh2[i] = dim_ranges; + cvReadRawDataSlice( fs, &reader, size[i]+1, dim_ranges, "f" ); + dim_ranges += size[i] + 1; + } + } + } + + return h; +} + +static void icvWriteHist( CvFileStorage* fs, const char* name, + const void* struct_ptr, CvAttrList /*attributes*/ ) +{ + const CvHistogram * hist = (const CvHistogram *) struct_ptr; + int sizes[CV_MAX_DIM]; + int dims; + int i; + int is_uniform, have_ranges; + + cvStartWriteStruct( fs, name, CV_NODE_MAP, CV_TYPE_NAME_HIST ); + + is_uniform = 
(CV_IS_UNIFORM_HIST(hist) ? 1 : 0); + have_ranges = (hist->type & CV_HIST_RANGES_FLAG ? 1 : 0); + + cvWriteInt( fs, "type", (hist->type & 1) ); + cvWriteInt( fs, "is_uniform", is_uniform ); + cvWriteInt( fs, "have_ranges", have_ranges ); + if(!CV_IS_SPARSE_HIST(hist)) + cvWrite( fs, "mat", &(hist->mat) ); + else + cvWrite( fs, "bins", hist->bins ); + + // write thresholds + if(have_ranges){ + dims = cvGetDims( hist->bins, sizes ); + cvStartWriteStruct( fs, "thresh", CV_NODE_SEQ + CV_NODE_FLOW ); + if(is_uniform){ + for(i=0; ithresh[i], 2, "f" ); + } + } + else{ + for(i=0; ithresh2[i], sizes[i]+1, "f" ); + } + } + cvEndWriteStruct( fs ); + } + + cvEndWriteStruct( fs ); +} + + +CvType hist_type( CV_TYPE_NAME_HIST, icvIsHist, (CvReleaseFunc)cvReleaseHist, + icvReadHist, icvWriteHist, (CvCloneFunc)icvCloneHist ); + +/* End of file. */ + diff --git a/opencv/imgproc/hough.cpp b/opencv/imgproc/hough.cpp new file mode 100644 index 0000000..54d1f44 --- /dev/null +++ b/opencv/imgproc/hough.cpp @@ -0,0 +1,1145 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#include "precomp.hpp" +#include "_list.h" + +#define halfPi ((float)(CV_PI*0.5)) +#define Pi ((float)CV_PI) +#define a0 0 /*-4.172325e-7f*/ /*(-(float)0x7)/((float)0x1000000); */ +#define a1 1.000025f /*((float)0x1922253)/((float)0x1000000)*2/Pi; */ +#define a2 -2.652905e-4f /*(-(float)0x2ae6)/((float)0x1000000)*4/(Pi*Pi); */ +#define a3 -0.165624f /*(-(float)0xa45511)/((float)0x1000000)*8/(Pi*Pi*Pi); */ +#define a4 -1.964532e-3f /*(-(float)0x30fd3)/((float)0x1000000)*16/(Pi*Pi*Pi*Pi); */ +#define a5 1.02575e-2f /*((float)0x191cac)/((float)0x1000000)*32/(Pi*Pi*Pi*Pi*Pi); */ +#define a6 -9.580378e-4f /*(-(float)0x3af27)/((float)0x1000000)*64/(Pi*Pi*Pi*Pi*Pi*Pi); */ + +#define _sin(x) ((((((a6*(x) + a5)*(x) + a4)*(x) + a3)*(x) + a2)*(x) + a1)*(x) + a0) +#define _cos(x) _sin(halfPi - (x)) + +/****************************************************************************************\ +* Classical Hough Transform * +\****************************************************************************************/ + +typedef struct CvLinePolar +{ + float rho; + float angle; +} +CvLinePolar; + +/*=====================================================================================*/ + +#define hough_cmp_gt(l1,l2) (aux[l1] > aux[l2]) + +static CV_IMPLEMENT_QSORT_EX( icvHoughSortDescent32s, int, hough_cmp_gt, const int* ) + +/* +Here image is an input raster; +step is it's step; size characterizes it's ROI; +rho and theta are discretization steps (in pixels and radians correspondingly). +threshold is the minimum number of pixels in the feature for it +to be a candidate for line. lines is the output +array of (rho, theta) pairs. linesMax is the buffer size (number of pairs). +Functions return the actual number of found lines. 
+*/ +static void +icvHoughLinesStandard( const CvMat* img, float rho, float theta, + int threshold, CvSeq *lines, int linesMax ) +{ + cv::AutoBuffer _accum, _sort_buf; + cv::AutoBuffer _tabSin, _tabCos; + + const uchar* image; + int step, width, height; + int numangle, numrho; + int total = 0; + float ang; + int r, n; + int i, j; + float irho = 1 / rho; + double scale; + + CV_Assert( CV_IS_MAT(img) && CV_MAT_TYPE(img->type) == CV_8UC1 ); + + image = img->data.ptr; + step = img->step; + width = img->cols; + height = img->rows; + + numangle = cvRound(CV_PI / theta); + numrho = cvRound(((width + height) * 2 + 1) / rho); + + _accum.allocate((numangle+2) * (numrho+2)); + _sort_buf.allocate(numangle * numrho); + _tabSin.allocate(numangle); + _tabCos.allocate(numangle); + int *accum = _accum, *sort_buf = _sort_buf; + float *tabSin = _tabSin, *tabCos = _tabCos; + + memset( accum, 0, sizeof(accum[0]) * (numangle+2) * (numrho+2) ); + + for( ang = 0, n = 0; n < numangle; ang += theta, n++ ) + { + tabSin[n] = (float)(sin(ang) * irho); + tabCos[n] = (float)(cos(ang) * irho); + } + + // stage 1. fill accumulator + for( i = 0; i < height; i++ ) + for( j = 0; j < width; j++ ) + { + if( image[i * step + j] != 0 ) + for( n = 0; n < numangle; n++ ) + { + r = cvRound( j * tabCos[n] + i * tabSin[n] ); + r += (numrho - 1) / 2; + accum[(n+1) * (numrho+2) + r+1]++; + } + } + + // stage 2. find local maximums + for( r = 0; r < numrho; r++ ) + for( n = 0; n < numangle; n++ ) + { + int base = (n+1) * (numrho+2) + r+1; + if( accum[base] > threshold && + accum[base] > accum[base - 1] && accum[base] >= accum[base + 1] && + accum[base] > accum[base - numrho - 2] && accum[base] >= accum[base + numrho + 2] ) + sort_buf[total++] = base; + } + + // stage 3. sort the detected lines by accumulator value + icvHoughSortDescent32s( sort_buf, total, accum ); + + // stage 4. 
store the first min(total,linesMax) lines to the output buffer + linesMax = MIN(linesMax, total); + scale = 1./(numrho+2); + for( i = 0; i < linesMax; i++ ) + { + CvLinePolar line; + int idx = sort_buf[i]; + int n = cvFloor(idx*scale) - 1; + int r = idx - (n+1)*(numrho+2) - 1; + line.rho = (r - (numrho - 1)*0.5f) * rho; + line.angle = n * theta; + cvSeqPush( lines, &line ); + } +} + + +/****************************************************************************************\ +* Multi-Scale variant of Classical Hough Transform * +\****************************************************************************************/ + +#if defined _MSC_VER && _MSC_VER >= 1200 +#pragma warning( disable: 4714 ) +#endif + +//DECLARE_AND_IMPLEMENT_LIST( _index, h_ ); +IMPLEMENT_LIST( _index, h_ ) + +static void +icvHoughLinesSDiv( const CvMat* img, + float rho, float theta, int threshold, + int srn, int stn, + CvSeq* lines, int linesMax ) +{ + std::vector _caccum, _buffer; + std::vector _sinTable; + std::vector _x, _y; + float* sinTable; + int *x, *y; + uchar *caccum, *buffer; + _CVLIST* list = 0; + +#define _POINT(row, column)\ + (image_src[(row)*step+(column)]) + + uchar *mcaccum = 0; + int rn, tn; /* number of rho and theta discrete values */ + int index, i; + int ri, ti, ti1, ti0; + int row, col; + float r, t; /* Current rho and theta */ + float rv; /* Some temporary rho value */ + float irho; + float itheta; + float srho, stheta; + float isrho, istheta; + + const uchar* image_src; + int w, h, step; + int fn = 0; + float xc, yc; + + const float d2r = (float)(Pi / 180); + int sfn = srn * stn; + int fi; + int count; + int cmax = 0; + + CVPOS pos; + _index *pindex; + _index vi; + + CV_Assert( CV_IS_MAT(img) && CV_MAT_TYPE(img->type) == CV_8UC1 ); + CV_Assert( linesMax > 0 && rho > 0 && theta > 0 ); + + threshold = MIN( threshold, 255 ); + + image_src = img->data.ptr; + step = img->step; + w = img->cols; + h = img->rows; + + irho = 1 / rho; + itheta = 1 / theta; + srho = rho / srn; + 
stheta = theta / stn; + isrho = 1 / srho; + istheta = 1 / stheta; + + rn = cvFloor( sqrt( (double)w * w + (double)h * h ) * irho ); + tn = cvFloor( 2 * Pi * itheta ); + + list = h_create_list__index( linesMax < 1000 ? linesMax : 1000 ); + vi.value = threshold; + vi.rho = -1; + h_add_head__index( list, &vi ); + + /* Precalculating sin */ + _sinTable.resize( 5 * tn * stn ); + sinTable = &_sinTable[0]; + + for( index = 0; index < 5 * tn * stn; index++ ) + sinTable[index] = (float)cos( stheta * index * 0.2f ); + + _caccum.resize(rn * tn); + caccum = &_caccum[0]; + memset( caccum, 0, rn * tn * sizeof( caccum[0] )); + + /* Counting all feature pixels */ + for( row = 0; row < h; row++ ) + for( col = 0; col < w; col++ ) + fn += _POINT( row, col ) != 0; + + _x.resize(fn); + _y.resize(fn); + x = &_x[0]; + y = &_y[0]; + + /* Full Hough Transform (it's accumulator update part) */ + fi = 0; + for( row = 0; row < h; row++ ) + { + for( col = 0; col < w; col++ ) + { + if( _POINT( row, col )) + { + int halftn; + float r0; + float scale_factor; + int iprev = -1; + float phi, phi1; + float theta_it; /* Value of theta for iterating */ + + /* Remember the feature point */ + x[fi] = col; + y[fi] = row; + fi++; + + yc = (float) row + 0.5f; + xc = (float) col + 0.5f; + + /* Update the accumulator */ + t = (float) fabs( cvFastArctan( yc, xc ) * d2r ); + r = (float) sqrt( (double)xc * xc + (double)yc * yc ); + r0 = r * irho; + ti0 = cvFloor( (t + Pi / 2) * itheta ); + + caccum[ti0]++; + + theta_it = rho / r; + theta_it = theta_it < theta ? 
theta_it : theta; + scale_factor = theta_it * itheta; + halftn = cvFloor( Pi / theta_it ); + for( ti1 = 1, phi = theta_it - halfPi, phi1 = (theta_it + t) * itheta; + ti1 < halftn; ti1++, phi += theta_it, phi1 += scale_factor ) + { + rv = r0 * _cos( phi ); + i = cvFloor( rv ) * tn; + i += cvFloor( phi1 ); + assert( i >= 0 ); + assert( i < rn * tn ); + caccum[i] = (uchar) (caccum[i] + ((i ^ iprev) != 0)); + iprev = i; + if( cmax < caccum[i] ) + cmax = caccum[i]; + } + } + } + } + + /* Starting additional analysis */ + count = 0; + for( ri = 0; ri < rn; ri++ ) + { + for( ti = 0; ti < tn; ti++ ) + { + if( caccum[ri * tn + ti] > threshold ) + { + count++; + } + } + } + + if( count * 100 > rn * tn ) + { + icvHoughLinesStandard( img, rho, theta, threshold, lines, linesMax ); + return; + } + + _buffer.resize(srn * stn + 2); + buffer = &_buffer[0]; + mcaccum = buffer + 1; + + count = 0; + for( ri = 0; ri < rn; ri++ ) + { + for( ti = 0; ti < tn; ti++ ) + { + if( caccum[ri * tn + ti] > threshold ) + { + count++; + memset( mcaccum, 0, sfn * sizeof( uchar )); + + for( index = 0; index < fn; index++ ) + { + int ti2; + float r0; + + yc = (float) y[index] + 0.5f; + xc = (float) x[index] + 0.5f; + + /* Update the accumulator */ + t = (float) fabs( cvFastArctan( yc, xc ) * d2r ); + r = (float) sqrt( (double)xc * xc + (double)yc * yc ) * isrho; + ti0 = cvFloor( (t + Pi * 0.5f) * istheta ); + ti2 = (ti * stn - ti0) * 5; + r0 = (float) ri *srn; + + for( ti1 = 0 /*, phi = ti*theta - Pi/2 - t */ ; ti1 < stn; ti1++, ti2 += 5 + /*phi += stheta */ ) + { + /*rv = r*_cos(phi) - r0; */ + rv = r * sinTable[(int) (abs( ti2 ))] - r0; + i = cvFloor( rv ) * stn + ti1; + + i = CV_IMAX( i, -1 ); + i = CV_IMIN( i, sfn ); + mcaccum[i]++; + assert( i >= -1 ); + assert( i <= sfn ); + } + } + + /* Find peaks in maccum... 
*/ + for( index = 0; index < sfn; index++ ) + { + i = 0; + pos = h_get_tail_pos__index( list ); + if( h_get_prev__index( &pos )->value < mcaccum[index] ) + { + vi.value = mcaccum[index]; + vi.rho = index / stn * srho + ri * rho; + vi.theta = index % stn * stheta + ti * theta - halfPi; + while( h_is_pos__index( pos )) + { + if( h_get__index( pos )->value > mcaccum[index] ) + { + h_insert_after__index( list, pos, &vi ); + if( h_get_count__index( list ) > linesMax ) + { + h_remove_tail__index( list ); + } + break; + } + h_get_prev__index( &pos ); + } + if( !h_is_pos__index( pos )) + { + h_add_head__index( list, &vi ); + if( h_get_count__index( list ) > linesMax ) + { + h_remove_tail__index( list ); + } + } + } + } + } + } + } + + pos = h_get_head_pos__index( list ); + if( h_get_count__index( list ) == 1 ) + { + if( h_get__index( pos )->rho < 0 ) + { + h_clear_list__index( list ); + } + } + else + { + while( h_is_pos__index( pos )) + { + CvLinePolar line; + pindex = h_get__index( pos ); + if( pindex->rho < 0 ) + { + /* This should be the last element... 
*/ + h_get_next__index( &pos ); + assert( !h_is_pos__index( pos )); + break; + } + line.rho = pindex->rho; + line.angle = pindex->theta; + cvSeqPush( lines, &line ); + + if( lines->total >= linesMax ) + break; + h_get_next__index( &pos ); + } + } + + h_destroy_list__index(list); +} + + +/****************************************************************************************\ +* Probabilistic Hough Transform * +\****************************************************************************************/ + +static void +icvHoughLinesProbabalistic( CvMat* image, + float rho, float theta, int threshold, + int lineLength, int lineGap, + CvSeq *lines, int linesMax ) +{ + cv::Mat accum, mask; + cv::vector trigtab; + cv::MemStorage storage(cvCreateMemStorage(0)); + + CvSeq* seq; + CvSeqWriter writer; + int width, height; + int numangle, numrho; + float ang; + int r, n, count; + CvPoint pt; + float irho = 1 / rho; + CvRNG rng = cvRNG(-1); + const float* ttab; + uchar* mdata0; + + CV_Assert( CV_IS_MAT(image) && CV_MAT_TYPE(image->type) == CV_8UC1 ); + + width = image->cols; + height = image->rows; + + numangle = cvRound(CV_PI / theta); + numrho = cvRound(((width + height) * 2 + 1) / rho); + + accum.create( numangle, numrho, CV_32SC1 ); + mask.create( height, width, CV_8UC1 ); + trigtab.resize(numangle*2); + accum = cv::Scalar(0); + + for( ang = 0, n = 0; n < numangle; ang += theta, n++ ) + { + trigtab[n*2] = (float)(cos(ang) * irho); + trigtab[n*2+1] = (float)(sin(ang) * irho); + } + ttab = &trigtab[0]; + mdata0 = mask.data; + + cvStartWriteSeq( CV_32SC2, sizeof(CvSeq), sizeof(CvPoint), storage, &writer ); + + // stage 1. 
collect non-zero image points + for( pt.y = 0, count = 0; pt.y < height; pt.y++ ) + { + const uchar* data = image->data.ptr + pt.y*image->step; + uchar* mdata = mdata0 + pt.y*width; + for( pt.x = 0; pt.x < width; pt.x++ ) + { + if( data[pt.x] ) + { + mdata[pt.x] = (uchar)1; + CV_WRITE_SEQ_ELEM( pt, writer ); + } + else + mdata[pt.x] = 0; + } + } + + seq = cvEndWriteSeq( &writer ); + count = seq->total; + + // stage 2. process all the points in random order + for( ; count > 0; count-- ) + { + // choose random point out of the remaining ones + int idx = cvRandInt(&rng) % count; + int max_val = threshold-1, max_n = 0; + CvPoint* pt = (CvPoint*)cvGetSeqElem( seq, idx ); + CvPoint line_end[2] = {{0,0}, {0,0}}; + float a, b; + int* adata = (int*)accum.data; + int i, j, k, x0, y0, dx0, dy0, xflag; + int good_line; + const int shift = 16; + + i = pt->y; + j = pt->x; + + // "remove" it by overriding it with the last element + *pt = *(CvPoint*)cvGetSeqElem( seq, count-1 ); + + // check if it has been excluded already (i.e. belongs to some other line) + if( !mdata0[i*width + j] ) + continue; + + // update accumulator, find the most probable line + for( n = 0; n < numangle; n++, adata += numrho ) + { + r = cvRound( j * ttab[n*2] + i * ttab[n*2+1] ); + r += (numrho - 1) / 2; + int val = ++adata[r]; + if( max_val < val ) + { + max_val = val; + max_n = n; + } + } + + // if it is too "weak" candidate, continue with another point + if( max_val < threshold ) + continue; + + // from the current point walk in each direction + // along the found line and extract the line segment + a = -ttab[max_n*2+1]; + b = ttab[max_n*2]; + x0 = j; + y0 = i; + if( fabs(a) > fabs(b) ) + { + xflag = 1; + dx0 = a > 0 ? 1 : -1; + dy0 = cvRound( b*(1 << shift)/fabs(a) ); + y0 = (y0 << shift) + (1 << (shift-1)); + } + else + { + xflag = 0; + dy0 = b > 0 ? 
1 : -1; + dx0 = cvRound( a*(1 << shift)/fabs(b) ); + x0 = (x0 << shift) + (1 << (shift-1)); + } + + for( k = 0; k < 2; k++ ) + { + int gap = 0, x = x0, y = y0, dx = dx0, dy = dy0; + + if( k > 0 ) + dx = -dx, dy = -dy; + + // walk along the line using fixed-point arithmetics, + // stop at the image border or in case of too big gap + for( ;; x += dx, y += dy ) + { + uchar* mdata; + int i1, j1; + + if( xflag ) + { + j1 = x; + i1 = y >> shift; + } + else + { + j1 = x >> shift; + i1 = y; + } + + if( j1 < 0 || j1 >= width || i1 < 0 || i1 >= height ) + break; + + mdata = mdata0 + i1*width + j1; + + // for each non-zero point: + // update line end, + // clear the mask element + // reset the gap + if( *mdata ) + { + gap = 0; + line_end[k].y = i1; + line_end[k].x = j1; + } + else if( ++gap > lineGap ) + break; + } + } + + good_line = abs(line_end[1].x - line_end[0].x) >= lineLength || + abs(line_end[1].y - line_end[0].y) >= lineLength; + + for( k = 0; k < 2; k++ ) + { + int x = x0, y = y0, dx = dx0, dy = dy0; + + if( k > 0 ) + dx = -dx, dy = -dy; + + // walk along the line using fixed-point arithmetics, + // stop at the image border or in case of too big gap + for( ;; x += dx, y += dy ) + { + uchar* mdata; + int i1, j1; + + if( xflag ) + { + j1 = x; + i1 = y >> shift; + } + else + { + j1 = x >> shift; + i1 = y; + } + + mdata = mdata0 + i1*width + j1; + + // for each non-zero point: + // update line end, + // clear the mask element + // reset the gap + if( *mdata ) + { + if( good_line ) + { + adata = (int*)accum.data; + for( n = 0; n < numangle; n++, adata += numrho ) + { + r = cvRound( j1 * ttab[n*2] + i1 * ttab[n*2+1] ); + r += (numrho - 1) / 2; + adata[r]--; + } + } + *mdata = 0; + } + + if( i1 == line_end[k].y && j1 == line_end[k].x ) + break; + } + } + + if( good_line ) + { + CvRect lr = { line_end[0].x, line_end[0].y, line_end[1].x, line_end[1].y }; + cvSeqPush( lines, &lr ); + if( lines->total >= linesMax ) + return; + } + } +} + +/* Wrapper function for standard hough 
transform */ +CV_IMPL CvSeq* +cvHoughLines2( CvArr* src_image, void* lineStorage, int method, + double rho, double theta, int threshold, + double param1, double param2 ) +{ + CvSeq* result = 0; + + CvMat stub, *img = (CvMat*)src_image; + CvMat* mat = 0; + CvSeq* lines = 0; + CvSeq lines_header; + CvSeqBlock lines_block; + int lineType, elemSize; + int linesMax = INT_MAX; + int iparam1, iparam2; + + img = cvGetMat( img, &stub ); + + if( !CV_IS_MASK_ARR(img)) + CV_Error( CV_StsBadArg, "The source image must be 8-bit, single-channel" ); + + if( !lineStorage ) + CV_Error( CV_StsNullPtr, "NULL destination" ); + + if( rho <= 0 || theta <= 0 || threshold <= 0 ) + CV_Error( CV_StsOutOfRange, "rho, theta and threshold must be positive" ); + + if( method != CV_HOUGH_PROBABILISTIC ) + { + lineType = CV_32FC2; + elemSize = sizeof(float)*2; + } + else + { + lineType = CV_32SC4; + elemSize = sizeof(int)*4; + } + + if( CV_IS_STORAGE( lineStorage )) + { + lines = cvCreateSeq( lineType, sizeof(CvSeq), elemSize, (CvMemStorage*)lineStorage ); + } + else if( CV_IS_MAT( lineStorage )) + { + mat = (CvMat*)lineStorage; + + if( !CV_IS_MAT_CONT( mat->type ) || (mat->rows != 1 && mat->cols != 1) ) + CV_Error( CV_StsBadArg, + "The destination matrix should be continuous and have a single row or a single column" ); + + if( CV_MAT_TYPE( mat->type ) != lineType ) + CV_Error( CV_StsBadArg, + "The destination matrix data type is inappropriate, see the manual" ); + + lines = cvMakeSeqHeaderForArray( lineType, sizeof(CvSeq), elemSize, mat->data.ptr, + mat->rows + mat->cols - 1, &lines_header, &lines_block ); + linesMax = lines->total; + cvClearSeq( lines ); + } + else + CV_Error( CV_StsBadArg, "Destination is not CvMemStorage* nor CvMat*" ); + + iparam1 = cvRound(param1); + iparam2 = cvRound(param2); + + switch( method ) + { + case CV_HOUGH_STANDARD: + icvHoughLinesStandard( img, (float)rho, + (float)theta, threshold, lines, linesMax ); + break; + case CV_HOUGH_MULTI_SCALE: + icvHoughLinesSDiv( 
img, (float)rho, (float)theta, + threshold, iparam1, iparam2, lines, linesMax ); + break; + case CV_HOUGH_PROBABILISTIC: + icvHoughLinesProbabalistic( img, (float)rho, (float)theta, + threshold, iparam1, iparam2, lines, linesMax ); + break; + default: + CV_Error( CV_StsBadArg, "Unrecognized method id" ); + } + + if( mat ) + { + if( mat->cols > mat->rows ) + mat->cols = lines->total; + else + mat->rows = lines->total; + } + else + result = lines; + + return result; +} + + +/****************************************************************************************\ +* Circle Detection * +\****************************************************************************************/ + +static void +icvHoughCirclesGradient( CvMat* img, float dp, float min_dist, + int min_radius, int max_radius, + int canny_threshold, int acc_threshold, + CvSeq* circles, int circles_max ) +{ + const int SHIFT = 10, ONE = 1 << SHIFT, R_THRESH = 30; + cv::Ptr dx, dy; + cv::Ptr edges, accum, dist_buf; + std::vector sort_buf; + cv::Ptr storage; + + int x, y, i, j, k, center_count, nz_count; + float min_radius2 = (float)min_radius*min_radius; + float max_radius2 = (float)max_radius*max_radius; + int rows, cols, arows, acols; + int astep, *adata; + float* ddata; + CvSeq *nz, *centers; + float idp, dr; + CvSeqReader reader; + + edges = cvCreateMat( img->rows, img->cols, CV_8UC1 ); + cvCanny( img, edges, MAX(canny_threshold/2,1), canny_threshold, 3 ); + + dx = cvCreateMat( img->rows, img->cols, CV_16SC1 ); + dy = cvCreateMat( img->rows, img->cols, CV_16SC1 ); + cvSobel( img, dx, 1, 0, 3 ); + cvSobel( img, dy, 0, 1, 3 ); + + if( dp < 1.f ) + dp = 1.f; + idp = 1.f/dp; + accum = cvCreateMat( cvCeil(img->rows*idp)+2, cvCeil(img->cols*idp)+2, CV_32SC1 ); + cvZero(accum); + + storage = cvCreateMemStorage(); + nz = cvCreateSeq( CV_32SC2, sizeof(CvSeq), sizeof(CvPoint), storage ); + centers = cvCreateSeq( CV_32SC1, sizeof(CvSeq), sizeof(int), storage ); + + rows = img->rows; + cols = img->cols; + arows = 
accum->rows - 2; + acols = accum->cols - 2; + adata = accum->data.i; + astep = accum->step/sizeof(adata[0]); + + for( y = 0; y < rows; y++ ) + { + const uchar* edges_row = edges->data.ptr + y*edges->step; + const short* dx_row = (const short*)(dx->data.ptr + y*dx->step); + const short* dy_row = (const short*)(dy->data.ptr + y*dy->step); + + for( x = 0; x < cols; x++ ) + { + float vx, vy; + int sx, sy, x0, y0, x1, y1, r, k; + CvPoint pt; + + vx = dx_row[x]; + vy = dy_row[x]; + + if( !edges_row[x] || (vx == 0 && vy == 0) ) + continue; + + float mag = sqrt(vx*vx+vy*vy); + assert( mag >= 1 ); + sx = cvRound((vx*idp)*ONE/mag); + sy = cvRound((vy*idp)*ONE/mag); + + x0 = cvRound((x*idp)*ONE); + y0 = cvRound((y*idp)*ONE); + + for( k = 0; k < 2; k++ ) + { + x1 = x0 + min_radius * sx; + y1 = y0 + min_radius * sy; + + for( r = min_radius; r <= max_radius; x1 += sx, y1 += sy, r++ ) + { + int x2 = x1 >> SHIFT, y2 = y1 >> SHIFT; + if( (unsigned)x2 >= (unsigned)acols || + (unsigned)y2 >= (unsigned)arows ) + break; + adata[y2*astep + x2]++; + } + + sx = -sx; sy = -sy; + } + + pt.x = x; pt.y = y; + cvSeqPush( nz, &pt ); + } + } + + nz_count = nz->total; + if( !nz_count ) + return; + + for( y = 1; y < arows - 1; y++ ) + { + for( x = 1; x < acols - 1; x++ ) + { + int base = y*(acols+2) + x; + if( adata[base] > acc_threshold && + adata[base] > adata[base-1] && adata[base] > adata[base+1] && + adata[base] > adata[base-acols-2] && adata[base] > adata[base+acols+2] ) + cvSeqPush(centers, &base); + } + } + + center_count = centers->total; + if( !center_count ) + return; + + sort_buf.resize( MAX(center_count,nz_count) ); + cvCvtSeqToArray( centers, &sort_buf[0] ); + + icvHoughSortDescent32s( &sort_buf[0], center_count, adata ); + cvClearSeq( centers ); + cvSeqPushMulti( centers, &sort_buf[0], center_count ); + + dist_buf = cvCreateMat( 1, nz_count, CV_32FC1 ); + ddata = dist_buf->data.fl; + + dr = dp; + min_dist = MAX( min_dist, dp ); + min_dist *= min_dist; + + for( i = 0; i < 
centers->total; i++ ) + { + int ofs = *(int*)cvGetSeqElem( centers, i ); + y = ofs/(acols+2) - 1; + x = ofs - (y+1)*(acols+2) - 1; + float cx = (float)(x*dp), cy = (float)(y*dp); + float start_dist, dist_sum; + float r_best = 0, c[3]; + int max_count = R_THRESH; + + for( j = 0; j < circles->total; j++ ) + { + float* c = (float*)cvGetSeqElem( circles, j ); + if( (c[0] - cx)*(c[0] - cx) + (c[1] - cy)*(c[1] - cy) < min_dist ) + break; + } + + if( j < circles->total ) + continue; + + cvStartReadSeq( nz, &reader ); + for( j = k = 0; j < nz_count; j++ ) + { + CvPoint pt; + float _dx, _dy, _r2; + CV_READ_SEQ_ELEM( pt, reader ); + _dx = cx - pt.x; _dy = cy - pt.y; + _r2 = _dx*_dx + _dy*_dy; + if(min_radius2 <= _r2 && _r2 <= max_radius2 ) + { + ddata[k] = _r2; + sort_buf[k] = k; + k++; + } + } + + int nz_count1 = k, start_idx = nz_count1 - 1; + if( nz_count1 == 0 ) + continue; + dist_buf->cols = nz_count1; + cvPow( dist_buf, dist_buf, 0.5 ); + icvHoughSortDescent32s( &sort_buf[0], nz_count1, (int*)ddata ); + + dist_sum = start_dist = ddata[sort_buf[nz_count1-1]]; + for( j = nz_count1 - 2; j >= 0; j-- ) + { + float d = ddata[sort_buf[j]]; + + if( d > max_radius ) + break; + + if( d - start_dist > dr ) + { + float r_cur = ddata[sort_buf[(j + start_idx)/2]]; + if( (start_idx - j)*r_best >= max_count*r_cur || + (r_best < FLT_EPSILON && start_idx - j >= max_count) ) + { + r_best = r_cur; + max_count = start_idx - j; + } + start_dist = d; + start_idx = j; + dist_sum = 0; + } + dist_sum += d; + } + + if( max_count > R_THRESH ) + { + c[0] = cx; + c[1] = cy; + c[2] = (float)r_best; + cvSeqPush( circles, c ); + if( circles->total > circles_max ) + return; + } + } +} + +CV_IMPL CvSeq* +cvHoughCircles( CvArr* src_image, void* circle_storage, + int method, double dp, double min_dist, + double param1, double param2, + int min_radius, int max_radius ) +{ + CvSeq* result = 0; + + CvMat stub, *img = (CvMat*)src_image; + CvMat* mat = 0; + CvSeq* circles = 0; + CvSeq circles_header; + 
CvSeqBlock circles_block; + int circles_max = INT_MAX; + int canny_threshold = cvRound(param1); + int acc_threshold = cvRound(param2); + + img = cvGetMat( img, &stub ); + + if( !CV_IS_MASK_ARR(img)) + CV_Error( CV_StsBadArg, "The source image must be 8-bit, single-channel" ); + + if( !circle_storage ) + CV_Error( CV_StsNullPtr, "NULL destination" ); + + if( dp <= 0 || min_dist <= 0 || canny_threshold <= 0 || acc_threshold <= 0 ) + CV_Error( CV_StsOutOfRange, "dp, min_dist, canny_threshold and acc_threshold must be all positive numbers" ); + + min_radius = MAX( min_radius, 0 ); + if( max_radius <= 0 ) + max_radius = MAX( img->rows, img->cols ); + else if( max_radius <= min_radius ) + max_radius = min_radius + 2; + + if( CV_IS_STORAGE( circle_storage )) + { + circles = cvCreateSeq( CV_32FC3, sizeof(CvSeq), + sizeof(float)*3, (CvMemStorage*)circle_storage ); + } + else if( CV_IS_MAT( circle_storage )) + { + mat = (CvMat*)circle_storage; + + if( !CV_IS_MAT_CONT( mat->type ) || (mat->rows != 1 && mat->cols != 1) || + CV_MAT_TYPE(mat->type) != CV_32FC3 ) + CV_Error( CV_StsBadArg, + "The destination matrix should be continuous and have a single row or a single column" ); + + circles = cvMakeSeqHeaderForArray( CV_32FC3, sizeof(CvSeq), sizeof(float)*3, + mat->data.ptr, mat->rows + mat->cols - 1, &circles_header, &circles_block ); + circles_max = circles->total; + cvClearSeq( circles ); + } + else + CV_Error( CV_StsBadArg, "Destination is not CvMemStorage* nor CvMat*" ); + + switch( method ) + { + case CV_HOUGH_GRADIENT: + icvHoughCirclesGradient( img, (float)dp, (float)min_dist, + min_radius, max_radius, canny_threshold, + acc_threshold, circles, circles_max ); + break; + default: + CV_Error( CV_StsBadArg, "Unrecognized method id" ); + } + + if( mat ) + { + if( mat->cols > mat->rows ) + mat->cols = circles->total; + else + mat->rows = circles->total; + } + else + result = circles; + + return result; +} + + +namespace cv +{ + +const int STORAGE_SIZE = 1 << 12; + +static void 
seqToMat(const CvSeq* seq, OutputArray _arr) +{ + if( seq && seq->total > 0 ) + { + _arr.create(1, seq->total, seq->flags, -1, true); + Mat arr = _arr.getMat(); + cvCvtSeqToArray(seq, arr.data); + } + else + _arr.release(); +} + +} + +void cv::HoughLines( InputArray _image, OutputArray _lines, + double rho, double theta, int threshold, + double srn, double stn ) +{ + Ptr storage = cvCreateMemStorage(STORAGE_SIZE); + Mat image = _image.getMat(); + CvMat c_image = image; + CvSeq* seq = cvHoughLines2( &c_image, storage, srn == 0 && stn == 0 ? + CV_HOUGH_STANDARD : CV_HOUGH_MULTI_SCALE, + rho, theta, threshold, srn, stn ); + seqToMat(seq, _lines); +} + +void cv::HoughLinesP( InputArray _image, OutputArray _lines, + double rho, double theta, int threshold, + double minLineLength, double maxGap ) +{ + Ptr storage = cvCreateMemStorage(STORAGE_SIZE); + Mat image = _image.getMat(); + CvMat c_image = image; + CvSeq* seq = cvHoughLines2( &c_image, storage, CV_HOUGH_PROBABILISTIC, + rho, theta, threshold, minLineLength, maxGap ); + seqToMat(seq, _lines); +} + +void cv::HoughCircles( InputArray _image, OutputArray _circles, + int method, double dp, double min_dist, + double param1, double param2, + int minRadius, int maxRadius ) +{ + Ptr storage = cvCreateMemStorage(STORAGE_SIZE); + Mat image = _image.getMat(); + CvMat c_image = image; + CvSeq* seq = cvHoughCircles( &c_image, storage, method, + dp, min_dist, param1, param2, minRadius, maxRadius ); + seqToMat(seq, _circles); +} + +/* End of file. */ diff --git a/opencv/imgproc/imgwarp.cpp b/opencv/imgproc/imgwarp.cpp new file mode 100644 index 0000000..30eda4a --- /dev/null +++ b/opencv/imgproc/imgwarp.cpp @@ -0,0 +1,3523 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +/* //////////////////////////////////////////////////////////////////// +// +// Geometrical transforms on images and matrices: rotation, zoom etc. +// +// */ + +#include "precomp.hpp" + +namespace cv +{ + +/************** interpolation formulas and tables ***************/ + +const int INTER_RESIZE_COEF_BITS=11; +const int INTER_RESIZE_COEF_SCALE=1 << INTER_RESIZE_COEF_BITS; + +const int INTER_REMAP_COEF_BITS=15; +const int INTER_REMAP_COEF_SCALE=1 << INTER_REMAP_COEF_BITS; + +static uchar NNDeltaTab_i[INTER_TAB_SIZE2][2]; + +static float BilinearTab_f[INTER_TAB_SIZE2][2][2]; +static short BilinearTab_i[INTER_TAB_SIZE2][2][2]; + +#if CV_SSE2 +static short CV_DECL_ALIGNED(16) BilinearTab_iC4[INTER_TAB_SIZE2][2][8]; +#endif + +static float BicubicTab_f[INTER_TAB_SIZE2][4][4]; +static short BicubicTab_i[INTER_TAB_SIZE2][4][4]; + +static float Lanczos4Tab_f[INTER_TAB_SIZE2][8][8]; +static short Lanczos4Tab_i[INTER_TAB_SIZE2][8][8]; + +static inline void interpolateLinear( float x, float* coeffs ) +{ + coeffs[0] = 1.f - x; + coeffs[1] = x; +} + +static inline void interpolateCubic( float x, float* coeffs ) +{ + const float A = -0.75f; + + coeffs[0] = ((A*(x + 1) - 5*A)*(x + 1) + 8*A)*(x + 1) - 4*A; + coeffs[1] = ((A + 2)*x - (A + 3))*x*x + 1; + coeffs[2] = ((A + 2)*(1 - x) - (A + 3))*(1 - x)*(1 - x) + 1; + coeffs[3] = 1.f - coeffs[0] - coeffs[1] - coeffs[2]; +} + +static inline void interpolateLanczos4( float x, float* coeffs ) +{ + static const double s45 = 0.70710678118654752440084436210485; + static const double cs[][2]= + {{1, 0}, {-s45, -s45}, {0, 1}, {s45, -s45}, {-1, 0}, {s45, s45}, {0, -1}, {-s45, s45}}; + + int i; + if( x < FLT_EPSILON ) + { + for( int i = 0; i < 8; i++ ) + coeffs[i] = 0; + coeffs[3] = 1; + return; + } + + float sum = 0; + double y0=-(x+3)*CV_PI*0.25, s0 = sin(y0), c0=cos(y0); + for( i = 0; i < 8; i++ ) + { + double y = -(x+3-i)*CV_PI*0.25; + coeffs[i] = (float)((cs[i][0]*s0 + cs[i][1]*c0)/(y*y)); + sum += coeffs[i]; + } + + sum = 
1.f/sum; + for( i = 0; i < 8; i++ ) + coeffs[i] *= sum; +} + +static void initInterTab1D(int method, float* tab, int tabsz) +{ + float scale = 1.f/tabsz; + if( method == INTER_LINEAR ) + { + for( int i = 0; i < tabsz; i++, tab += 2 ) + interpolateLinear( i*scale, tab ); + } + else if( method == INTER_CUBIC ) + { + for( int i = 0; i < tabsz; i++, tab += 4 ) + interpolateCubic( i*scale, tab ); + } + else if( method == INTER_LANCZOS4 ) + { + for( int i = 0; i < tabsz; i++, tab += 8 ) + interpolateLanczos4( i*scale, tab ); + } + else + CV_Error( CV_StsBadArg, "Unknown interpolation method" ); +} + + +static const void* initInterTab2D( int method, bool fixpt ) +{ + static bool inittab[INTER_MAX+1] = {false}; + float* tab = 0; + short* itab = 0; + int ksize = 0; + if( method == INTER_LINEAR ) + tab = BilinearTab_f[0][0], itab = BilinearTab_i[0][0], ksize=2; + else if( method == INTER_CUBIC ) + tab = BicubicTab_f[0][0], itab = BicubicTab_i[0][0], ksize=4; + else if( method == INTER_LANCZOS4 ) + tab = Lanczos4Tab_f[0][0], itab = Lanczos4Tab_i[0][0], ksize=8; + else + CV_Error( CV_StsBadArg, "Unknown/unsupported interpolation type" ); + + if( !inittab[method] ) + { + AutoBuffer _tab(8*INTER_TAB_SIZE); + int i, j, k1, k2; + initInterTab1D(method, _tab, INTER_TAB_SIZE); + for( i = 0; i < INTER_TAB_SIZE; i++ ) + for( j = 0; j < INTER_TAB_SIZE; j++, tab += ksize*ksize, itab += ksize*ksize ) + { + int isum = 0; + NNDeltaTab_i[i*INTER_TAB_SIZE+j][0] = j < INTER_TAB_SIZE/2; + NNDeltaTab_i[i*INTER_TAB_SIZE+j][1] = i < INTER_TAB_SIZE/2; + + for( k1 = 0; k1 < ksize; k1++ ) + { + float vy = _tab[i*ksize + k1]; + for( k2 = 0; k2 < ksize; k2++ ) + { + float v = vy*_tab[j*ksize + k2]; + tab[k1*ksize + k2] = v; + isum += itab[k1*ksize + k2] = saturate_cast(v*INTER_REMAP_COEF_SCALE); + } + } + + if( isum != INTER_REMAP_COEF_SCALE ) + { + int diff = isum - INTER_REMAP_COEF_SCALE; + int ksize2 = ksize/2, Mk1=ksize2, Mk2=ksize2, mk1=ksize2, mk2=ksize2; + for( k1 = ksize2; k1 < ksize2+2; k1++ 
) + for( k2 = ksize2; k2 < ksize2+2; k2++ ) + { + if( itab[k1*ksize+k2] < itab[mk1*ksize+mk2] ) + mk1 = k1, mk2 = k2; + else if( itab[k1*ksize+k2] > itab[Mk1*ksize+Mk2] ) + Mk1 = k1, Mk2 = k2; + } + if( diff < 0 ) + itab[Mk1*ksize + Mk2] = (short)(itab[Mk1*ksize + Mk2] - diff); + else + itab[mk1*ksize + mk2] = (short)(itab[mk1*ksize + mk2] - diff); + } + } + tab -= INTER_TAB_SIZE2*ksize*ksize; + itab -= INTER_TAB_SIZE2*ksize*ksize; +#if CV_SSE2 + if( method == INTER_LINEAR ) + { + for( i = 0; i < INTER_TAB_SIZE2; i++ ) + for( j = 0; j < 4; j++ ) + { + BilinearTab_iC4[i][0][j*2] = BilinearTab_i[i][0][0]; + BilinearTab_iC4[i][0][j*2+1] = BilinearTab_i[i][0][1]; + BilinearTab_iC4[i][1][j*2] = BilinearTab_i[i][1][0]; + BilinearTab_iC4[i][1][j*2+1] = BilinearTab_i[i][1][1]; + } + } +#endif + inittab[method] = true; + } + return fixpt ? (const void*)itab : (const void*)tab; +} + + +template struct Cast +{ + typedef ST type1; + typedef DT rtype; + + DT operator()(ST val) const { return saturate_cast
(val); } +}; + +template struct FixedPtCast +{ + typedef ST type1; + typedef DT rtype; + enum { SHIFT = bits, DELTA = 1 << (bits-1) }; + + DT operator()(ST val) const { return saturate_cast
((val + DELTA)>>SHIFT); } +}; + +/****************************************************************************************\ +* Resize * +\****************************************************************************************/ + +static void +resizeNN( const Mat& src, Mat& dst, double fx, double fy ) +{ + Size ssize = src.size(), dsize = dst.size(); + AutoBuffer _x_ofs(dsize.width); + int* x_ofs = _x_ofs; + int pix_size = (int)src.elemSize(); + int pix_size4 = (int)(pix_size / sizeof(int)); + double ifx = 1./fx, ify = 1./fy; + int x, y; + + for( x = 0; x < dsize.width; x++ ) + { + int sx = cvFloor(x*ifx); + x_ofs[x] = std::min(sx, ssize.width-1)*pix_size; + } + + for( y = 0; y < dsize.height; y++ ) + { + uchar* D = dst.data + dst.step*y; + int sy = std::min(cvFloor(y*ify), ssize.height-1); + const uchar* S = src.data + src.step*sy; + + switch( pix_size ) + { + case 1: + for( x = 0; x <= dsize.width - 2; x += 2 ) + { + uchar t0 = S[x_ofs[x]]; + uchar t1 = S[x_ofs[x+1]]; + D[x] = t0; + D[x+1] = t1; + } + + for( ; x < dsize.width; x++ ) + D[x] = S[x_ofs[x]]; + break; + case 2: + for( x = 0; x < dsize.width; x++ ) + *(ushort*)(D + x*2) = *(ushort*)(S + x_ofs[x]); + break; + case 3: + for( x = 0; x < dsize.width; x++, D += 3 ) + { + const uchar* _tS = S + x_ofs[x]; + D[0] = _tS[0]; D[1] = _tS[1]; D[2] = _tS[2]; + } + break; + case 4: + for( x = 0; x < dsize.width; x++ ) + *(int*)(D + x*4) = *(int*)(S + x_ofs[x]); + break; + case 6: + for( x = 0; x < dsize.width; x++, D += 6 ) + { + const ushort* _tS = (const ushort*)(S + x_ofs[x]); + ushort* _tD = (ushort*)D; + _tD[0] = _tS[0]; _tD[1] = _tS[1]; _tD[2] = _tS[2]; + } + break; + case 8: + for( x = 0; x < dsize.width; x++, D += 8 ) + { + const int* _tS = (const int*)(S + x_ofs[x]); + int* _tD = (int*)D; + _tD[0] = _tS[0]; _tD[1] = _tS[1]; + } + break; + case 12: + for( x = 0; x < dsize.width; x++, D += 12 ) + { + const int* _tS = (const int*)(S + x_ofs[x]); + int* _tD = (int*)D; + _tD[0] = _tS[0]; _tD[1] = _tS[1]; _tD[2] = 
_tS[2]; + } + break; + default: + for( x = 0; x < dsize.width; x++, D += pix_size ) + { + const int* _tS = (const int*)(S + x_ofs[x]); + int* _tD = (int*)D; + for( int k = 0; k < pix_size4; k++ ) + _tD[k] = _tS[k]; + } + } + } +} + + +struct VResizeNoVec +{ + int operator()(const uchar**, uchar*, const uchar*, int ) const { return 0; } +}; + +struct HResizeNoVec +{ + int operator()(const uchar**, uchar**, int, const int*, + const uchar*, int, int, int, int, int) const { return 0; } +}; + +#if CV_SSE2 + +struct VResizeLinearVec_32s8u +{ + int operator()(const uchar** _src, uchar* dst, const uchar* _beta, int width ) const + { + if( !checkHardwareSupport(CV_CPU_SSE2) ) + return 0; + + const int** src = (const int**)_src; + const short* beta = (const short*)_beta; + const int *S0 = src[0], *S1 = src[1]; + int x = 0; + __m128i b0 = _mm_set1_epi16(beta[0]), b1 = _mm_set1_epi16(beta[1]); + __m128i delta = _mm_set1_epi16(2); + + if( (((size_t)S0|(size_t)S1)&15) == 0 ) + for( ; x <= width - 16; x += 16 ) + { + __m128i x0, x1, x2, y0, y1, y2; + x0 = _mm_load_si128((const __m128i*)(S0 + x)); + x1 = _mm_load_si128((const __m128i*)(S0 + x + 4)); + y0 = _mm_load_si128((const __m128i*)(S1 + x)); + y1 = _mm_load_si128((const __m128i*)(S1 + x + 4)); + x0 = _mm_packs_epi32(_mm_srai_epi32(x0, 4), _mm_srai_epi32(x1, 4)); + y0 = _mm_packs_epi32(_mm_srai_epi32(y0, 4), _mm_srai_epi32(y1, 4)); + + x1 = _mm_load_si128((const __m128i*)(S0 + x + 8)); + x2 = _mm_load_si128((const __m128i*)(S0 + x + 12)); + y1 = _mm_load_si128((const __m128i*)(S1 + x + 8)); + y2 = _mm_load_si128((const __m128i*)(S1 + x + 12)); + x1 = _mm_packs_epi32(_mm_srai_epi32(x1, 4), _mm_srai_epi32(x2, 4)); + y1 = _mm_packs_epi32(_mm_srai_epi32(y1, 4), _mm_srai_epi32(y2, 4)); + + x0 = _mm_adds_epi16(_mm_mulhi_epi16( x0, b0 ), _mm_mulhi_epi16( y0, b1 )); + x1 = _mm_adds_epi16(_mm_mulhi_epi16( x1, b0 ), _mm_mulhi_epi16( y1, b1 )); + + x0 = _mm_srai_epi16(_mm_adds_epi16(x0, delta), 2); + x1 = 
_mm_srai_epi16(_mm_adds_epi16(x1, delta), 2); + _mm_storeu_si128( (__m128i*)(dst + x), _mm_packus_epi16(x0, x1)); + } + else + for( ; x <= width - 16; x += 16 ) + { + __m128i x0, x1, x2, y0, y1, y2; + x0 = _mm_loadu_si128((const __m128i*)(S0 + x)); + x1 = _mm_loadu_si128((const __m128i*)(S0 + x + 4)); + y0 = _mm_loadu_si128((const __m128i*)(S1 + x)); + y1 = _mm_loadu_si128((const __m128i*)(S1 + x + 4)); + x0 = _mm_packs_epi32(_mm_srai_epi32(x0, 4), _mm_srai_epi32(x1, 4)); + y0 = _mm_packs_epi32(_mm_srai_epi32(y0, 4), _mm_srai_epi32(y1, 4)); + + x1 = _mm_loadu_si128((const __m128i*)(S0 + x + 8)); + x2 = _mm_loadu_si128((const __m128i*)(S0 + x + 12)); + y1 = _mm_loadu_si128((const __m128i*)(S1 + x + 8)); + y2 = _mm_loadu_si128((const __m128i*)(S1 + x + 12)); + x1 = _mm_packs_epi32(_mm_srai_epi32(x1, 4), _mm_srai_epi32(x2, 4)); + y1 = _mm_packs_epi32(_mm_srai_epi32(y1, 4), _mm_srai_epi32(y2, 4)); + + x0 = _mm_adds_epi16(_mm_mulhi_epi16( x0, b0 ), _mm_mulhi_epi16( y0, b1 )); + x1 = _mm_adds_epi16(_mm_mulhi_epi16( x1, b0 ), _mm_mulhi_epi16( y1, b1 )); + + x0 = _mm_srai_epi16(_mm_adds_epi16(x0, delta), 2); + x1 = _mm_srai_epi16(_mm_adds_epi16(x1, delta), 2); + _mm_storeu_si128( (__m128i*)(dst + x), _mm_packus_epi16(x0, x1)); + } + + for( ; x < width - 4; x += 4 ) + { + __m128i x0, y0; + x0 = _mm_srai_epi32(_mm_loadu_si128((const __m128i*)(S0 + x)), 4); + y0 = _mm_srai_epi32(_mm_loadu_si128((const __m128i*)(S1 + x)), 4); + x0 = _mm_packs_epi32(x0, x0); + y0 = _mm_packs_epi32(y0, y0); + x0 = _mm_adds_epi16(_mm_mulhi_epi16(x0, b0), _mm_mulhi_epi16(y0, b1)); + x0 = _mm_srai_epi16(_mm_adds_epi16(x0, delta), 2); + x0 = _mm_packus_epi16(x0, x0); + *(int*)(dst + x) = _mm_cvtsi128_si32(x0); + } + + return x; + } +}; + + +template struct VResizeLinearVec_32f16 +{ + int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const + { + if( !checkHardwareSupport(CV_CPU_SSE2) ) + return 0; + + const float** src = (const float**)_src; + const float* beta = (const 
float*)_beta; + const float *S0 = src[0], *S1 = src[1]; + ushort* dst = (ushort*)_dst; + int x = 0; + + __m128 b0 = _mm_set1_ps(beta[0]), b1 = _mm_set1_ps(beta[1]); + __m128i preshift = _mm_set1_epi32(shiftval); + __m128i postshift = _mm_set1_epi16((short)shiftval); + + if( (((size_t)S0|(size_t)S1)&15) == 0 ) + for( ; x <= width - 16; x += 16 ) + { + __m128 x0, x1, y0, y1; + __m128i t0, t1, t2; + x0 = _mm_load_ps(S0 + x); + x1 = _mm_load_ps(S0 + x + 4); + y0 = _mm_load_ps(S1 + x); + y1 = _mm_load_ps(S1 + x + 4); + + x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1)); + x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1)); + t0 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift); + t2 = _mm_add_epi32(_mm_cvtps_epi32(x1), preshift); + t0 = _mm_add_epi16(_mm_packs_epi32(t0, t2), postshift); + + x0 = _mm_load_ps(S0 + x + 8); + x1 = _mm_load_ps(S0 + x + 12); + y0 = _mm_load_ps(S1 + x + 8); + y1 = _mm_load_ps(S1 + x + 12); + + x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1)); + x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1)); + t1 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift); + t2 = _mm_add_epi32(_mm_cvtps_epi32(x1), preshift); + t1 = _mm_add_epi16(_mm_packs_epi32(t1, t2), postshift); + + _mm_storeu_si128( (__m128i*)(dst + x), t0); + _mm_storeu_si128( (__m128i*)(dst + x + 8), t1); + } + else + for( ; x <= width - 16; x += 16 ) + { + __m128 x0, x1, y0, y1; + __m128i t0, t1, t2; + x0 = _mm_loadu_ps(S0 + x); + x1 = _mm_loadu_ps(S0 + x + 4); + y0 = _mm_loadu_ps(S1 + x); + y1 = _mm_loadu_ps(S1 + x + 4); + + x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1)); + x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1)); + t0 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift); + t2 = _mm_add_epi32(_mm_cvtps_epi32(x1), preshift); + t0 = _mm_add_epi16(_mm_packs_epi32(t0, t2), postshift); + + x0 = _mm_loadu_ps(S0 + x + 8); + x1 = _mm_loadu_ps(S0 + x + 12); + y0 = _mm_loadu_ps(S1 + x + 8); + y1 = _mm_loadu_ps(S1 + x + 12); + + x0 = _mm_add_ps(_mm_mul_ps(x0, b0), 
_mm_mul_ps(y0, b1)); + x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1)); + t1 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift); + t2 = _mm_add_epi32(_mm_cvtps_epi32(x1), preshift); + t1 = _mm_add_epi16(_mm_packs_epi32(t1, t2), postshift); + + _mm_storeu_si128( (__m128i*)(dst + x), t0); + _mm_storeu_si128( (__m128i*)(dst + x + 8), t1); + } + + for( ; x < width - 4; x += 4 ) + { + __m128 x0, y0; + __m128i t0; + x0 = _mm_loadu_ps(S0 + x); + y0 = _mm_loadu_ps(S1 + x); + + x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1)); + t0 = _mm_add_epi32(_mm_cvtps_epi32(x0), preshift); + t0 = _mm_add_epi16(_mm_packs_epi32(t0, t0), postshift); + _mm_storel_epi64( (__m128i*)(dst + x), t0); + } + + return x; + } +}; + +typedef VResizeLinearVec_32f16 VResizeLinearVec_32f16u; +typedef VResizeLinearVec_32f16<0> VResizeLinearVec_32f16s; + +struct VResizeLinearVec_32f +{ + int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const + { + if( !checkHardwareSupport(CV_CPU_SSE) ) + return 0; + + const float** src = (const float**)_src; + const float* beta = (const float*)_beta; + const float *S0 = src[0], *S1 = src[1]; + float* dst = (float*)_dst; + int x = 0; + + __m128 b0 = _mm_set1_ps(beta[0]), b1 = _mm_set1_ps(beta[1]); + + if( (((size_t)S0|(size_t)S1)&15) == 0 ) + for( ; x <= width - 8; x += 8 ) + { + __m128 x0, x1, y0, y1; + x0 = _mm_load_ps(S0 + x); + x1 = _mm_load_ps(S0 + x + 4); + y0 = _mm_load_ps(S1 + x); + y1 = _mm_load_ps(S1 + x + 4); + + x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1)); + x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1)); + + _mm_storeu_ps( dst + x, x0); + _mm_storeu_ps( dst + x + 4, x1); + } + else + for( ; x <= width - 8; x += 8 ) + { + __m128 x0, x1, y0, y1; + x0 = _mm_loadu_ps(S0 + x); + x1 = _mm_loadu_ps(S0 + x + 4); + y0 = _mm_loadu_ps(S1 + x); + y1 = _mm_loadu_ps(S1 + x + 4); + + x0 = _mm_add_ps(_mm_mul_ps(x0, b0), _mm_mul_ps(y0, b1)); + x1 = _mm_add_ps(_mm_mul_ps(x1, b0), _mm_mul_ps(y1, b1)); + + 
_mm_storeu_ps( dst + x, x0); + _mm_storeu_ps( dst + x + 4, x1); + } + + return x; + } +}; + + +struct VResizeCubicVec_32s8u +{ + int operator()(const uchar** _src, uchar* dst, const uchar* _beta, int width ) const + { + if( !checkHardwareSupport(CV_CPU_SSE2) ) + return 0; + + const int** src = (const int**)_src; + const short* beta = (const short*)_beta; + const int *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3]; + int x = 0; + float scale = 1.f/(INTER_RESIZE_COEF_SCALE*INTER_RESIZE_COEF_SCALE); + __m128 b0 = _mm_set1_ps(beta[0]*scale), b1 = _mm_set1_ps(beta[1]*scale), + b2 = _mm_set1_ps(beta[2]*scale), b3 = _mm_set1_ps(beta[3]*scale); + + if( (((size_t)S0|(size_t)S1|(size_t)S2|(size_t)S3)&15) == 0 ) + for( ; x <= width - 8; x += 8 ) + { + __m128i x0, x1, y0, y1; + __m128 s0, s1, f0, f1; + x0 = _mm_load_si128((const __m128i*)(S0 + x)); + x1 = _mm_load_si128((const __m128i*)(S0 + x + 4)); + y0 = _mm_load_si128((const __m128i*)(S1 + x)); + y1 = _mm_load_si128((const __m128i*)(S1 + x + 4)); + + s0 = _mm_mul_ps(_mm_cvtepi32_ps(x0), b0); + s1 = _mm_mul_ps(_mm_cvtepi32_ps(x1), b0); + f0 = _mm_mul_ps(_mm_cvtepi32_ps(y0), b1); + f1 = _mm_mul_ps(_mm_cvtepi32_ps(y1), b1); + s0 = _mm_add_ps(s0, f0); + s1 = _mm_add_ps(s1, f1); + + x0 = _mm_load_si128((const __m128i*)(S2 + x)); + x1 = _mm_load_si128((const __m128i*)(S2 + x + 4)); + y0 = _mm_load_si128((const __m128i*)(S3 + x)); + y1 = _mm_load_si128((const __m128i*)(S3 + x + 4)); + + f0 = _mm_mul_ps(_mm_cvtepi32_ps(x0), b2); + f1 = _mm_mul_ps(_mm_cvtepi32_ps(x1), b2); + s0 = _mm_add_ps(s0, f0); + s1 = _mm_add_ps(s1, f1); + f0 = _mm_mul_ps(_mm_cvtepi32_ps(y0), b3); + f1 = _mm_mul_ps(_mm_cvtepi32_ps(y1), b3); + s0 = _mm_add_ps(s0, f0); + s1 = _mm_add_ps(s1, f1); + + x0 = _mm_cvtps_epi32(s0); + x1 = _mm_cvtps_epi32(s1); + + x0 = _mm_packs_epi32(x0, x1); + _mm_storel_epi64( (__m128i*)(dst + x), _mm_packus_epi16(x0, x0)); + } + else + for( ; x <= width - 8; x += 8 ) + { + __m128i x0, x1, y0, y1; + __m128 s0, s1, f0, f1; + x0 
= _mm_loadu_si128((const __m128i*)(S0 + x)); + x1 = _mm_loadu_si128((const __m128i*)(S0 + x + 4)); + y0 = _mm_loadu_si128((const __m128i*)(S1 + x)); + y1 = _mm_loadu_si128((const __m128i*)(S1 + x + 4)); + + s0 = _mm_mul_ps(_mm_cvtepi32_ps(x0), b0); + s1 = _mm_mul_ps(_mm_cvtepi32_ps(x1), b0); + f0 = _mm_mul_ps(_mm_cvtepi32_ps(y0), b1); + f1 = _mm_mul_ps(_mm_cvtepi32_ps(y1), b1); + s0 = _mm_add_ps(s0, f0); + s1 = _mm_add_ps(s1, f1); + + x0 = _mm_loadu_si128((const __m128i*)(S2 + x)); + x1 = _mm_loadu_si128((const __m128i*)(S2 + x + 4)); + y0 = _mm_loadu_si128((const __m128i*)(S3 + x)); + y1 = _mm_loadu_si128((const __m128i*)(S3 + x + 4)); + + f0 = _mm_mul_ps(_mm_cvtepi32_ps(x0), b2); + f1 = _mm_mul_ps(_mm_cvtepi32_ps(x1), b2); + s0 = _mm_add_ps(s0, f0); + s1 = _mm_add_ps(s1, f1); + f0 = _mm_mul_ps(_mm_cvtepi32_ps(y0), b3); + f1 = _mm_mul_ps(_mm_cvtepi32_ps(y1), b3); + s0 = _mm_add_ps(s0, f0); + s1 = _mm_add_ps(s1, f1); + + x0 = _mm_cvtps_epi32(s0); + x1 = _mm_cvtps_epi32(s1); + + x0 = _mm_packs_epi32(x0, x1); + _mm_storel_epi64( (__m128i*)(dst + x), _mm_packus_epi16(x0, x0)); + } + + return x; + } +}; + + +template struct VResizeCubicVec_32f16 +{ + int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const + { + if( !checkHardwareSupport(CV_CPU_SSE2) ) + return 0; + + const float** src = (const float**)_src; + const float* beta = (const float*)_beta; + const float *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3]; + ushort* dst = (ushort*)_dst; + int x = 0; + __m128 b0 = _mm_set1_ps(beta[0]), b1 = _mm_set1_ps(beta[1]), + b2 = _mm_set1_ps(beta[2]), b3 = _mm_set1_ps(beta[3]); + __m128i preshift = _mm_set1_epi32(shiftval); + __m128i postshift = _mm_set1_epi16((short)shiftval); + + for( ; x <= width - 8; x += 8 ) + { + __m128 x0, x1, y0, y1, s0, s1; + __m128i t0, t1; + x0 = _mm_loadu_ps(S0 + x); + x1 = _mm_loadu_ps(S0 + x + 4); + y0 = _mm_loadu_ps(S1 + x); + y1 = _mm_loadu_ps(S1 + x + 4); + + s0 = _mm_mul_ps(x0, b0); + s1 = _mm_mul_ps(x1, 
b0); + y0 = _mm_mul_ps(y0, b1); + y1 = _mm_mul_ps(y1, b1); + s0 = _mm_add_ps(s0, y0); + s1 = _mm_add_ps(s1, y1); + + x0 = _mm_loadu_ps(S2 + x); + x1 = _mm_loadu_ps(S2 + x + 4); + y0 = _mm_loadu_ps(S3 + x); + y1 = _mm_loadu_ps(S3 + x + 4); + + x0 = _mm_mul_ps(x0, b2); + x1 = _mm_mul_ps(x1, b2); + y0 = _mm_mul_ps(y0, b3); + y1 = _mm_mul_ps(y1, b3); + s0 = _mm_add_ps(s0, x0); + s1 = _mm_add_ps(s1, x1); + s0 = _mm_add_ps(s0, y0); + s1 = _mm_add_ps(s1, y1); + + t0 = _mm_add_epi32(_mm_cvtps_epi32(s0), preshift); + t1 = _mm_add_epi32(_mm_cvtps_epi32(s1), preshift); + + t0 = _mm_add_epi16(_mm_packs_epi32(t0, t1), postshift); + _mm_storeu_si128( (__m128i*)(dst + x), t0); + } + + return x; + } +}; + +typedef VResizeCubicVec_32f16 VResizeCubicVec_32f16u; +typedef VResizeCubicVec_32f16<0> VResizeCubicVec_32f16s; + +struct VResizeCubicVec_32f +{ + int operator()(const uchar** _src, uchar* _dst, const uchar* _beta, int width ) const + { + if( !checkHardwareSupport(CV_CPU_SSE) ) + return 0; + + const float** src = (const float**)_src; + const float* beta = (const float*)_beta; + const float *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3]; + float* dst = (float*)_dst; + int x = 0; + __m128 b0 = _mm_set1_ps(beta[0]), b1 = _mm_set1_ps(beta[1]), + b2 = _mm_set1_ps(beta[2]), b3 = _mm_set1_ps(beta[3]); + + for( ; x <= width - 8; x += 8 ) + { + __m128 x0, x1, y0, y1, s0, s1; + x0 = _mm_loadu_ps(S0 + x); + x1 = _mm_loadu_ps(S0 + x + 4); + y0 = _mm_loadu_ps(S1 + x); + y1 = _mm_loadu_ps(S1 + x + 4); + + s0 = _mm_mul_ps(x0, b0); + s1 = _mm_mul_ps(x1, b0); + y0 = _mm_mul_ps(y0, b1); + y1 = _mm_mul_ps(y1, b1); + s0 = _mm_add_ps(s0, y0); + s1 = _mm_add_ps(s1, y1); + + x0 = _mm_loadu_ps(S2 + x); + x1 = _mm_loadu_ps(S2 + x + 4); + y0 = _mm_loadu_ps(S3 + x); + y1 = _mm_loadu_ps(S3 + x + 4); + + x0 = _mm_mul_ps(x0, b2); + x1 = _mm_mul_ps(x1, b2); + y0 = _mm_mul_ps(y0, b3); + y1 = _mm_mul_ps(y1, b3); + s0 = _mm_add_ps(s0, x0); + s1 = _mm_add_ps(s1, x1); + s0 = _mm_add_ps(s0, y0); + s1 = 
_mm_add_ps(s1, y1); + + _mm_storeu_ps( dst + x, s0); + _mm_storeu_ps( dst + x + 4, s1); + } + + return x; + } +}; + +typedef HResizeNoVec HResizeLinearVec_8u32s; +typedef HResizeNoVec HResizeLinearVec_16u32f; +typedef HResizeNoVec HResizeLinearVec_16s32f; +typedef HResizeNoVec HResizeLinearVec_32f; +typedef HResizeNoVec HResizeLinearVec_64f; + +#else + +typedef HResizeNoVec HResizeLinearVec_8u32s; +typedef HResizeNoVec HResizeLinearVec_16u32f; +typedef HResizeNoVec HResizeLinearVec_16s32f; +typedef HResizeNoVec HResizeLinearVec_32f; + +typedef VResizeNoVec VResizeLinearVec_32s8u; +typedef VResizeNoVec VResizeLinearVec_32f16u; +typedef VResizeNoVec VResizeLinearVec_32f16s; +typedef VResizeNoVec VResizeLinearVec_32f; + +typedef VResizeNoVec VResizeCubicVec_32s8u; +typedef VResizeNoVec VResizeCubicVec_32f16u; +typedef VResizeNoVec VResizeCubicVec_32f16s; +typedef VResizeNoVec VResizeCubicVec_32f; + +#endif + + +template +struct HResizeLinear +{ + typedef T value_type; + typedef WT buf_type; + typedef AT alpha_type; + + void operator()(const T** src, WT** dst, int count, + const int* xofs, const AT* alpha, + int swidth, int dwidth, int cn, int xmin, int xmax ) const + { + int dx, k; + VecOp vecOp; + + int dx0 = vecOp((const uchar**)src, (uchar**)dst, count, + xofs, (const uchar*)alpha, swidth, dwidth, cn, xmin, xmax ); + + for( k = 0; k <= count - 2; k++ ) + { + const T *S0 = src[k], *S1 = src[k+1]; + WT *D0 = dst[k], *D1 = dst[k+1]; + for( dx = dx0; dx < xmax; dx++ ) + { + int sx = xofs[dx]; + WT a0 = alpha[dx*2], a1 = alpha[dx*2+1]; + WT t0 = S0[sx]*a0 + S0[sx + cn]*a1; + WT t1 = S1[sx]*a0 + S1[sx + cn]*a1; + D0[dx] = t0; D1[dx] = t1; + } + + for( ; dx < dwidth; dx++ ) + { + int sx = xofs[dx]; + D0[dx] = WT(S0[sx]*ONE); D1[dx] = WT(S1[sx]*ONE); + } + } + + for( ; k < count; k++ ) + { + const T *S = src[k]; + WT *D = dst[k]; + for( dx = 0; dx < xmax; dx++ ) + { + int sx = xofs[dx]; + D[dx] = S[sx]*alpha[dx*2] + S[sx+cn]*alpha[dx*2+1]; + } + + for( ; dx < dwidth; dx++ 
) + D[dx] = WT(S[xofs[dx]]*ONE); + } + } +}; + + +template +struct VResizeLinear +{ + typedef T value_type; + typedef WT buf_type; + typedef AT alpha_type; + + void operator()(const WT** src, T* dst, const AT* beta, int width ) const + { + WT b0 = beta[0], b1 = beta[1]; + const WT *S0 = src[0], *S1 = src[1]; + CastOp castOp; + VecOp vecOp; + + int x = vecOp((const uchar**)src, (uchar*)dst, (const uchar*)beta, width); + for( ; x <= width - 4; x += 4 ) + { + WT t0, t1; + t0 = S0[x]*b0 + S1[x]*b1; + t1 = S0[x+1]*b0 + S1[x+1]*b1; + dst[x] = castOp(t0); dst[x+1] = castOp(t1); + t0 = S0[x+2]*b0 + S1[x+2]*b1; + t1 = S0[x+3]*b0 + S1[x+3]*b1; + dst[x+2] = castOp(t0); dst[x+3] = castOp(t1); + } + + for( ; x < width; x++ ) + dst[x] = castOp(S0[x]*b0 + S1[x]*b1); + } +}; + + +template +struct HResizeCubic +{ + typedef T value_type; + typedef WT buf_type; + typedef AT alpha_type; + + void operator()(const T** src, WT** dst, int count, + const int* xofs, const AT* alpha, + int swidth, int dwidth, int cn, int xmin, int xmax ) const + { + for( int k = 0; k < count; k++ ) + { + const T *S = src[k]; + WT *D = dst[k]; + int dx = 0, limit = xmin; + for(;;) + { + for( ; dx < limit; dx++, alpha += 4 ) + { + int j, sx = xofs[dx] - cn; + WT v = 0; + for( j = 0; j < 4; j++ ) + { + int sxj = sx + j*cn; + if( (unsigned)sxj >= (unsigned)swidth ) + { + while( sxj < 0 ) + sxj += cn; + while( sxj >= swidth ) + sxj -= cn; + } + v += S[sxj]*alpha[j]; + } + D[dx] = v; + } + if( limit == dwidth ) + break; + for( ; dx < xmax; dx++, alpha += 4 ) + { + int sx = xofs[dx]; + D[dx] = S[sx-cn]*alpha[0] + S[sx]*alpha[1] + + S[sx+cn]*alpha[2] + S[sx+cn*2]*alpha[3]; + } + limit = dwidth; + } + alpha -= dwidth*4; + } + } +}; + + +template +struct VResizeCubic +{ + typedef T value_type; + typedef WT buf_type; + typedef AT alpha_type; + + void operator()(const WT** src, T* dst, const AT* beta, int width ) const + { + WT b0 = beta[0], b1 = beta[1], b2 = beta[2], b3 = beta[3]; + const WT *S0 = src[0], *S1 = 
src[1], *S2 = src[2], *S3 = src[3]; + CastOp castOp; + VecOp vecOp; + + int x = vecOp((const uchar**)src, (uchar*)dst, (const uchar*)beta, width); + for( ; x < width; x++ ) + dst[x] = castOp(S0[x]*b0 + S1[x]*b1 + S2[x]*b2 + S3[x]*b3); + } +}; + + +template +struct HResizeLanczos4 +{ + typedef T value_type; + typedef WT buf_type; + typedef AT alpha_type; + + void operator()(const T** src, WT** dst, int count, + const int* xofs, const AT* alpha, + int swidth, int dwidth, int cn, int xmin, int xmax ) const + { + for( int k = 0; k < count; k++ ) + { + const T *S = src[k]; + WT *D = dst[k]; + int dx = 0, limit = xmin; + for(;;) + { + for( ; dx < limit; dx++, alpha += 8 ) + { + int j, sx = xofs[dx] - cn*3; + WT v = 0; + for( j = 0; j < 8; j++ ) + { + int sxj = sx + j*cn; + if( (unsigned)sxj >= (unsigned)swidth ) + { + while( sxj < 0 ) + sxj += cn; + while( sxj >= swidth ) + sxj -= cn; + } + v += S[sxj]*alpha[j]; + } + D[dx] = v; + } + if( limit == dwidth ) + break; + for( ; dx < xmax; dx++, alpha += 8 ) + { + int sx = xofs[dx]; + D[dx] = S[sx-cn*3]*alpha[0] + S[sx-cn*2]*alpha[1] + + S[sx-cn]*alpha[2] + S[sx]*alpha[3] + + S[sx+cn]*alpha[4] + S[sx+cn*2]*alpha[5] + + S[sx+cn*3]*alpha[6] + S[sx+cn*4]*alpha[7]; + } + limit = dwidth; + } + alpha -= dwidth*8; + } + } +}; + + +template +struct VResizeLanczos4 +{ + typedef T value_type; + typedef WT buf_type; + typedef AT alpha_type; + + void operator()(const WT** src, T* dst, const AT* beta, int width ) const + { + CastOp castOp; + VecOp vecOp; + int k, x = vecOp((const uchar**)src, (uchar*)dst, (const uchar*)beta, width); + + for( ; x <= width - 4; x += 4 ) + { + WT b = beta[0]; + const WT* S = src[0]; + WT s0 = S[x]*b, s1 = S[x+1]*b, s2 = S[x+2]*b, s3 = S[x+3]*b; + + for( k = 1; k < 8; k++ ) + { + b = beta[k]; S = src[k]; + s0 += S[x]*b; s1 += S[x+1]*b; + s2 += S[x+2]*b; s3 += S[x+3]*b; + } + + dst[x] = castOp(s0); dst[x+1] = castOp(s1); + dst[x+2] = castOp(s2); dst[x+3] = castOp(s3); + } + + for( ; x < width; x++ ) + { + 
dst[x] = castOp(src[0][x]*beta[0] + src[1][x]*beta[1] + + src[2][x]*beta[2] + src[3][x]*beta[3] + src[4][x]*beta[4] + + src[5][x]*beta[5] + src[6][x]*beta[6] + src[7][x]*beta[7]); + } + } +}; + + +static inline int clip(int x, int a, int b) +{ + return x >= a ? (x < b ? x : b-1) : a; +} + +static const int MAX_ESIZE=16; + +template +static void resizeGeneric_( const Mat& src, Mat& dst, + const int* xofs, const void* _alpha, + const int* yofs, const void* _beta, + int xmin, int xmax, int ksize ) +{ + typedef typename HResize::value_type T; + typedef typename HResize::buf_type WT; + typedef typename HResize::alpha_type AT; + + const AT* alpha = (const AT*)_alpha; + const AT* beta = (const AT*)_beta; + Size ssize = src.size(), dsize = dst.size(); + int cn = src.channels(); + ssize.width *= cn; + dsize.width *= cn; + int bufstep = (int)alignSize(dsize.width, 16); + AutoBuffer _buffer(bufstep*ksize); + const T* srows[MAX_ESIZE]={0}; + WT* rows[MAX_ESIZE]={0}; + int prev_sy[MAX_ESIZE]; + int k, dy; + xmin *= cn; + xmax *= cn; + + HResize hresize; + VResize vresize; + + for( k = 0; k < ksize; k++ ) + { + prev_sy[k] = -1; + rows[k] = (WT*)_buffer + bufstep*k; + } + + // image resize is a separable operation. In case of not too strong + for( dy = 0; dy < dsize.height; dy++, beta += ksize ) + { + int sy0 = yofs[dy], k, k0=ksize, k1=0, ksize2 = ksize/2; + + for( k = 0; k < ksize; k++ ) + { + int sy = clip(sy0 - ksize2 + 1 + k, 0, ssize.height); + for( k1 = std::max(k1, k); k1 < ksize; k1++ ) + { + if( sy == prev_sy[k1] ) // if the sy-th row has been computed already, reuse it. 
+ { + if( k1 > k ) + memcpy( rows[k], rows[k1], bufstep*sizeof(rows[0][0]) ); + break; + } + } + if( k1 == ksize ) + k0 = std::min(k0, k); // remember the first row that needs to be computed + srows[k] = (const T*)(src.data + src.step*sy); + prev_sy[k] = sy; + } + + if( k0 < ksize ) + hresize( srows + k0, rows + k0, ksize - k0, xofs, alpha, + ssize.width, dsize.width, cn, xmin, xmax ); + + vresize( (const WT**)rows, (T*)(dst.data + dst.step*dy), beta, dsize.width ); + } +} + + +template +static void resizeAreaFast_( const Mat& src, Mat& dst, const int* ofs, const int* xofs, + int scale_x, int scale_y ) +{ + Size ssize = src.size(), dsize = dst.size(); + int cn = src.channels(); + int dy, dx, k = 0; + int area = scale_x*scale_y; + float scale = 1.f/(scale_x*scale_y); + int dwidth1 = (ssize.width/scale_x)*cn; + dsize.width *= cn; + ssize.width *= cn; + + for( dy = 0; dy < dsize.height; dy++ ) + { + T* D = (T*)(dst.data + dst.step*dy); + int sy0 = dy*scale_y, w = sy0 + scale_y <= ssize.height ? 
dwidth1 : 0; + if( sy0 >= ssize.height ) + { + for( dx = 0; dx < dsize.width; dx++ ) + D[dx] = 0; + continue; + } + + for( dx = 0; dx < w; dx++ ) + { + const T* S = (const T*)(src.data + src.step*sy0) + xofs[dx]; + WT sum = 0; + for( k = 0; k <= area - 4; k += 4 ) + sum += S[ofs[k]] + S[ofs[k+1]] + S[ofs[k+2]] + S[ofs[k+3]]; + for( ; k < area; k++ ) + sum += S[ofs[k]]; + + D[dx] = saturate_cast(sum*scale); + } + + for( ; dx < dsize.width; dx++ ) + { + WT sum = 0; + int count = 0, sx0 = xofs[dx]; + if( sx0 >= ssize.width ) + D[dx] = 0; + + for( int sy = 0; sy < scale_y; sy++ ) + { + if( sy0 + sy >= ssize.height ) + break; + const T* S = (const T*)(src.data + src.step*(sy0 + sy)) + sx0; + for( int sx = 0; sx < scale_x*cn; sx += cn ) + { + if( sx0 + sx >= ssize.width ) + break; + sum += S[sx]; + count++; + } + } + + D[dx] = saturate_cast((float)sum/count); + } + } +} + +struct DecimateAlpha +{ + int si, di; + float alpha; +}; + +template +static void resizeArea_( const Mat& src, Mat& dst, const DecimateAlpha* xofs, int xofs_count ) +{ + Size ssize = src.size(), dsize = dst.size(); + int cn = src.channels(); + dsize.width *= cn; + AutoBuffer _buffer(dsize.width*2); + WT *buf = _buffer, *sum = buf + dsize.width; + int k, sy, dx, cur_dy = 0; + WT scale_y = (WT)ssize.height/dsize.height; + + CV_Assert( cn <= 4 ); + for( dx = 0; dx < dsize.width; dx++ ) + buf[dx] = sum[dx] = 0; + + for( sy = 0; sy < ssize.height; sy++ ) + { + const T* S = (const T*)(src.data + src.step*sy); + if( cn == 1 ) + for( k = 0; k < xofs_count; k++ ) + { + int dxn = xofs[k].di; + WT alpha = xofs[k].alpha; + buf[dxn] += S[xofs[k].si]*alpha; + } + else if( cn == 2 ) + for( k = 0; k < xofs_count; k++ ) + { + int sxn = xofs[k].si; + int dxn = xofs[k].di; + WT alpha = xofs[k].alpha; + WT t0 = buf[dxn] + S[sxn]*alpha; + WT t1 = buf[dxn+1] + S[sxn+1]*alpha; + buf[dxn] = t0; buf[dxn+1] = t1; + } + else if( cn == 3 ) + for( k = 0; k < xofs_count; k++ ) + { + int sxn = xofs[k].si; + int dxn = xofs[k].di; + 
WT alpha = xofs[k].alpha; + WT t0 = buf[dxn] + S[sxn]*alpha; + WT t1 = buf[dxn+1] + S[sxn+1]*alpha; + WT t2 = buf[dxn+2] + S[sxn+2]*alpha; + buf[dxn] = t0; buf[dxn+1] = t1; buf[dxn+2] = t2; + } + else + for( k = 0; k < xofs_count; k++ ) + { + int sxn = xofs[k].si; + int dxn = xofs[k].di; + WT alpha = xofs[k].alpha; + WT t0 = buf[dxn] + S[sxn]*alpha; + WT t1 = buf[dxn+1] + S[sxn+1]*alpha; + buf[dxn] = t0; buf[dxn+1] = t1; + t0 = buf[dxn+2] + S[sxn+2]*alpha; + t1 = buf[dxn+3] + S[sxn+3]*alpha; + buf[dxn+2] = t0; buf[dxn+3] = t1; + } + + if( (cur_dy + 1)*scale_y <= sy + 1 || sy == ssize.height - 1 ) + { + WT beta = std::max(sy + 1 - (cur_dy+1)*scale_y, (WT)0); + WT beta1 = 1 - beta; + T* D = (T*)(dst.data + dst.step*cur_dy); + if( fabs(beta) < 1e-3 ) + for( dx = 0; dx < dsize.width; dx++ ) + { + D[dx] = saturate_cast(sum[dx] + buf[dx]); + sum[dx] = buf[dx] = 0; + } + else + for( dx = 0; dx < dsize.width; dx++ ) + { + D[dx] = saturate_cast(sum[dx] + buf[dx]*beta1); + sum[dx] = buf[dx]*beta; + buf[dx] = 0; + } + cur_dy++; + } + else + { + for( dx = 0; dx <= dsize.width - 2; dx += 2 ) + { + WT t0 = sum[dx] + buf[dx]; + WT t1 = sum[dx+1] + buf[dx+1]; + sum[dx] = t0; sum[dx+1] = t1; + buf[dx] = buf[dx+1] = 0; + } + for( ; dx < dsize.width; dx++ ) + { + sum[dx] += buf[dx]; + buf[dx] = 0; + } + } + } +} + + +typedef void (*ResizeFunc)( const Mat& src, Mat& dst, + const int* xofs, const void* alpha, + const int* yofs, const void* beta, + int xmin, int xmax, int ksize ); + +typedef void (*ResizeAreaFastFunc)( const Mat& src, Mat& dst, + const int* ofs, const int *xofs, + int scale_x, int scale_y ); + +typedef void (*ResizeAreaFunc)( const Mat& src, Mat& dst, + const DecimateAlpha* xofs, int xofs_count ); + +} + +////////////////////////////////////////////////////////////////////////////////////////// + +void cv::resize( InputArray _src, OutputArray _dst, Size dsize, + double inv_scale_x, double inv_scale_y, int interpolation ) +{ + static ResizeFunc linear_tab[] = + { + 
resizeGeneric_< + HResizeLinear, + VResizeLinear, + VResizeLinearVec_32s8u> >, + 0, + resizeGeneric_< + HResizeLinear, + VResizeLinear, + VResizeLinearVec_32f16u> >, + resizeGeneric_< + HResizeLinear, + VResizeLinear, + VResizeLinearVec_32f16s> >, + 0, + resizeGeneric_< + HResizeLinear, + VResizeLinear, + VResizeLinearVec_32f> >, + resizeGeneric_< + HResizeLinear, + VResizeLinear, + VResizeNoVec> >, + 0 + }; + + static ResizeFunc cubic_tab[] = + { + resizeGeneric_< + HResizeCubic, + VResizeCubic, + VResizeCubicVec_32s8u> >, + 0, + resizeGeneric_< + HResizeCubic, + VResizeCubic, + VResizeCubicVec_32f16u> >, + resizeGeneric_< + HResizeCubic, + VResizeCubic, + VResizeCubicVec_32f16s> >, + 0, + resizeGeneric_< + HResizeCubic, + VResizeCubic, + VResizeCubicVec_32f> >, + resizeGeneric_< + HResizeCubic, + VResizeCubic, + VResizeNoVec> >, + 0 + }; + + static ResizeFunc lanczos4_tab[] = + { + resizeGeneric_, + VResizeLanczos4, + VResizeNoVec> >, + 0, + resizeGeneric_, + VResizeLanczos4, + VResizeNoVec> >, + resizeGeneric_, + VResizeLanczos4, + VResizeNoVec> >, + 0, + resizeGeneric_, + VResizeLanczos4, + VResizeNoVec> >, + resizeGeneric_, + VResizeLanczos4, + VResizeNoVec> >, + 0 + }; + + static ResizeAreaFastFunc areafast_tab[] = + { + resizeAreaFast_, 0, + resizeAreaFast_, + resizeAreaFast_, + 0, + resizeAreaFast_, + resizeAreaFast_, + 0 + }; + + static ResizeAreaFunc area_tab[] = + { + resizeArea_, 0, resizeArea_, resizeArea_, + 0, resizeArea_, resizeArea_, 0 + }; + + Mat src = _src.getMat(); + Size ssize = src.size(); + + CV_Assert( ssize.area() > 0 ); + CV_Assert( !(dsize == Size()) || (inv_scale_x > 0 && inv_scale_y > 0) ); + if( dsize == Size() ) + { + dsize = Size(saturate_cast(src.cols*inv_scale_x), + saturate_cast(src.rows*inv_scale_y)); + } + else + { + inv_scale_x = (double)dsize.width/src.cols; + inv_scale_y = (double)dsize.height/src.rows; + } + _dst.create(dsize, src.type()); + Mat dst = _dst.getMat(); + + int depth = src.depth(), cn = src.channels(); + double 
scale_x = 1./inv_scale_x, scale_y = 1./inv_scale_y; + int k, sx, sy, dx, dy; + + if( interpolation == INTER_NEAREST ) + { + resizeNN( src, dst, inv_scale_x, inv_scale_y ); + return; + } + + // true "area" interpolation is only implemented for the case (scale_x <= 1 && scale_y <= 1). + // In other cases it is emulated using some variant of bilinear interpolation + if( interpolation == INTER_AREA && scale_x >= 1 && scale_y >= 1 ) + { + int iscale_x = saturate_cast(scale_x); + int iscale_y = saturate_cast(scale_y); + + if( std::abs(scale_x - iscale_x) < DBL_EPSILON && + std::abs(scale_y - iscale_y) < DBL_EPSILON ) + { + int area = iscale_x*iscale_y; + size_t srcstep = src.step / src.elemSize1(); + AutoBuffer _ofs(area + dsize.width*cn); + int* ofs = _ofs; + int* xofs = ofs + area; + ResizeAreaFastFunc func = areafast_tab[depth]; + CV_Assert( func != 0 ); + + for( sy = 0, k = 0; sy < iscale_y; sy++ ) + for( sx = 0; sx < iscale_x; sx++ ) + ofs[k++] = (int)(sy*srcstep + sx*cn); + + for( dx = 0; dx < dsize.width; dx++ ) + { + sx = dx*iscale_x*cn; + for( k = 0; k < cn; k++ ) + xofs[dx*cn + k] = sx + k; + } + + func( src, dst, ofs, xofs, iscale_x, iscale_y ); + return; + } + + ResizeAreaFunc func = area_tab[depth]; + CV_Assert( func != 0 && cn <= 4 ); + + AutoBuffer _xofs(ssize.width*2); + DecimateAlpha* xofs = _xofs; + double scale = 1.f/(scale_x*scale_y); + + for( dx = 0, k = 0; dx < dsize.width; dx++ ) + { + double fsx1 = dx*scale_x, fsx2 = fsx1 + scale_x; + int sx1 = cvCeil(fsx1), sx2 = cvFloor(fsx2); + sx1 = std::min(sx1, ssize.width-1); + sx2 = std::min(sx2, ssize.width-1); + + if( sx1 > fsx1 ) + { + assert( k < ssize.width*2 ); + xofs[k].di = dx*cn; + xofs[k].si = (sx1-1)*cn; + xofs[k++].alpha = (float)((sx1 - fsx1)*scale); + } + + for( sx = sx1; sx < sx2; sx++ ) + { + assert( k < ssize.width*2 ); + xofs[k].di = dx*cn; + xofs[k].si = sx*cn; + xofs[k++].alpha = (float)scale; + } + + if( fsx2 - sx2 > 1e-3 ) + { + assert( k < ssize.width*2 ); + xofs[k].di = dx*cn; + 
xofs[k].si = sx2*cn; + xofs[k++].alpha = (float)((fsx2 - sx2)*scale); + } + } + + func( src, dst, xofs, k ); + return; + } + + int xmin = 0, xmax = dsize.width, width = dsize.width*cn; + bool area_mode = interpolation == INTER_AREA; + bool fixpt = depth == CV_8U; + float fx, fy; + ResizeFunc func=0; + int ksize=0, ksize2; + if( interpolation == INTER_CUBIC ) + ksize = 4, func = cubic_tab[depth]; + else if( interpolation == INTER_LANCZOS4 ) + ksize = 8, func = lanczos4_tab[depth]; + else if( interpolation == INTER_LINEAR || interpolation == INTER_AREA ) + ksize = 2, func = linear_tab[depth]; + else + CV_Error( CV_StsBadArg, "Unknown interpolation method" ); + ksize2 = ksize/2; + + CV_Assert( func != 0 ); + + AutoBuffer _buffer((width + dsize.height)*(sizeof(int) + sizeof(float)*ksize)); + int* xofs = (int*)(uchar*)_buffer; + int* yofs = xofs + width; + float* alpha = (float*)(yofs + dsize.height); + short* ialpha = (short*)alpha; + float* beta = alpha + width*ksize; + short* ibeta = ialpha + width*ksize; + float cbuf[MAX_ESIZE]; + + for( dx = 0; dx < dsize.width; dx++ ) + { + if( !area_mode ) + { + fx = (float)((dx+0.5)*scale_x - 0.5); + sx = cvFloor(fx); + fx -= sx; + } + else + { + sx = cvFloor(dx*scale_x); + fx = (float)((dx+1) - (sx+1)*inv_scale_x); + fx = fx <= 0 ? 
0.f : fx - cvFloor(fx); + } + + if( sx < ksize2-1 ) + { + xmin = dx+1; + if( sx < 0 ) + fx = 0, sx = 0; + } + + if( sx + ksize2 >= ssize.width ) + { + xmax = std::min( xmax, dx ); + if( sx >= ssize.width-1 ) + fx = 0, sx = ssize.width-1; + } + + for( k = 0, sx *= cn; k < cn; k++ ) + xofs[dx*cn + k] = sx + k; + + if( interpolation == INTER_CUBIC ) + interpolateCubic( fx, cbuf ); + else if( interpolation == INTER_LANCZOS4 ) + interpolateLanczos4( fx, cbuf ); + else + { + cbuf[0] = 1.f - fx; + cbuf[1] = fx; + } + if( fixpt ) + { + for( k = 0; k < ksize; k++ ) + ialpha[dx*cn*ksize + k] = saturate_cast(cbuf[k]*INTER_RESIZE_COEF_SCALE); + for( ; k < cn*ksize; k++ ) + ialpha[dx*cn*ksize + k] = ialpha[dx*cn*ksize + k - ksize]; + } + else + { + for( k = 0; k < ksize; k++ ) + alpha[dx*cn*ksize + k] = cbuf[k]; + for( ; k < cn*ksize; k++ ) + alpha[dx*cn*ksize + k] = alpha[dx*cn*ksize + k - ksize]; + } + } + + for( dy = 0; dy < dsize.height; dy++ ) + { + if( !area_mode ) + { + fy = (float)((dy+0.5)*scale_y - 0.5); + sy = cvFloor(fy); + fy -= sy; + } + else + { + sy = cvFloor(dy*scale_y); + fy = (float)((dy+1) - (sy+1)*inv_scale_y); + fy = fy <= 0 ? 0.f : fy - cvFloor(fy); + } + + yofs[dy] = sy; + if( interpolation == INTER_CUBIC ) + interpolateCubic( fy, cbuf ); + else if( interpolation == INTER_LANCZOS4 ) + interpolateLanczos4( fy, cbuf ); + else + { + cbuf[0] = 1.f - fy; + cbuf[1] = fy; + } + + if( fixpt ) + { + for( k = 0; k < ksize; k++ ) + ibeta[dy*ksize + k] = saturate_cast(cbuf[k]*INTER_RESIZE_COEF_SCALE); + } + else + { + for( k = 0; k < ksize; k++ ) + beta[dy*ksize + k] = cbuf[k]; + } + } + + func( src, dst, xofs, fixpt ? (void*)ialpha : (void*)alpha, yofs, + fixpt ? 
(void*)ibeta : (void*)beta, xmin, xmax, ksize ); +} + + +/****************************************************************************************\ +* General warping (affine, perspective, remap) * +\****************************************************************************************/ + +namespace cv +{ + +template +static void remapNearest( const Mat& _src, Mat& _dst, const Mat& _xy, + int borderType, const Scalar& _borderValue ) +{ + Size ssize = _src.size(), dsize = _dst.size(); + int cn = _src.channels(); + const T* S0 = (const T*)_src.data; + size_t sstep = _src.step/sizeof(S0[0]); + Scalar_ cval(saturate_cast(_borderValue[0]), + saturate_cast(_borderValue[1]), + saturate_cast(_borderValue[2]), + saturate_cast(_borderValue[3])); + int dx, dy; + + unsigned width1 = ssize.width, height1 = ssize.height; + + if( _dst.isContinuous() && _xy.isContinuous() ) + { + dsize.width *= dsize.height; + dsize.height = 1; + } + + for( dy = 0; dy < dsize.height; dy++ ) + { + T* D = (T*)(_dst.data + _dst.step*dy); + const short* XY = (const short*)(_xy.data + _xy.step*dy); + + if( cn == 1 ) + { + for( dx = 0; dx < dsize.width; dx++ ) + { + int sx = XY[dx*2], sy = XY[dx*2+1]; + if( (unsigned)sx < width1 && (unsigned)sy < height1 ) + D[dx] = S0[sy*sstep + sx]; + else + { + if( borderType == BORDER_REPLICATE ) + { + sx = clip(sx, 0, ssize.width); + sy = clip(sy, 0, ssize.height); + D[dx] = S0[sy*sstep + sx]; + } + else if( borderType == BORDER_CONSTANT ) + D[dx] = cval[0]; + else if( borderType != BORDER_TRANSPARENT ) + { + sx = borderInterpolate(sx, ssize.width, borderType); + sy = borderInterpolate(sy, ssize.height, borderType); + D[dx] = S0[sy*sstep + sx]; + } + } + } + } + else + { + for( dx = 0; dx < dsize.width; dx++, D += cn ) + { + int sx = XY[dx*2], sy = XY[dx*2+1], k; + const T *S; + if( (unsigned)sx < width1 && (unsigned)sy < height1 ) + { + if( cn == 3 ) + { + S = S0 + sy*sstep + sx*3; + D[0] = S[0], D[1] = S[1], D[2] = S[2]; + } + else if( cn == 4 ) + { + S = S0 + 
sy*sstep + sx*4; + D[0] = S[0], D[1] = S[1], D[2] = S[2], D[3] = S[3]; + } + else + { + S = S0 + sy*sstep + sx*cn; + for( k = 0; k < cn; k++ ) + D[k] = S[k]; + } + } + else if( borderType != BORDER_TRANSPARENT ) + { + if( borderType == BORDER_REPLICATE ) + { + sx = clip(sx, 0, ssize.width); + sy = clip(sy, 0, ssize.height); + S = S0 + sy*sstep + sx*cn; + } + else if( borderType == BORDER_CONSTANT ) + S = &cval[0]; + else + { + sx = borderInterpolate(sx, ssize.width, borderType); + sy = borderInterpolate(sy, ssize.height, borderType); + S = S0 + sy*sstep + sx*cn; + } + for( k = 0; k < cn; k++ ) + D[k] = S[k]; + } + } + } + } +} + + +struct RemapNoVec +{ + int operator()( const Mat&, void*, const short*, const ushort*, + const void*, int ) const { return 0; } +}; + +#if CV_SSE2 + +struct RemapVec_8u +{ + int operator()( const Mat& _src, void* _dst, const short* XY, + const ushort* FXY, const void* _wtab, int width ) const + { + int cn = _src.channels(); + + if( (cn != 1 && cn != 3 && cn != 4) || !checkHardwareSupport(CV_CPU_SSE2) ) + return 0; + + const uchar *S0 = _src.data, *S1 = _src.data + _src.step; + const short* wtab = cn == 1 ? 
(const short*)_wtab : &BilinearTab_iC4[0][0][0]; + uchar* D = (uchar*)_dst; + int x = 0, sstep = (int)_src.step; + __m128i delta = _mm_set1_epi32(INTER_REMAP_COEF_SCALE/2); + __m128i xy2ofs = _mm_set1_epi32(cn + (sstep << 16)); + __m128i z = _mm_setzero_si128(); + int CV_DECL_ALIGNED(16) iofs0[4], iofs1[4]; + + if( cn == 1 ) + { + for( ; x <= width - 8; x += 8 ) + { + __m128i xy0 = _mm_loadu_si128( (const __m128i*)(XY + x*2)); + __m128i xy1 = _mm_loadu_si128( (const __m128i*)(XY + x*2 + 8)); + __m128i v0, v1, v2, v3, a0, a1, b0, b1; + unsigned i0, i1; + + xy0 = _mm_madd_epi16( xy0, xy2ofs ); + xy1 = _mm_madd_epi16( xy1, xy2ofs ); + _mm_store_si128( (__m128i*)iofs0, xy0 ); + _mm_store_si128( (__m128i*)iofs1, xy1 ); + + i0 = *(ushort*)(S0 + iofs0[0]) + (*(ushort*)(S0 + iofs0[1]) << 16); + i1 = *(ushort*)(S0 + iofs0[2]) + (*(ushort*)(S0 + iofs0[3]) << 16); + v0 = _mm_unpacklo_epi32(_mm_cvtsi32_si128(i0), _mm_cvtsi32_si128(i1)); + i0 = *(ushort*)(S1 + iofs0[0]) + (*(ushort*)(S1 + iofs0[1]) << 16); + i1 = *(ushort*)(S1 + iofs0[2]) + (*(ushort*)(S1 + iofs0[3]) << 16); + v1 = _mm_unpacklo_epi32(_mm_cvtsi32_si128(i0), _mm_cvtsi32_si128(i1)); + v0 = _mm_unpacklo_epi8(v0, z); + v1 = _mm_unpacklo_epi8(v1, z); + + a0 = _mm_unpacklo_epi32(_mm_loadl_epi64((__m128i*)(wtab+FXY[x]*4)), + _mm_loadl_epi64((__m128i*)(wtab+FXY[x+1]*4))); + a1 = _mm_unpacklo_epi32(_mm_loadl_epi64((__m128i*)(wtab+FXY[x+2]*4)), + _mm_loadl_epi64((__m128i*)(wtab+FXY[x+3]*4))); + b0 = _mm_unpacklo_epi64(a0, a1); + b1 = _mm_unpackhi_epi64(a0, a1); + v0 = _mm_madd_epi16(v0, b0); + v1 = _mm_madd_epi16(v1, b1); + v0 = _mm_add_epi32(_mm_add_epi32(v0, v1), delta); + + i0 = *(ushort*)(S0 + iofs1[0]) + (*(ushort*)(S0 + iofs1[1]) << 16); + i1 = *(ushort*)(S0 + iofs1[2]) + (*(ushort*)(S0 + iofs1[3]) << 16); + v2 = _mm_unpacklo_epi32(_mm_cvtsi32_si128(i0), _mm_cvtsi32_si128(i1)); + i0 = *(ushort*)(S1 + iofs1[0]) + (*(ushort*)(S1 + iofs1[1]) << 16); + i1 = *(ushort*)(S1 + iofs1[2]) + (*(ushort*)(S1 + iofs1[3]) << 16); 
+ v3 = _mm_unpacklo_epi32(_mm_cvtsi32_si128(i0), _mm_cvtsi32_si128(i1)); + v2 = _mm_unpacklo_epi8(v2, z); + v3 = _mm_unpacklo_epi8(v3, z); + + a0 = _mm_unpacklo_epi32(_mm_loadl_epi64((__m128i*)(wtab+FXY[x+4]*4)), + _mm_loadl_epi64((__m128i*)(wtab+FXY[x+5]*4))); + a1 = _mm_unpacklo_epi32(_mm_loadl_epi64((__m128i*)(wtab+FXY[x+6]*4)), + _mm_loadl_epi64((__m128i*)(wtab+FXY[x+7]*4))); + b0 = _mm_unpacklo_epi64(a0, a1); + b1 = _mm_unpackhi_epi64(a0, a1); + v2 = _mm_madd_epi16(v2, b0); + v3 = _mm_madd_epi16(v3, b1); + v2 = _mm_add_epi32(_mm_add_epi32(v2, v3), delta); + + v0 = _mm_srai_epi32(v0, INTER_REMAP_COEF_BITS); + v2 = _mm_srai_epi32(v2, INTER_REMAP_COEF_BITS); + v0 = _mm_packus_epi16(_mm_packs_epi32(v0, v2), z); + _mm_storel_epi64( (__m128i*)(D + x), v0 ); + } + } + else if( cn == 3 ) + { + for( ; x <= width - 5; x += 4, D += 12 ) + { + __m128i xy0 = _mm_loadu_si128( (const __m128i*)(XY + x*2)); + __m128i u0, v0, u1, v1; + + xy0 = _mm_madd_epi16( xy0, xy2ofs ); + _mm_store_si128( (__m128i*)iofs0, xy0 ); + const __m128i *w0, *w1; + w0 = (const __m128i*)(wtab + FXY[x]*16); + w1 = (const __m128i*)(wtab + FXY[x+1]*16); + + u0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[0])), + _mm_cvtsi32_si128(*(int*)(S0 + iofs0[0] + 3))); + v0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[0])), + _mm_cvtsi32_si128(*(int*)(S1 + iofs0[0] + 3))); + u1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[1])), + _mm_cvtsi32_si128(*(int*)(S0 + iofs0[1] + 3))); + v1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[1])), + _mm_cvtsi32_si128(*(int*)(S1 + iofs0[1] + 3))); + u0 = _mm_unpacklo_epi8(u0, z); + v0 = _mm_unpacklo_epi8(v0, z); + u1 = _mm_unpacklo_epi8(u1, z); + v1 = _mm_unpacklo_epi8(v1, z); + u0 = _mm_add_epi32(_mm_madd_epi16(u0, w0[0]), _mm_madd_epi16(v0, w0[1])); + u1 = _mm_add_epi32(_mm_madd_epi16(u1, w1[0]), _mm_madd_epi16(v1, w1[1])); + u0 = _mm_srai_epi32(_mm_add_epi32(u0, delta), INTER_REMAP_COEF_BITS); + u1 = 
_mm_srai_epi32(_mm_add_epi32(u1, delta), INTER_REMAP_COEF_BITS); + u0 = _mm_slli_si128(u0, 4); + u0 = _mm_packs_epi32(u0, u1); + u0 = _mm_packus_epi16(u0, u0); + _mm_storel_epi64((__m128i*)D, _mm_srli_si128(u0,1)); + + w0 = (const __m128i*)(wtab + FXY[x+2]*16); + w1 = (const __m128i*)(wtab + FXY[x+3]*16); + + u0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[2])), + _mm_cvtsi32_si128(*(int*)(S0 + iofs0[2] + 3))); + v0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[2])), + _mm_cvtsi32_si128(*(int*)(S1 + iofs0[2] + 3))); + u1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[3])), + _mm_cvtsi32_si128(*(int*)(S0 + iofs0[3] + 3))); + v1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[3])), + _mm_cvtsi32_si128(*(int*)(S1 + iofs0[3] + 3))); + u0 = _mm_unpacklo_epi8(u0, z); + v0 = _mm_unpacklo_epi8(v0, z); + u1 = _mm_unpacklo_epi8(u1, z); + v1 = _mm_unpacklo_epi8(v1, z); + u0 = _mm_add_epi32(_mm_madd_epi16(u0, w0[0]), _mm_madd_epi16(v0, w0[1])); + u1 = _mm_add_epi32(_mm_madd_epi16(u1, w1[0]), _mm_madd_epi16(v1, w1[1])); + u0 = _mm_srai_epi32(_mm_add_epi32(u0, delta), INTER_REMAP_COEF_BITS); + u1 = _mm_srai_epi32(_mm_add_epi32(u1, delta), INTER_REMAP_COEF_BITS); + u0 = _mm_slli_si128(u0, 4); + u0 = _mm_packs_epi32(u0, u1); + u0 = _mm_packus_epi16(u0, u0); + _mm_storel_epi64((__m128i*)(D + 6), _mm_srli_si128(u0,1)); + } + } + else if( cn == 4 ) + { + for( ; x <= width - 4; x += 4, D += 16 ) + { + __m128i xy0 = _mm_loadu_si128( (const __m128i*)(XY + x*2)); + __m128i u0, v0, u1, v1; + + xy0 = _mm_madd_epi16( xy0, xy2ofs ); + _mm_store_si128( (__m128i*)iofs0, xy0 ); + const __m128i *w0, *w1; + w0 = (const __m128i*)(wtab + FXY[x]*16); + w1 = (const __m128i*)(wtab + FXY[x+1]*16); + + u0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[0])), + _mm_cvtsi32_si128(*(int*)(S0 + iofs0[0] + 4))); + v0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[0])), + _mm_cvtsi32_si128(*(int*)(S1 + iofs0[0] + 4))); + u1 = 
_mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[1])), + _mm_cvtsi32_si128(*(int*)(S0 + iofs0[1] + 4))); + v1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[1])), + _mm_cvtsi32_si128(*(int*)(S1 + iofs0[1] + 4))); + u0 = _mm_unpacklo_epi8(u0, z); + v0 = _mm_unpacklo_epi8(v0, z); + u1 = _mm_unpacklo_epi8(u1, z); + v1 = _mm_unpacklo_epi8(v1, z); + u0 = _mm_add_epi32(_mm_madd_epi16(u0, w0[0]), _mm_madd_epi16(v0, w0[1])); + u1 = _mm_add_epi32(_mm_madd_epi16(u1, w1[0]), _mm_madd_epi16(v1, w1[1])); + u0 = _mm_srai_epi32(_mm_add_epi32(u0, delta), INTER_REMAP_COEF_BITS); + u1 = _mm_srai_epi32(_mm_add_epi32(u1, delta), INTER_REMAP_COEF_BITS); + u0 = _mm_packs_epi32(u0, u1); + u0 = _mm_packus_epi16(u0, u0); + _mm_storel_epi64((__m128i*)D, u0); + + w0 = (const __m128i*)(wtab + FXY[x+2]*16); + w1 = (const __m128i*)(wtab + FXY[x+3]*16); + + u0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[2])), + _mm_cvtsi32_si128(*(int*)(S0 + iofs0[2] + 4))); + v0 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[2])), + _mm_cvtsi32_si128(*(int*)(S1 + iofs0[2] + 4))); + u1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S0 + iofs0[3])), + _mm_cvtsi32_si128(*(int*)(S0 + iofs0[3] + 4))); + v1 = _mm_unpacklo_epi8(_mm_cvtsi32_si128(*(int*)(S1 + iofs0[3])), + _mm_cvtsi32_si128(*(int*)(S1 + iofs0[3] + 4))); + u0 = _mm_unpacklo_epi8(u0, z); + v0 = _mm_unpacklo_epi8(v0, z); + u1 = _mm_unpacklo_epi8(u1, z); + v1 = _mm_unpacklo_epi8(v1, z); + u0 = _mm_add_epi32(_mm_madd_epi16(u0, w0[0]), _mm_madd_epi16(v0, w0[1])); + u1 = _mm_add_epi32(_mm_madd_epi16(u1, w1[0]), _mm_madd_epi16(v1, w1[1])); + u0 = _mm_srai_epi32(_mm_add_epi32(u0, delta), INTER_REMAP_COEF_BITS); + u1 = _mm_srai_epi32(_mm_add_epi32(u1, delta), INTER_REMAP_COEF_BITS); + u0 = _mm_packs_epi32(u0, u1); + u0 = _mm_packus_epi16(u0, u0); + _mm_storel_epi64((__m128i*)(D + 8), u0); + } + } + + return x; + } +}; + +#else + +typedef RemapNoVec RemapVec_8u; + +#endif + + +template +static void remapBilinear( const Mat& 
_src, Mat& _dst, const Mat& _xy, + const Mat& _fxy, const void* _wtab, + int borderType, const Scalar& _borderValue ) +{ + typedef typename CastOp::rtype T; + typedef typename CastOp::type1 WT; + Size ssize = _src.size(), dsize = _dst.size(); + int cn = _src.channels(); + const AT* wtab = (const AT*)_wtab; + const T* S0 = (const T*)_src.data; + size_t sstep = _src.step/sizeof(S0[0]); + Scalar_ cval(saturate_cast(_borderValue[0]), + saturate_cast(_borderValue[1]), + saturate_cast(_borderValue[2]), + saturate_cast(_borderValue[3])); + int dx, dy; + CastOp castOp; + VecOp vecOp; + + unsigned width1 = std::max(ssize.width-1, 0), height1 = std::max(ssize.height-1, 0); + CV_Assert( cn <= 4 && ssize.area() > 0 ); +#if CV_SSE2 + if( _src.type() == CV_8UC3 ) + width1 = std::max(ssize.width-2, 0); +#endif + + for( dy = 0; dy < dsize.height; dy++ ) + { + T* D = (T*)(_dst.data + _dst.step*dy); + const short* XY = (const short*)(_xy.data + _xy.step*dy); + const ushort* FXY = (const ushort*)(_fxy.data + _fxy.step*dy); + int X0 = 0; + bool prevInlier = false; + + for( dx = 0; dx <= dsize.width; dx++ ) + { + bool curInlier = dx < dsize.width ? 
+ (unsigned)XY[dx*2] < width1 && + (unsigned)XY[dx*2+1] < height1 : !prevInlier; + if( curInlier == prevInlier ) + continue; + + int X1 = dx; + dx = X0; + X0 = X1; + prevInlier = curInlier; + + if( !curInlier ) + { + int len = vecOp( _src, D, XY + dx*2, FXY + dx, wtab, X1 - dx ); + D += len*cn; + dx += len; + + if( cn == 1 ) + { + for( ; dx < X1; dx++, D++ ) + { + int sx = XY[dx*2], sy = XY[dx*2+1]; + const AT* w = wtab + FXY[dx]*4; + const T* S = S0 + sy*sstep + sx; + *D = castOp(WT(S[0]*w[0] + S[1]*w[1] + S[sstep]*w[2] + S[sstep+1]*w[3])); + } + } + else if( cn == 2 ) + for( ; dx < X1; dx++, D += 2 ) + { + int sx = XY[dx*2], sy = XY[dx*2+1]; + const AT* w = wtab + FXY[dx]*4; + const T* S = S0 + sy*sstep + sx*2; + WT t0 = S[0]*w[0] + S[2]*w[1] + S[sstep]*w[2] + S[sstep+2]*w[3]; + WT t1 = S[1]*w[0] + S[3]*w[1] + S[sstep+1]*w[2] + S[sstep+3]*w[3]; + D[0] = castOp(t0); D[1] = castOp(t1); + } + else if( cn == 3 ) + for( ; dx < X1; dx++, D += 3 ) + { + int sx = XY[dx*2], sy = XY[dx*2+1]; + const AT* w = wtab + FXY[dx]*4; + const T* S = S0 + sy*sstep + sx*3; + WT t0 = S[0]*w[0] + S[3]*w[1] + S[sstep]*w[2] + S[sstep+3]*w[3]; + WT t1 = S[1]*w[0] + S[4]*w[1] + S[sstep+1]*w[2] + S[sstep+4]*w[3]; + WT t2 = S[2]*w[0] + S[5]*w[1] + S[sstep+2]*w[2] + S[sstep+5]*w[3]; + D[0] = castOp(t0); D[1] = castOp(t1); D[2] = castOp(t2); + } + else + for( ; dx < X1; dx++, D += 4 ) + { + int sx = XY[dx*2], sy = XY[dx*2+1]; + const AT* w = wtab + FXY[dx]*4; + const T* S = S0 + sy*sstep + sx*4; + WT t0 = S[0]*w[0] + S[4]*w[1] + S[sstep]*w[2] + S[sstep+4]*w[3]; + WT t1 = S[1]*w[0] + S[5]*w[1] + S[sstep+1]*w[2] + S[sstep+5]*w[3]; + D[0] = castOp(t0); D[1] = castOp(t1); + t0 = S[2]*w[0] + S[6]*w[1] + S[sstep+2]*w[2] + S[sstep+6]*w[3]; + t1 = S[3]*w[0] + S[7]*w[1] + S[sstep+3]*w[2] + S[sstep+7]*w[3]; + D[2] = castOp(t0); D[3] = castOp(t1); + } + } + else + { + if( borderType == BORDER_TRANSPARENT && cn != 3 ) + { + D += (X1 - dx)*cn; + dx = X1; + continue; + } + + if( cn == 1 ) + for( ; dx < X1; 
dx++, D++ ) + { + int sx = XY[dx*2], sy = XY[dx*2+1]; + if( borderType == BORDER_CONSTANT && + (sx >= ssize.width || sx+1 < 0 || + sy >= ssize.height || sy+1 < 0) ) + { + D[0] = cval[0]; + } + else + { + int sx0, sx1, sy0, sy1; + T v0, v1, v2, v3; + const AT* w = wtab + FXY[dx]*4; + if( borderType == BORDER_REPLICATE ) + { + sx0 = clip(sx, 0, ssize.width); + sx1 = clip(sx+1, 0, ssize.width); + sy0 = clip(sy, 0, ssize.height); + sy1 = clip(sy+1, 0, ssize.height); + v0 = S0[sy0*sstep + sx0]; + v1 = S0[sy0*sstep + sx1]; + v2 = S0[sy1*sstep + sx0]; + v3 = S0[sy1*sstep + sx1]; + } + else + { + sx0 = borderInterpolate(sx, ssize.width, borderType); + sx1 = borderInterpolate(sx+1, ssize.width, borderType); + sy0 = borderInterpolate(sy, ssize.height, borderType); + sy1 = borderInterpolate(sy+1, ssize.height, borderType); + v0 = sx0 >= 0 && sy0 >= 0 ? S0[sy0*sstep + sx0] : cval[0]; + v1 = sx1 >= 0 && sy0 >= 0 ? S0[sy0*sstep + sx1] : cval[0]; + v2 = sx0 >= 0 && sy1 >= 0 ? S0[sy1*sstep + sx0] : cval[0]; + v3 = sx1 >= 0 && sy1 >= 0 ? 
S0[sy1*sstep + sx1] : cval[0]; + } + D[0] = castOp(WT(v0*w[0] + v1*w[1] + v2*w[2] + v3*w[3])); + } + } + else + for( ; dx < X1; dx++, D += cn ) + { + int sx = XY[dx*2], sy = XY[dx*2+1], k; + if( borderType == BORDER_CONSTANT && + (sx >= ssize.width || sx+1 < 0 || + sy >= ssize.height || sy+1 < 0) ) + { + for( k = 0; k < cn; k++ ) + D[k] = cval[k]; + } + else + { + int sx0, sx1, sy0, sy1; + const T *v0, *v1, *v2, *v3; + const AT* w = wtab + FXY[dx]*4; + if( borderType == BORDER_REPLICATE ) + { + sx0 = clip(sx, 0, ssize.width); + sx1 = clip(sx+1, 0, ssize.width); + sy0 = clip(sy, 0, ssize.height); + sy1 = clip(sy+1, 0, ssize.height); + v0 = S0 + sy0*sstep + sx0*cn; + v1 = S0 + sy0*sstep + sx1*cn; + v2 = S0 + sy1*sstep + sx0*cn; + v3 = S0 + sy1*sstep + sx1*cn; + } + else if( borderType == BORDER_TRANSPARENT && + ((unsigned)sx >= (unsigned)(ssize.width-1) || + (unsigned)sy >= (unsigned)(ssize.height-1))) + continue; + else + { + sx0 = borderInterpolate(sx, ssize.width, borderType); + sx1 = borderInterpolate(sx+1, ssize.width, borderType); + sy0 = borderInterpolate(sy, ssize.height, borderType); + sy1 = borderInterpolate(sy+1, ssize.height, borderType); + v0 = sx0 >= 0 && sy0 >= 0 ? S0 + sy0*sstep + sx0*cn : &cval[0]; + v1 = sx1 >= 0 && sy0 >= 0 ? S0 + sy0*sstep + sx1*cn : &cval[0]; + v2 = sx0 >= 0 && sy1 >= 0 ? S0 + sy1*sstep + sx0*cn : &cval[0]; + v3 = sx1 >= 0 && sy1 >= 0 ? 
S0 + sy1*sstep + sx1*cn : &cval[0]; + } + for( k = 0; k < cn; k++ ) + D[k] = castOp(WT(v0[k]*w[0] + v1[k]*w[1] + v2[k]*w[2] + v3[k]*w[3])); + } + } + } + } + } +} + + +template +static void remapBicubic( const Mat& _src, Mat& _dst, const Mat& _xy, + const Mat& _fxy, const void* _wtab, + int borderType, const Scalar& _borderValue ) +{ + typedef typename CastOp::rtype T; + typedef typename CastOp::type1 WT; + Size ssize = _src.size(), dsize = _dst.size(); + int cn = _src.channels(); + const AT* wtab = (const AT*)_wtab; + const T* S0 = (const T*)_src.data; + size_t sstep = _src.step/sizeof(S0[0]); + Scalar_ cval(saturate_cast(_borderValue[0]), + saturate_cast(_borderValue[1]), + saturate_cast(_borderValue[2]), + saturate_cast(_borderValue[3])); + int dx, dy; + CastOp castOp; + int borderType1 = borderType != BORDER_TRANSPARENT ? borderType : BORDER_REFLECT_101; + + unsigned width1 = std::max(ssize.width-3, 0), height1 = std::max(ssize.height-3, 0); + + if( _dst.isContinuous() && _xy.isContinuous() && _fxy.isContinuous() ) + { + dsize.width *= dsize.height; + dsize.height = 1; + } + + for( dy = 0; dy < dsize.height; dy++ ) + { + T* D = (T*)(_dst.data + _dst.step*dy); + const short* XY = (const short*)(_xy.data + _xy.step*dy); + const ushort* FXY = (const ushort*)(_fxy.data + _fxy.step*dy); + + for( dx = 0; dx < dsize.width; dx++, D += cn ) + { + int sx = XY[dx*2]-1, sy = XY[dx*2+1]-1; + const AT* w = wtab + FXY[dx]*16; + int i, k; + if( (unsigned)sx < width1 && (unsigned)sy < height1 ) + { + const T* S = S0 + sy*sstep + sx*cn; + for( k = 0; k < cn; k++ ) + { + WT sum = S[0]*w[0] + S[cn]*w[1] + S[cn*2]*w[2] + S[cn*3]*w[3]; + S += sstep; + sum += S[0]*w[4] + S[cn]*w[5] + S[cn*2]*w[6] + S[cn*3]*w[7]; + S += sstep; + sum += S[0]*w[8] + S[cn]*w[9] + S[cn*2]*w[10] + S[cn*3]*w[11]; + S += sstep; + sum += S[0]*w[12] + S[cn]*w[13] + S[cn*2]*w[14] + S[cn*3]*w[15]; + S += 1 - sstep*3; + D[k] = castOp(sum); + } + } + else + { + int x[4], y[4]; + if( borderType == 
BORDER_TRANSPARENT && + ((unsigned)(sx+1) >= (unsigned)ssize.width || + (unsigned)(sy+1) >= (unsigned)ssize.height) ) + continue; + + if( borderType1 == BORDER_CONSTANT && + (sx >= ssize.width || sx+4 <= 0 || + sy >= ssize.height || sy+4 <= 0)) + { + for( k = 0; k < cn; k++ ) + D[k] = cval[k]; + continue; + } + + for( i = 0; i < 4; i++ ) + { + x[i] = borderInterpolate(sx + i, ssize.width, borderType1)*cn; + y[i] = borderInterpolate(sy + i, ssize.height, borderType1); + } + + for( k = 0; k < cn; k++, S0++, w -= 16 ) + { + WT cv = cval[k], sum = cv*ONE; + for( i = 0; i < 4; i++, w += 4 ) + { + int yi = y[i]; + const T* S = S0 + yi*sstep; + if( yi < 0 ) + continue; + if( x[0] >= 0 ) + sum += (S[x[0]] - cv)*w[0]; + if( x[1] >= 0 ) + sum += (S[x[1]] - cv)*w[1]; + if( x[2] >= 0 ) + sum += (S[x[2]] - cv)*w[2]; + if( x[3] >= 0 ) + sum += (S[x[3]] - cv)*w[3]; + } + D[k] = castOp(sum); + } + S0 -= cn; + } + } + } +} + + +template +static void remapLanczos4( const Mat& _src, Mat& _dst, const Mat& _xy, + const Mat& _fxy, const void* _wtab, + int borderType, const Scalar& _borderValue ) +{ + typedef typename CastOp::rtype T; + typedef typename CastOp::type1 WT; + Size ssize = _src.size(), dsize = _dst.size(); + int cn = _src.channels(); + const AT* wtab = (const AT*)_wtab; + const T* S0 = (const T*)_src.data; + size_t sstep = _src.step/sizeof(S0[0]); + Scalar_ cval(saturate_cast(_borderValue[0]), + saturate_cast(_borderValue[1]), + saturate_cast(_borderValue[2]), + saturate_cast(_borderValue[3])); + int dx, dy; + CastOp castOp; + int borderType1 = borderType != BORDER_TRANSPARENT ? 
borderType : BORDER_REFLECT_101; + + unsigned width1 = std::max(ssize.width-7, 0), height1 = std::max(ssize.height-7, 0); + + if( _dst.isContinuous() && _xy.isContinuous() && _fxy.isContinuous() ) + { + dsize.width *= dsize.height; + dsize.height = 1; + } + + for( dy = 0; dy < dsize.height; dy++ ) + { + T* D = (T*)(_dst.data + _dst.step*dy); + const short* XY = (const short*)(_xy.data + _xy.step*dy); + const ushort* FXY = (const ushort*)(_fxy.data + _fxy.step*dy); + + for( dx = 0; dx < dsize.width; dx++, D += cn ) + { + int sx = XY[dx*2]-3, sy = XY[dx*2+1]-3; + const AT* w = wtab + FXY[dx]*64; + const T* S = S0 + sy*sstep + sx*cn; + int i, k; + if( (unsigned)sx < width1 && (unsigned)sy < height1 ) + { + for( k = 0; k < cn; k++ ) + { + WT sum = 0; + for( int r = 0; r < 8; r++, S += sstep, w += 8 ) + sum += S[0]*w[0] + S[cn]*w[1] + S[cn*2]*w[2] + S[cn*3]*w[3] + + S[cn*4]*w[4] + S[cn*5]*w[5] + S[cn*6]*w[6] + S[cn*7]*w[7]; + w -= 64; + S -= sstep*8 - 1; + D[k] = castOp(sum); + } + } + else + { + int x[8], y[8]; + if( borderType == BORDER_TRANSPARENT && + ((unsigned)(sx+3) >= (unsigned)ssize.width || + (unsigned)(sy+3) >= (unsigned)ssize.height) ) + continue; + + if( borderType1 == BORDER_CONSTANT && + (sx >= ssize.width || sx+8 <= 0 || + sy >= ssize.height || sy+8 <= 0)) + { + for( k = 0; k < cn; k++ ) + D[k] = cval[k]; + continue; + } + + for( i = 0; i < 8; i++ ) + { + x[i] = borderInterpolate(sx + i, ssize.width, borderType1)*cn; + y[i] = borderInterpolate(sy + i, ssize.height, borderType1); + } + + for( k = 0; k < cn; k++, S0++, w -= 64 ) + { + WT cv = cval[k], sum = cv*ONE; + for( i = 0; i < 8; i++, w += 8 ) + { + int yi = y[i]; + const T* S = S0 + yi*sstep; + if( yi < 0 ) + continue; + if( x[0] >= 0 ) + sum += (S[x[0]] - cv)*w[0]; + if( x[1] >= 0 ) + sum += (S[x[1]] - cv)*w[1]; + if( x[2] >= 0 ) + sum += (S[x[2]] - cv)*w[2]; + if( x[3] >= 0 ) + sum += (S[x[3]] - cv)*w[3]; + if( x[4] >= 0 ) + sum += (S[x[4]] - cv)*w[4]; + if( x[5] >= 0 ) + sum += (S[x[5]] - 
cv)*w[5]; + if( x[6] >= 0 ) + sum += (S[x[6]] - cv)*w[6]; + if( x[7] >= 0 ) + sum += (S[x[7]] - cv)*w[7]; + } + D[k] = castOp(sum); + } + S0 -= cn; + } + } + } +} + + +typedef void (*RemapNNFunc)(const Mat& _src, Mat& _dst, const Mat& _xy, + int borderType, const Scalar& _borderValue ); + +typedef void (*RemapFunc)(const Mat& _src, Mat& _dst, const Mat& _xy, + const Mat& _fxy, const void* _wtab, + int borderType, const Scalar& _borderValue); + +} + +void cv::remap( InputArray _src, OutputArray _dst, + InputArray _map1, InputArray _map2, + int interpolation, int borderType, const Scalar& borderValue ) +{ + static RemapNNFunc nn_tab[] = + { + remapNearest, remapNearest, remapNearest, remapNearest, + remapNearest, remapNearest, remapNearest, 0 + }; + + static RemapFunc linear_tab[] = + { + remapBilinear, RemapVec_8u, short>, 0, + remapBilinear, RemapNoVec, float>, + remapBilinear, RemapNoVec, float>, 0, + remapBilinear, RemapNoVec, float>, + remapBilinear, RemapNoVec, float>, 0 + }; + + static RemapFunc cubic_tab[] = + { + remapBicubic, short, INTER_REMAP_COEF_SCALE>, 0, + remapBicubic, float, 1>, + remapBicubic, float, 1>, 0, + remapBicubic, float, 1>, + remapBicubic, float, 1>, 0 + }; + + static RemapFunc lanczos4_tab[] = + { + remapLanczos4, short, INTER_REMAP_COEF_SCALE>, 0, + remapLanczos4, float, 1>, + remapLanczos4, float, 1>, 0, + remapLanczos4, float, 1>, + remapLanczos4, float, 1>, 0 + }; + + Mat src = _src.getMat(), map1 = _map1.getMat(), map2 = _map2.getMat(); + + CV_Assert( (!map2.data || map2.size() == map1.size())); + + _dst.create( map1.size(), src.type() ); + Mat dst = _dst.getMat(); + CV_Assert(dst.data != src.data); + + int depth = src.depth(), map_depth = map1.depth(); + RemapNNFunc nnfunc = 0; + RemapFunc ifunc = 0; + const void* ctab = 0; + bool fixpt = depth == CV_8U; + bool planar_input = false; + + if( interpolation == INTER_NEAREST ) + { + nnfunc = nn_tab[depth]; + CV_Assert( nnfunc != 0 ); + + if( map1.type() == CV_16SC2 && !map2.data ) // 
the data is already in the right format + { + nnfunc( src, dst, map1, borderType, borderValue ); + return; + } + } + else + { + if( interpolation == INTER_AREA ) + interpolation = INTER_LINEAR; + + if( interpolation == INTER_LINEAR ) + ifunc = linear_tab[depth]; + else if( interpolation == INTER_CUBIC ) + ifunc = cubic_tab[depth]; + else if( interpolation == INTER_LANCZOS4 ) + ifunc = lanczos4_tab[depth]; + else + CV_Error( CV_StsBadArg, "Unknown interpolation method" ); + CV_Assert( ifunc != 0 ); + ctab = initInterTab2D( interpolation, fixpt ); + } + + const Mat *m1 = &map1, *m2 = &map2; + + if( (map1.type() == CV_16SC2 && (map2.type() == CV_16UC1 || map2.type() == CV_16SC1)) || + (map2.type() == CV_16SC2 && (map1.type() == CV_16UC1 || map1.type() == CV_16SC1)) ) + { + if( map1.type() != CV_16SC2 ) + std::swap(m1, m2); + if( ifunc ) + { + ifunc( src, dst, *m1, *m2, ctab, borderType, borderValue ); + return; + } + } + else + { + CV_Assert( (map1.type() == CV_32FC2 && !map2.data) || + (map1.type() == CV_32FC1 && map2.type() == CV_32FC1) ); + planar_input = map1.channels() == 1; + } + + int x, y, x1, y1; + const int buf_size = 1 << 14; + int brows0 = std::min(128, dst.rows); + int bcols0 = std::min(buf_size/brows0, dst.cols); + brows0 = std::min(buf_size/bcols0, dst.rows); +#if CV_SSE2 + bool useSIMD = checkHardwareSupport(CV_CPU_SSE2); +#endif + + Mat _bufxy(brows0, bcols0, CV_16SC2), _bufa; + if( !nnfunc ) + _bufa.create(brows0, bcols0, CV_16UC1); + + for( y = 0; y < dst.rows; y += brows0 ) + { + for( x = 0; x < dst.cols; x += bcols0 ) + { + int brows = std::min(brows0, dst.rows - y); + int bcols = std::min(bcols0, dst.cols - x); + Mat dpart(dst, Rect(x, y, bcols, brows)); + Mat bufxy(_bufxy, Rect(0, 0, bcols, brows)); + + if( nnfunc ) + { + if( map_depth != CV_32F ) + { + for( y1 = 0; y1 < brows; y1++ ) + { + short* XY = (short*)(bufxy.data + bufxy.step*y1); + const short* sXY = (const short*)(m1->data + m1->step*(y+y1)) + x*2; + const ushort* sA = (const 
ushort*)(m2->data + m2->step*(y+y1)) + x; + + for( x1 = 0; x1 < bcols; x1++ ) + { + int a = sA[x1] & (INTER_TAB_SIZE2-1); + XY[x1*2] = sXY[x1*2] + NNDeltaTab_i[a][0]; + XY[x1*2+1] = sXY[x1*2+1] + NNDeltaTab_i[a][1]; + } + } + } + else if( !planar_input ) + map1(Rect(0,0,bcols,brows)).convertTo(bufxy, bufxy.depth()); + else + { + for( y1 = 0; y1 < brows; y1++ ) + { + short* XY = (short*)(bufxy.data + bufxy.step*y1); + const float* sX = (const float*)(map1.data + map1.step*(y+y1)) + x; + const float* sY = (const float*)(map2.data + map2.step*(y+y1)) + x; + x1 = 0; + + #if CV_SSE2 + if( useSIMD ) + { + for( ; x1 <= bcols - 8; x1 += 8 ) + { + __m128 fx0 = _mm_loadu_ps(sX + x1); + __m128 fx1 = _mm_loadu_ps(sX + x1 + 4); + __m128 fy0 = _mm_loadu_ps(sY + x1); + __m128 fy1 = _mm_loadu_ps(sY + x1 + 4); + __m128i ix0 = _mm_cvtps_epi32(fx0); + __m128i ix1 = _mm_cvtps_epi32(fx1); + __m128i iy0 = _mm_cvtps_epi32(fy0); + __m128i iy1 = _mm_cvtps_epi32(fy1); + ix0 = _mm_packs_epi32(ix0, ix1); + iy0 = _mm_packs_epi32(iy0, iy1); + ix1 = _mm_unpacklo_epi16(ix0, iy0); + iy1 = _mm_unpackhi_epi16(ix0, iy0); + _mm_storeu_si128((__m128i*)(XY + x1*2), ix1); + _mm_storeu_si128((__m128i*)(XY + x1*2 + 8), iy1); + } + } + #endif + + for( ; x1 < bcols; x1++ ) + { + XY[x1*2] = saturate_cast(sX[x1]); + XY[x1*2+1] = saturate_cast(sY[x1]); + } + } + } + nnfunc( src, dpart, bufxy, borderType, borderValue ); + continue; + } + + Mat bufa(_bufa, Rect(0,0,bcols, brows)); + for( y1 = 0; y1 < brows; y1++ ) + { + short* XY = (short*)(bufxy.data + bufxy.step*y1); + ushort* A = (ushort*)(bufa.data + bufa.step*y1); + + if( planar_input ) + { + const float* sX = (const float*)(map1.data + map1.step*(y+y1)) + x; + const float* sY = (const float*)(map2.data + map2.step*(y+y1)) + x; + + x1 = 0; + #if CV_SSE2 + if( useSIMD ) + { + __m128 scale = _mm_set1_ps((float)INTER_TAB_SIZE); + __m128i mask = _mm_set1_epi32(INTER_TAB_SIZE-1); + for( ; x1 <= bcols - 8; x1 += 8 ) + { + __m128 fx0 = _mm_loadu_ps(sX + x1); + 
__m128 fx1 = _mm_loadu_ps(sX + x1 + 4); + __m128 fy0 = _mm_loadu_ps(sY + x1); + __m128 fy1 = _mm_loadu_ps(sY + x1 + 4); + __m128i ix0 = _mm_cvtps_epi32(_mm_mul_ps(fx0, scale)); + __m128i ix1 = _mm_cvtps_epi32(_mm_mul_ps(fx1, scale)); + __m128i iy0 = _mm_cvtps_epi32(_mm_mul_ps(fy0, scale)); + __m128i iy1 = _mm_cvtps_epi32(_mm_mul_ps(fy1, scale)); + __m128i mx0 = _mm_and_si128(ix0, mask); + __m128i mx1 = _mm_and_si128(ix1, mask); + __m128i my0 = _mm_and_si128(iy0, mask); + __m128i my1 = _mm_and_si128(iy1, mask); + mx0 = _mm_packs_epi32(mx0, mx1); + my0 = _mm_packs_epi32(my0, my1); + my0 = _mm_slli_epi16(my0, INTER_BITS); + mx0 = _mm_or_si128(mx0, my0); + _mm_storeu_si128((__m128i*)(A + x1), mx0); + ix0 = _mm_srai_epi32(ix0, INTER_BITS); + ix1 = _mm_srai_epi32(ix1, INTER_BITS); + iy0 = _mm_srai_epi32(iy0, INTER_BITS); + iy1 = _mm_srai_epi32(iy1, INTER_BITS); + ix0 = _mm_packs_epi32(ix0, ix1); + iy0 = _mm_packs_epi32(iy0, iy1); + ix1 = _mm_unpacklo_epi16(ix0, iy0); + iy1 = _mm_unpackhi_epi16(ix0, iy0); + _mm_storeu_si128((__m128i*)(XY + x1*2), ix1); + _mm_storeu_si128((__m128i*)(XY + x1*2 + 8), iy1); + } + } + #endif + + for( ; x1 < bcols; x1++ ) + { + int sx = cvRound(sX[x1]*INTER_TAB_SIZE); + int sy = cvRound(sY[x1]*INTER_TAB_SIZE); + int v = (sy & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + (sx & (INTER_TAB_SIZE-1)); + XY[x1*2] = (short)(sx >> INTER_BITS); + XY[x1*2+1] = (short)(sy >> INTER_BITS); + A[x1] = (ushort)v; + } + } + else + { + const float* sXY = (const float*)(map1.data + map1.step*(y+y1)) + x*2; + + for( x1 = 0; x1 < bcols; x1++ ) + { + int sx = cvRound(sXY[x1*2]*INTER_TAB_SIZE); + int sy = cvRound(sXY[x1*2+1]*INTER_TAB_SIZE); + int v = (sy & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + (sx & (INTER_TAB_SIZE-1)); + XY[x1*2] = (short)(sx >> INTER_BITS); + XY[x1*2+1] = (short)(sy >> INTER_BITS); + A[x1] = (ushort)v; + } + } + } + ifunc(src, dpart, bufxy, bufa, ctab, borderType, borderValue); + } + } +} + + +void cv::convertMaps( InputArray _map1, InputArray _map2, + 
OutputArray _dstmap1, OutputArray _dstmap2, + int dstm1type, bool nninterpolate ) +{ + Mat map1 = _map1.getMat(), map2 = _map2.getMat(), dstmap1, dstmap2; + Size size = map1.size(); + const Mat *m1 = &map1, *m2 = &map2; + int m1type = m1->type(), m2type = m2->type(); + + CV_Assert( (m1type == CV_16SC2 && (nninterpolate || m2type == CV_16UC1 || m2type == CV_16SC1)) || + (m2type == CV_16SC2 && (nninterpolate || m1type == CV_16UC1 || m1type == CV_16SC1)) || + (m1type == CV_32FC1 && m2type == CV_32FC1) || + (m1type == CV_32FC2 && !m2->data) ); + + if( m2type == CV_16SC2 ) + { + std::swap( m1, m2 ); + std::swap( m1type, m2type ); + } + + if( dstm1type <= 0 ) + dstm1type = m1type == CV_16SC2 ? CV_32FC2 : CV_16SC2; + CV_Assert( dstm1type == CV_16SC2 || dstm1type == CV_32FC1 || dstm1type == CV_32FC2 ); + _dstmap1.create( size, dstm1type ); + dstmap1 = _dstmap1.getMat(); + + if( !nninterpolate && dstm1type != CV_32FC2 ) + { + _dstmap2.create( size, dstm1type == CV_16SC2 ? CV_16UC1 : CV_32FC1 ); + dstmap2 = _dstmap2.getMat(); + } + else + _dstmap2.release(); + + if( m1type == dstm1type || (nninterpolate && + ((m1type == CV_16SC2 && dstm1type == CV_32FC2) || + (m1type == CV_32FC2 && dstm1type == CV_16SC2))) ) + { + m1->convertTo( dstmap1, dstmap1.type() ); + if( dstmap2.data && dstmap2.type() == m2->type() ) + m2->copyTo( dstmap2 ); + return; + } + + if( m1type == CV_32FC1 && dstm1type == CV_32FC2 ) + { + Mat vdata[] = { *m1, *m2 }; + merge( vdata, 2, dstmap1 ); + return; + } + + if( m1type == CV_32FC2 && dstm1type == CV_32FC1 ) + { + Mat mv[] = { dstmap1, dstmap2 }; + split( *m1, mv ); + return; + } + + if( m1->isContinuous() && (!m2->data || m2->isContinuous()) && + dstmap1.isContinuous() && (!dstmap2.data || dstmap2.isContinuous()) ) + { + size.width *= size.height; + size.height = 1; + } + + const float scale = 1.f/INTER_TAB_SIZE; + int x, y; + for( y = 0; y < size.height; y++ ) + { + const float* src1f = (const float*)(m1->data + m1->step*y); + const float* src2f = 
(const float*)(m2->data + m2->step*y); + const short* src1 = (const short*)src1f; + const ushort* src2 = (const ushort*)src2f; + + float* dst1f = (float*)(dstmap1.data + dstmap1.step*y); + float* dst2f = (float*)(dstmap2.data + dstmap2.step*y); + short* dst1 = (short*)dst1f; + ushort* dst2 = (ushort*)dst2f; + + if( m1type == CV_32FC1 && dstm1type == CV_16SC2 ) + { + if( nninterpolate ) + for( x = 0; x < size.width; x++ ) + { + dst1[x*2] = saturate_cast(src1f[x]); + dst1[x*2+1] = saturate_cast(src2f[x]); + } + else + for( x = 0; x < size.width; x++ ) + { + int ix = saturate_cast(src1f[x]*INTER_TAB_SIZE); + int iy = saturate_cast(src2f[x]*INTER_TAB_SIZE); + dst1[x*2] = (short)(ix >> INTER_BITS); + dst1[x*2+1] = (short)(iy >> INTER_BITS); + dst2[x] = (ushort)((iy & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + (ix & (INTER_TAB_SIZE-1))); + } + } + else if( m1type == CV_32FC2 && dstm1type == CV_16SC2 ) + { + if( nninterpolate ) + for( x = 0; x < size.width; x++ ) + { + dst1[x*2] = saturate_cast(src1f[x*2]); + dst1[x*2+1] = saturate_cast(src1f[x*2+1]); + } + else + for( x = 0; x < size.width; x++ ) + { + int ix = saturate_cast(src1f[x*2]*INTER_TAB_SIZE); + int iy = saturate_cast(src1f[x*2+1]*INTER_TAB_SIZE); + dst1[x*2] = (short)(ix >> INTER_BITS); + dst1[x*2+1] = (short)(iy >> INTER_BITS); + dst2[x] = (ushort)((iy & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + (ix & (INTER_TAB_SIZE-1))); + } + } + else if( m1type == CV_16SC2 && dstm1type == CV_32FC1 ) + { + for( x = 0; x < size.width; x++ ) + { + int fxy = src2 ? src2[x] : 0; + dst1f[x] = src1[x*2] + (fxy & (INTER_TAB_SIZE-1))*scale; + dst2f[x] = src1[x*2+1] + (fxy >> INTER_BITS)*scale; + } + } + else if( m1type == CV_16SC2 && dstm1type == CV_32FC2 ) + { + for( x = 0; x < size.width; x++ ) + { + int fxy = src2 ? 
src2[x] : 0; + dst1f[x*2] = src1[x*2] + (fxy & (INTER_TAB_SIZE-1))*scale; + dst1f[x*2+1] = src1[x*2+1] + (fxy >> INTER_BITS)*scale; + } + } + else + CV_Error( CV_StsNotImplemented, "Unsupported combination of input/output matrices" ); + } +} + + +void cv::warpAffine( InputArray _src, OutputArray _dst, + InputArray _M0, Size dsize, + int flags, int borderType, const Scalar& borderValue ) +{ + Mat src = _src.getMat(), M0 = _M0.getMat(); + _dst.create( dsize.area() == 0 ? src.size() : dsize, src.type() ); + Mat dst = _dst.getMat(); + CV_Assert( dst.data != src.data && src.cols > 0 && src.rows > 0 ); + + const int BLOCK_SZ = 64; + short XY[BLOCK_SZ*BLOCK_SZ*2], A[BLOCK_SZ*BLOCK_SZ]; + double M[6]; + Mat matM(2, 3, CV_64F, M); + int interpolation = flags & INTER_MAX; + if( interpolation == INTER_AREA ) + interpolation = INTER_LINEAR; + + CV_Assert( (M0.type() == CV_32F || M0.type() == CV_64F) && M0.rows == 2 && M0.cols == 3 ); + M0.convertTo(matM, matM.type()); + + if( !(flags & WARP_INVERSE_MAP) ) + { + double D = M[0]*M[4] - M[1]*M[3]; + D = D != 0 ? 1./D : 0; + double A11 = M[4]*D, A22=M[0]*D; + M[0] = A11; M[1] *= -D; + M[3] *= -D; M[4] = A22; + double b1 = -M[0]*M[2] - M[1]*M[5]; + double b2 = -M[3]*M[2] - M[4]*M[5]; + M[2] = b1; M[5] = b2; + } + + int x, y, x1, y1, width = dst.cols, height = dst.rows; + AutoBuffer _abdelta(width*2); + int* adelta = &_abdelta[0], *bdelta = adelta + width; + const int AB_BITS = MAX(10, (int)INTER_BITS); + const int AB_SCALE = 1 << AB_BITS; + int round_delta = interpolation == INTER_NEAREST ? 
AB_SCALE/2 : AB_SCALE/INTER_TAB_SIZE/2; +#if CV_SSE2 + bool useSIMD = checkHardwareSupport(CV_CPU_SSE2); +#endif + + for( x = 0; x < width; x++ ) + { + adelta[x] = saturate_cast(M[0]*x*AB_SCALE); + bdelta[x] = saturate_cast(M[3]*x*AB_SCALE); + } + + int bh0 = std::min(BLOCK_SZ/2, height); + int bw0 = std::min(BLOCK_SZ*BLOCK_SZ/bh0, width); + bh0 = std::min(BLOCK_SZ*BLOCK_SZ/bw0, height); + + for( y = 0; y < height; y += bh0 ) + { + for( x = 0; x < width; x += bw0 ) + { + int bw = std::min( bw0, width - x); + int bh = std::min( bh0, height - y); + + Mat _XY(bh, bw, CV_16SC2, XY), matA; + Mat dpart(dst, Rect(x, y, bw, bh)); + + for( y1 = 0; y1 < bh; y1++ ) + { + short* xy = XY + y1*bw*2; + int X0 = saturate_cast((M[1]*(y + y1) + M[2])*AB_SCALE) + round_delta; + int Y0 = saturate_cast((M[4]*(y + y1) + M[5])*AB_SCALE) + round_delta; + + if( interpolation == INTER_NEAREST ) + for( x1 = 0; x1 < bw; x1++ ) + { + int X = (X0 + adelta[x+x1]) >> AB_BITS; + int Y = (Y0 + bdelta[x+x1]) >> AB_BITS; + xy[x1*2] = saturate_cast(X); + xy[x1*2+1] = saturate_cast(Y); + } + else + { + short* alpha = A + y1*bw; + x1 = 0; + #if CV_SSE2 + if( useSIMD ) + { + __m128i fxy_mask = _mm_set1_epi32(INTER_TAB_SIZE - 1); + __m128i XX = _mm_set1_epi32(X0), YY = _mm_set1_epi32(Y0); + for( ; x1 <= bw - 8; x1 += 8 ) + { + __m128i tx0, tx1, ty0, ty1; + tx0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(adelta + x + x1)), XX); + ty0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(bdelta + x + x1)), YY); + tx1 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(adelta + x + x1 + 4)), XX); + ty1 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(bdelta + x + x1 + 4)), YY); + + tx0 = _mm_srai_epi32(tx0, AB_BITS - INTER_BITS); + ty0 = _mm_srai_epi32(ty0, AB_BITS - INTER_BITS); + tx1 = _mm_srai_epi32(tx1, AB_BITS - INTER_BITS); + ty1 = _mm_srai_epi32(ty1, AB_BITS - INTER_BITS); + + __m128i fx_ = _mm_packs_epi32(_mm_and_si128(tx0, fxy_mask), + _mm_and_si128(tx1, fxy_mask)); + __m128i fy_ = 
_mm_packs_epi32(_mm_and_si128(ty0, fxy_mask), + _mm_and_si128(ty1, fxy_mask)); + tx0 = _mm_packs_epi32(_mm_srai_epi32(tx0, INTER_BITS), + _mm_srai_epi32(tx1, INTER_BITS)); + ty0 = _mm_packs_epi32(_mm_srai_epi32(ty0, INTER_BITS), + _mm_srai_epi32(ty1, INTER_BITS)); + fx_ = _mm_adds_epi16(fx_, _mm_slli_epi16(fy_, INTER_BITS)); + + _mm_storeu_si128((__m128i*)(xy + x1*2), _mm_unpacklo_epi16(tx0, ty0)); + _mm_storeu_si128((__m128i*)(xy + x1*2 + 8), _mm_unpackhi_epi16(tx0, ty0)); + _mm_storeu_si128((__m128i*)(alpha + x1), fx_); + } + } + #endif + for( ; x1 < bw; x1++ ) + { + int X = (X0 + adelta[x+x1]) >> (AB_BITS - INTER_BITS); + int Y = (Y0 + bdelta[x+x1]) >> (AB_BITS - INTER_BITS); + xy[x1*2] = saturate_cast(X >> INTER_BITS); + xy[x1*2+1] = saturate_cast(Y >> INTER_BITS); + alpha[x1] = (short)((Y & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + + (X & (INTER_TAB_SIZE-1))); + } + } + } + + if( interpolation == INTER_NEAREST ) + remap( src, dpart, _XY, Mat(), interpolation, borderType, borderValue ); + else + { + Mat matA(bh, bw, CV_16U, A); + remap( src, dpart, _XY, matA, interpolation, borderType, borderValue ); + } + } + } +} + + +void cv::warpPerspective( InputArray _src, OutputArray _dst, InputArray _M0, + Size dsize, int flags, int borderType, const Scalar& borderValue ) +{ + Mat src = _src.getMat(), M0 = _M0.getMat(); + _dst.create( dsize.area() == 0 ? 
src.size() : dsize, src.type() ); + Mat dst = _dst.getMat(); + + CV_Assert( dst.data != src.data && src.cols > 0 && src.rows > 0 ); + + const int BLOCK_SZ = 32; + short XY[BLOCK_SZ*BLOCK_SZ*2], A[BLOCK_SZ*BLOCK_SZ]; + double M[9]; + Mat matM(3, 3, CV_64F, M); + int interpolation = flags & INTER_MAX; + if( interpolation == INTER_AREA ) + interpolation = INTER_LINEAR; + + CV_Assert( (M0.type() == CV_32F || M0.type() == CV_64F) && M0.rows == 3 && M0.cols == 3 ); + M0.convertTo(matM, matM.type()); + + if( !(flags & WARP_INVERSE_MAP) ) + invert(matM, matM); + + int x, y, x1, y1, width = dst.cols, height = dst.rows; + + int bh0 = std::min(BLOCK_SZ/2, height); + int bw0 = std::min(BLOCK_SZ*BLOCK_SZ/bh0, width); + bh0 = std::min(BLOCK_SZ*BLOCK_SZ/bw0, height); + + for( y = 0; y < height; y += bh0 ) + { + for( x = 0; x < width; x += bw0 ) + { + int bw = std::min( bw0, width - x); + int bh = std::min( bh0, height - y); + + Mat _XY(bh, bw, CV_16SC2, XY), matA; + Mat dpart(dst, Rect(x, y, bw, bh)); + + for( y1 = 0; y1 < bh; y1++ ) + { + short* xy = XY + y1*bw*2; + double X0 = M[0]*x + M[1]*(y + y1) + M[2]; + double Y0 = M[3]*x + M[4]*(y + y1) + M[5]; + double W0 = M[6]*x + M[7]*(y + y1) + M[8]; + + if( interpolation == INTER_NEAREST ) + for( x1 = 0; x1 < bw; x1++ ) + { + double W = W0 + M[6]*x1; + W = W ? 1./W : 0; + double fX = std::max((double)INT_MIN, std::min((double)INT_MAX, (X0 + M[0]*x1)*W)); + double fY = std::max((double)INT_MIN, std::min((double)INT_MAX, (Y0 + M[3]*x1)*W)); + int X = saturate_cast(fX); + int Y = saturate_cast(fY); + + xy[x1*2] = saturate_cast(X); + xy[x1*2+1] = saturate_cast(Y); + } + else + { + short* alpha = A + y1*bw; + for( x1 = 0; x1 < bw; x1++ ) + { + double W = W0 + M[6]*x1; + W = W ? 
INTER_TAB_SIZE/W : 0; + double fX = std::max((double)INT_MIN, std::min((double)INT_MAX, (X0 + M[0]*x1)*W)); + double fY = std::max((double)INT_MIN, std::min((double)INT_MAX, (Y0 + M[3]*x1)*W)); + int X = saturate_cast(fX); + int Y = saturate_cast(fY); + + xy[x1*2] = saturate_cast(X >> INTER_BITS); + xy[x1*2+1] = saturate_cast(Y >> INTER_BITS); + alpha[x1] = (short)((Y & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + + (X & (INTER_TAB_SIZE-1))); + } + } + } + + if( interpolation == INTER_NEAREST ) + remap( src, dpart, _XY, Mat(), interpolation, borderType, borderValue ); + else + { + Mat matA(bh, bw, CV_16U, A); + remap( src, dpart, _XY, matA, interpolation, borderType, borderValue ); + } + } + } +} + + +cv::Mat cv::getRotationMatrix2D( Point2f center, double angle, double scale ) +{ + angle *= CV_PI/180; + double alpha = cos(angle)*scale; + double beta = sin(angle)*scale; + + Mat M(2, 3, CV_64F); + double* m = (double*)M.data; + + m[0] = alpha; + m[1] = beta; + m[2] = (1-alpha)*center.x - beta*center.y; + m[3] = -beta; + m[4] = alpha; + m[5] = beta*center.x + (1-alpha)*center.y; + + return M; +} + +/* Calculates coefficients of perspective transformation + * which maps (xi,yi) to (ui,vi), (i=1,2,3,4): + * + * c00*xi + c01*yi + c02 + * ui = --------------------- + * c20*xi + c21*yi + c22 + * + * c10*xi + c11*yi + c12 + * vi = --------------------- + * c20*xi + c21*yi + c22 + * + * Coefficients are calculated by solving linear system: + * / x0 y0 1 0 0 0 -x0*u0 -y0*u0 \ /c00\ /u0\ + * | x1 y1 1 0 0 0 -x1*u1 -y1*u1 | |c01| |u1| + * | x2 y2 1 0 0 0 -x2*u2 -y2*u2 | |c02| |u2| + * | x3 y3 1 0 0 0 -x3*u3 -y3*u3 |.|c10|=|u3|, + * | 0 0 0 x0 y0 1 -x0*v0 -y0*v0 | |c11| |v0| + * | 0 0 0 x1 y1 1 -x1*v1 -y1*v1 | |c12| |v1| + * | 0 0 0 x2 y2 1 -x2*v2 -y2*v2 | |c20| |v2| + * \ 0 0 0 x3 y3 1 -x3*v3 -y3*v3 / \c21/ \v3/ + * + * where: + * cij - matrix coefficients, c22 = 1 + */ +cv::Mat cv::getPerspectiveTransform( const Point2f src[], const Point2f dst[] ) +{ + Mat M(3, 3, CV_64F), X(8, 1, 
CV_64F, M.data); + double a[8][8], b[8]; + Mat A(8, 8, CV_64F, a), B(8, 1, CV_64F, b); + + for( int i = 0; i < 4; ++i ) + { + a[i][0] = a[i+4][3] = src[i].x; + a[i][1] = a[i+4][4] = src[i].y; + a[i][2] = a[i+4][5] = 1; + a[i][3] = a[i][4] = a[i][5] = + a[i+4][0] = a[i+4][1] = a[i+4][2] = 0; + a[i][6] = -src[i].x*dst[i].x; + a[i][7] = -src[i].y*dst[i].x; + a[i+4][6] = -src[i].x*dst[i].y; + a[i+4][7] = -src[i].y*dst[i].y; + b[i] = dst[i].x; + b[i+4] = dst[i].y; + } + + solve( A, B, X, DECOMP_SVD ); + ((double*)M.data)[8] = 1.; + + return M; +} + +/* Calculates coefficients of affine transformation + * which maps (xi,yi) to (ui,vi), (i=1,2,3): + * + * ui = c00*xi + c01*yi + c02 + * + * vi = c10*xi + c11*yi + c12 + * + * Coefficients are calculated by solving linear system: + * / x0 y0 1 0 0 0 \ /c00\ /u0\ + * | x1 y1 1 0 0 0 | |c01| |u1| + * | x2 y2 1 0 0 0 | |c02| |u2| + * | 0 0 0 x0 y0 1 | |c10| |v0| + * | 0 0 0 x1 y1 1 | |c11| |v1| + * \ 0 0 0 x2 y2 1 / |c12| |v2| + * + * where: + * cij - matrix coefficients + */ +cv::Mat cv::getAffineTransform( const Point2f src[], const Point2f dst[] ) +{ + Mat M(2, 3, CV_64F), X(6, 1, CV_64F, M.data); + double a[6*6], b[6]; + Mat A(6, 6, CV_64F, a), B(6, 1, CV_64F, b); + + for( int i = 0; i < 3; i++ ) + { + int j = i*12; + int k = i*12+6; + a[j] = a[k+3] = src[i].x; + a[j+1] = a[k+4] = src[i].y; + a[j+2] = a[k+5] = 1; + a[j+3] = a[j+4] = a[j+5] = 0; + a[k] = a[k+1] = a[k+2] = 0; + b[i*2] = dst[i].x; + b[i*2+1] = dst[i].y; + } + + solve( A, B, X ); + return M; +} + +void cv::invertAffineTransform(InputArray _matM, OutputArray __iM) +{ + Mat matM = _matM.getMat(); + CV_Assert(matM.rows == 2 && matM.cols == 3); + __iM.create(2, 3, matM.type()); + Mat _iM = __iM.getMat(); + + if( matM.type() == CV_32F ) + { + const float* M = (const float*)matM.data; + float* iM = (float*)_iM.data; + int step = (int)(matM.step/sizeof(M[0])), istep = (int)(_iM.step/sizeof(iM[0])); + + double D = M[0]*M[step+1] - M[1]*M[step]; + D = D != 0 ? 
1./D : 0; + double A11 = M[step+1]*D, A22 = M[0]*D, A12 = -M[1]*D, A21 = -M[step]*D; + double b1 = -A11*M[2] - A12*M[step+2]; + double b2 = -A21*M[2] - A22*M[step+2]; + + iM[0] = (float)A11; iM[1] = (float)A12; iM[2] = (float)b1; + iM[istep] = (float)A21; iM[istep+1] = (float)A22; iM[istep+2] = (float)b2; + } + else if( matM.type() == CV_64F ) + { + const double* M = (const double*)matM.data; + double* iM = (double*)_iM.data; + int step = (int)(matM.step/sizeof(M[0])), istep = (int)(_iM.step/sizeof(iM[0])); + + double D = M[0]*M[step+1] - M[1]*M[step]; + D = D != 0 ? 1./D : 0; + double A11 = M[step+1]*D, A22 = M[0]*D, A12 = -M[1]*D, A21 = -M[step]*D; + double b1 = -A11*M[2] - A12*M[step+2]; + double b2 = -A21*M[2] - A22*M[step+2]; + + iM[0] = A11; iM[1] = A12; iM[2] = b1; + iM[istep] = A21; iM[istep+1] = A22; iM[istep+2] = b2; + } + else + CV_Error( CV_StsUnsupportedFormat, "" ); +} + + +cv::Mat cv::getPerspectiveTransform(InputArray _src, InputArray _dst) +{ + Mat src = _src.getMat(), dst = _dst.getMat(); + CV_Assert(src.checkVector(2, CV_32F) == 4 && dst.checkVector(2, CV_32F) == 4); + return getPerspectiveTransform((const Point2f*)src.data, (const Point2f*)dst.data); +} + +cv::Mat cv::getAffineTransform(InputArray _src, InputArray _dst) +{ + Mat src = _src.getMat(), dst = _dst.getMat(); + CV_Assert(src.checkVector(2, CV_32F) == 3 && dst.checkVector(2, CV_32F) == 3); + return getAffineTransform((const Point2f*)src.data, (const Point2f*)dst.data); +} + + +CV_IMPL void +cvResize( const CvArr* srcarr, CvArr* dstarr, int method ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + CV_Assert( src.type() == dst.type() ); + cv::resize( src, dst, dst.size(), (double)dst.cols/src.cols, + (double)dst.rows/src.rows, method ); +} + + +CV_IMPL void +cvWarpAffine( const CvArr* srcarr, CvArr* dstarr, const CvMat* marr, + int flags, CvScalar fillval ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + cv::Mat matrix = 
cv::cvarrToMat(marr); + CV_Assert( src.type() == dst.type() ); + cv::warpAffine( src, dst, matrix, dst.size(), flags, + (flags & CV_WARP_FILL_OUTLIERS) ? cv::BORDER_CONSTANT : cv::BORDER_TRANSPARENT, + fillval ); +} + +CV_IMPL void +cvWarpPerspective( const CvArr* srcarr, CvArr* dstarr, const CvMat* marr, + int flags, CvScalar fillval ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + cv::Mat matrix = cv::cvarrToMat(marr); + CV_Assert( src.type() == dst.type() ); + cv::warpPerspective( src, dst, matrix, dst.size(), flags, + (flags & CV_WARP_FILL_OUTLIERS) ? cv::BORDER_CONSTANT : cv::BORDER_TRANSPARENT, + fillval ); +} + +CV_IMPL void +cvRemap( const CvArr* srcarr, CvArr* dstarr, + const CvArr* _mapx, const CvArr* _mapy, + int flags, CvScalar fillval ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), dst0 = dst; + cv::Mat mapx = cv::cvarrToMat(_mapx), mapy = cv::cvarrToMat(_mapy); + CV_Assert( src.type() == dst.type() && dst.size() == mapx.size() ); + cv::remap( src, dst, mapx, mapy, flags & cv::INTER_MAX, + (flags & CV_WARP_FILL_OUTLIERS) ? 
cv::BORDER_CONSTANT : cv::BORDER_TRANSPARENT, + fillval ); + CV_Assert( dst0.data == dst.data ); +} + + +CV_IMPL CvMat* +cv2DRotationMatrix( CvPoint2D32f center, double angle, + double scale, CvMat* matrix ) +{ + cv::Mat M0 = cv::cvarrToMat(matrix), M = cv::getRotationMatrix2D(center, angle, scale); + CV_Assert( M.size() == M.size() ); + M.convertTo(M0, M0.type()); + return matrix; +} + + +CV_IMPL CvMat* +cvGetPerspectiveTransform( const CvPoint2D32f* src, + const CvPoint2D32f* dst, + CvMat* matrix ) +{ + cv::Mat M0 = cv::cvarrToMat(matrix), + M = cv::getPerspectiveTransform((const cv::Point2f*)src, (const cv::Point2f*)dst); + CV_Assert( M.size() == M.size() ); + M.convertTo(M0, M0.type()); + return matrix; +} + + +CV_IMPL CvMat* +cvGetAffineTransform( const CvPoint2D32f* src, + const CvPoint2D32f* dst, + CvMat* matrix ) +{ + cv::Mat M0 = cv::cvarrToMat(matrix), + M = cv::getAffineTransform((const cv::Point2f*)src, (const cv::Point2f*)dst); + CV_Assert( M.size() == M0.size() ); + M.convertTo(M0, M0.type()); + return matrix; +} + + +CV_IMPL void +cvConvertMaps( const CvArr* arr1, const CvArr* arr2, CvArr* dstarr1, CvArr* dstarr2 ) +{ + cv::Mat map1 = cv::cvarrToMat(arr1), map2; + cv::Mat dstmap1 = cv::cvarrToMat(dstarr1), dstmap2; + + if( arr2 ) + map2 = cv::cvarrToMat(arr2); + if( dstarr2 ) + { + dstmap2 = cv::cvarrToMat(dstarr2); + if( dstmap2.type() == CV_16SC1 ) + dstmap2 = cv::Mat(dstmap2.size(), CV_16UC1, dstmap2.data, dstmap2.step); + } + + cv::convertMaps( map1, map2, dstmap1, dstmap2, dstmap1.type(), false ); +} + +/****************************************************************************************\ +* Log-Polar Transform * +\****************************************************************************************/ + +/* now it is done via Remap; more correct implementation should use + some super-sampling technique outside of the "fovea" circle */ +CV_IMPL void +cvLogPolar( const CvArr* srcarr, CvArr* dstarr, + CvPoint2D32f center, double M, int flags 
) +{ + cv::Ptr mapx, mapy; + + CvMat srcstub, *src = cvGetMat(srcarr, &srcstub); + CvMat dststub, *dst = cvGetMat(dstarr, &dststub); + CvSize ssize, dsize; + + if( !CV_ARE_TYPES_EQ( src, dst )) + CV_Error( CV_StsUnmatchedFormats, "" ); + + if( M <= 0 ) + CV_Error( CV_StsOutOfRange, "M should be >0" ); + + ssize = cvGetMatSize(src); + dsize = cvGetMatSize(dst); + + mapx = cvCreateMat( dsize.height, dsize.width, CV_32F ); + mapy = cvCreateMat( dsize.height, dsize.width, CV_32F ); + + if( !(flags & CV_WARP_INVERSE_MAP) ) + { + int phi, rho; + cv::AutoBuffer _exp_tab(dsize.width); + double* exp_tab = _exp_tab; + + for( rho = 0; rho < dst->width; rho++ ) + exp_tab[rho] = std::exp(rho/M); + + for( phi = 0; phi < dsize.height; phi++ ) + { + double cp = cos(phi*2*CV_PI/dsize.height); + double sp = sin(phi*2*CV_PI/dsize.height); + float* mx = (float*)(mapx->data.ptr + phi*mapx->step); + float* my = (float*)(mapy->data.ptr + phi*mapy->step); + + for( rho = 0; rho < dsize.width; rho++ ) + { + double r = exp_tab[rho]; + double x = r*cp + center.x; + double y = r*sp + center.y; + + mx[rho] = (float)x; + my[rho] = (float)y; + } + } + } + else + { + int x, y; + CvMat bufx, bufy, bufp, bufa; + double ascale = ssize.height/(2*CV_PI); + cv::AutoBuffer _buf(4*dsize.width); + float* buf = _buf; + + bufx = cvMat( 1, dsize.width, CV_32F, buf ); + bufy = cvMat( 1, dsize.width, CV_32F, buf + dsize.width ); + bufp = cvMat( 1, dsize.width, CV_32F, buf + dsize.width*2 ); + bufa = cvMat( 1, dsize.width, CV_32F, buf + dsize.width*3 ); + + for( x = 0; x < dsize.width; x++ ) + bufx.data.fl[x] = (float)x - center.x; + + for( y = 0; y < dsize.height; y++ ) + { + float* mx = (float*)(mapx->data.ptr + y*mapx->step); + float* my = (float*)(mapy->data.ptr + y*mapy->step); + + for( x = 0; x < dsize.width; x++ ) + bufy.data.fl[x] = (float)y - center.y; + +#if 1 + cvCartToPolar( &bufx, &bufy, &bufp, &bufa ); + + for( x = 0; x < dsize.width; x++ ) + bufp.data.fl[x] += 1.f; + + cvLog( &bufp, &bufp ); + + 
for( x = 0; x < dsize.width; x++ ) + { + double rho = bufp.data.fl[x]*M; + double phi = bufa.data.fl[x]*ascale; + + mx[x] = (float)rho; + my[x] = (float)phi; + } +#else + for( x = 0; x < dsize.width; x++ ) + { + double xx = bufx.data.fl[x]; + double yy = bufy.data.fl[x]; + + double p = log(sqrt(xx*xx + yy*yy) + 1.)*M; + double a = atan2(yy,xx); + if( a < 0 ) + a = 2*CV_PI + a; + a *= ascale; + + mx[x] = (float)p; + my[x] = (float)a; + } +#endif + } + } + + cvRemap( src, dst, mapx, mapy, flags, cvScalarAll(0) ); +} + + +/**************************************************************************************** + Linear-Polar Transform + J.L. Blanco, Apr 2009 + ****************************************************************************************/ +CV_IMPL +void cvLinearPolar( const CvArr* srcarr, CvArr* dstarr, + CvPoint2D32f center, double maxRadius, int flags ) +{ + cv::Ptr mapx, mapy; + + CvMat srcstub, *src = (CvMat*)srcarr; + CvMat dststub, *dst = (CvMat*)dstarr; + CvSize ssize, dsize; + + src = cvGetMat( srcarr, &srcstub,0,0 ); + dst = cvGetMat( dstarr, &dststub,0,0 ); + + if( !CV_ARE_TYPES_EQ( src, dst )) + CV_Error( CV_StsUnmatchedFormats, "" ); + + ssize.width = src->cols; + ssize.height = src->rows; + dsize.width = dst->cols; + dsize.height = dst->rows; + + mapx = cvCreateMat( dsize.height, dsize.width, CV_32F ); + mapy = cvCreateMat( dsize.height, dsize.width, CV_32F ); + + if( !(flags & CV_WARP_INVERSE_MAP) ) + { + int phi, rho; + + for( phi = 0; phi < dsize.height; phi++ ) + { + double cp = cos(phi*2*CV_PI/dsize.height); + double sp = sin(phi*2*CV_PI/dsize.height); + float* mx = (float*)(mapx->data.ptr + phi*mapx->step); + float* my = (float*)(mapy->data.ptr + phi*mapy->step); + + for( rho = 0; rho < dsize.width; rho++ ) + { + double r = maxRadius*(rho+1)/dsize.width; + double x = r*cp + center.x; + double y = r*sp + center.y; + + mx[rho] = (float)x; + my[rho] = (float)y; + } + } + } + else + { + int x, y; + CvMat bufx, bufy, bufp, bufa; + const double 
ascale = ssize.height/(2*CV_PI); + const double pscale = ssize.width/maxRadius; + + cv::AutoBuffer _buf(4*dsize.width); + float* buf = _buf; + + bufx = cvMat( 1, dsize.width, CV_32F, buf ); + bufy = cvMat( 1, dsize.width, CV_32F, buf + dsize.width ); + bufp = cvMat( 1, dsize.width, CV_32F, buf + dsize.width*2 ); + bufa = cvMat( 1, dsize.width, CV_32F, buf + dsize.width*3 ); + + for( x = 0; x < dsize.width; x++ ) + bufx.data.fl[x] = (float)x - center.x; + + for( y = 0; y < dsize.height; y++ ) + { + float* mx = (float*)(mapx->data.ptr + y*mapx->step); + float* my = (float*)(mapy->data.ptr + y*mapy->step); + + for( x = 0; x < dsize.width; x++ ) + bufy.data.fl[x] = (float)y - center.y; + + cvCartToPolar( &bufx, &bufy, &bufp, &bufa, 0 ); + + for( x = 0; x < dsize.width; x++ ) + bufp.data.fl[x] += 1.f; + + for( x = 0; x < dsize.width; x++ ) + { + double rho = bufp.data.fl[x]*pscale; + double phi = bufa.data.fl[x]*ascale; + mx[x] = (float)rho; + my[x] = (float)phi; + } + } + } + + cvRemap( src, dst, mapx, mapy, flags, cvScalarAll(0) ); +} + + +/* End of file. */ diff --git a/opencv/imgproc/inpaint.cpp b/opencv/imgproc/inpaint.cpp new file mode 100644 index 0000000..2ca3170 --- /dev/null +++ b/opencv/imgproc/inpaint.cpp @@ -0,0 +1,817 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective icvers. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* //////////////////////////////////////////////////////////////////// +// +// Geometrical transforms on images and matrices: rotation, zoom etc. 
+// +// */ + +#include "precomp.hpp" + +#undef CV_MAT_ELEM_PTR_FAST +#define CV_MAT_ELEM_PTR_FAST( mat, row, col, pix_size ) \ + ((mat).data.ptr + (size_t)(mat).step*(row) + (pix_size)*(col)) + +inline float +min4( float a, float b, float c, float d ) +{ + a = MIN(a,b); + c = MIN(c,d); + return MIN(a,c); +} + +#define CV_MAT_3COLOR_ELEM(img,type,y,x,c) CV_MAT_ELEM(img,type,y,(x)*3+(c)) +#define KNOWN 0 //known outside narrow band +#define BAND 1 //narrow band (known) +#define INSIDE 2 //unknown +#define CHANGE 3 //servise + +typedef struct CvHeapElem +{ + float T; + int i,j; + struct CvHeapElem* prev; + struct CvHeapElem* next; +} +CvHeapElem; + + +class CvPriorityQueueFloat +{ +protected: + CvHeapElem *mem,*empty,*head,*tail; + int num,in; + +public: + bool Init( const CvMat* f ) + { + int i,j; + for( i = num = 0; i < f->rows; i++ ) + { + for( j = 0; j < f->cols; j++ ) + num += CV_MAT_ELEM(*f,uchar,i,j)!=0; + } + if (num<=0) return false; + mem = (CvHeapElem*)cvAlloc((num+2)*sizeof(CvHeapElem)); + if (mem==NULL) return false; + + head = mem; + head->i = head->j = -1; + head->prev = NULL; + head->next = mem+1; + head->T = -FLT_MAX; + empty = mem+1; + for (i=1; i<=num; i++) { + mem[i].prev = mem+i-1; + mem[i].next = mem+i+1; + mem[i].i = -1; + mem[i].T = FLT_MAX; + } + tail = mem+i; + tail->i = tail->j = -1; + tail->prev = mem+i-1; + tail->next = NULL; + tail->T = FLT_MAX; + return true; + } + + bool Add(const CvMat* f) { + int i,j; + for (i=0; irows; i++) { + for (j=0; jcols; j++) { + if (CV_MAT_ELEM(*f,uchar,i,j)!=0) { + if (!Push(i,j,0)) return false; + } + } + } + return true; + } + + bool Push(int i, int j, float T) { + CvHeapElem *tmp=empty,*add=empty; + if (empty==tail) return false; + while (tmp->prev->T>T) tmp = tmp->prev; + if (tmp!=empty) { + add->prev->next = add->next; + add->next->prev = add->prev; + empty = add->next; + add->prev = tmp->prev; + add->next = tmp; + add->prev->next = add; + add->next->prev = add; + } else { + empty = empty->next; + } + 
add->i = i; + add->j = j; + add->T = T; + in++; + // printf("push i %3d j %3d T %12.4e in %4d\n",i,j,T,in); + return true; + } + + bool Pop(int *i, int *j) { + CvHeapElem *tmp=head->next; + if (empty==tmp) return false; + *i = tmp->i; + *j = tmp->j; + tmp->prev->next = tmp->next; + tmp->next->prev = tmp->prev; + tmp->prev = empty->prev; + tmp->next = empty; + tmp->prev->next = tmp; + tmp->next->prev = tmp; + empty = tmp; + in--; + // printf("pop i %3d j %3d T %12.4e in %4d\n",tmp->i,tmp->j,tmp->T,in); + return true; + } + + bool Pop(int *i, int *j, float *T) { + CvHeapElem *tmp=head->next; + if (empty==tmp) return false; + *i = tmp->i; + *j = tmp->j; + *T = tmp->T; + tmp->prev->next = tmp->next; + tmp->next->prev = tmp->prev; + tmp->prev = empty->prev; + tmp->next = empty; + tmp->prev->next = tmp; + tmp->next->prev = tmp; + empty = tmp; + in--; + // printf("pop i %3d j %3d T %12.4e in %4d\n",tmp->i,tmp->j,tmp->T,in); + return true; + } + + CvPriorityQueueFloat(void) { + num=in=0; + mem=empty=head=tail=NULL; + } + + ~CvPriorityQueueFloat(void) + { + cvFree( &mem ); + } +}; + +inline float VectorScalMult(CvPoint2D32f v1,CvPoint2D32f v2) { + return v1.x*v2.x+v1.y*v2.y; +} + +inline float VectorLength(CvPoint2D32f v1) { + return v1.x*v1.x+v1.y*v1.y; +} + +/////////////////////////////////////////////////////////////////////////////////////////// +//HEAP::iterator Heap_Iterator; +//HEAP Heap; + +float FastMarching_solve(int i1,int j1,int i2,int j2, const CvMat* f, const CvMat* t) +{ + double sol, a11, a22, m12; + a11=CV_MAT_ELEM(*t,float,i1,j1); + a22=CV_MAT_ELEM(*t,float,i2,j2); + m12=MIN(a11,a22); + + if( CV_MAT_ELEM(*f,uchar,i1,j1) != INSIDE ) + if( CV_MAT_ELEM(*f,uchar,i2,j2) != INSIDE ) + if( fabs(a11-a22) >= 1.0 ) + sol = 1+m12; + else + sol = (a11+a22+sqrt((double)(2-(a11-a22)*(a11-a22))))*0.5; + else + sol = 1+a11; + else if( CV_MAT_ELEM(*f,uchar,i2,j2) != INSIDE ) + sol = 1+a22; + else + sol = 1+m12; + + return (float)sol; +} + 
+///////////////////////////////////////////////////////////////////////////////////// + + +static void +icvCalcFMM(const CvMat *f, CvMat *t, CvPriorityQueueFloat *Heap, bool negate) { + int i, j, ii = 0, jj = 0, q; + float dist; + + while (Heap->Pop(&ii,&jj)) { + + unsigned known=(negate)?CHANGE:KNOWN; + CV_MAT_ELEM(*f,uchar,ii,jj) = (uchar)known; + + for (q=0; q<4; q++) { + i=0; j=0; + if (q==0) {i=ii-1; j=jj;} + else if(q==1) {i=ii; j=jj-1;} + else if(q==2) {i=ii+1; j=jj;} + else {i=ii; j=jj+1;} + if ((i<=0)||(j<=0)||(i>f->rows)||(j>f->cols)) continue; + + if (CV_MAT_ELEM(*f,uchar,i,j)==INSIDE) { + dist = min4(FastMarching_solve(i-1,j,i,j-1,f,t), + FastMarching_solve(i+1,j,i,j-1,f,t), + FastMarching_solve(i-1,j,i,j+1,f,t), + FastMarching_solve(i+1,j,i,j+1,f,t)); + CV_MAT_ELEM(*t,float,i,j) = dist; + CV_MAT_ELEM(*f,uchar,i,j) = BAND; + Heap->Push(i,j,dist); + } + } + } + + if (negate) { + for (i=0; irows; i++) { + for(j=0; jcols; j++) { + if (CV_MAT_ELEM(*f,uchar,i,j) == CHANGE) { + CV_MAT_ELEM(*f,uchar,i,j) = KNOWN; + CV_MAT_ELEM(*t,float,i,j) = -CV_MAT_ELEM(*t,float,i,j); + } + } + } + } +} + + +static void +icvTeleaInpaintFMM(const CvMat *f, CvMat *t, CvMat *out, int range, CvPriorityQueueFloat *Heap ) { + int i = 0, j = 0, ii = 0, jj = 0, k, l, q, color = 0; + float dist; + + if (CV_MAT_CN(out->type)==3) { + + while (Heap->Pop(&ii,&jj)) { + + CV_MAT_ELEM(*f,uchar,ii,jj) = KNOWN; + for(q=0; q<4; q++) { + if (q==0) {i=ii-1; j=jj;} + else if(q==1) {i=ii; j=jj-1;} + else if(q==2) {i=ii+1; j=jj;} + else if(q==3) {i=ii; j=jj+1;} + if ((i<=1)||(j<=1)||(i>t->rows-1)||(j>t->cols-1)) continue; + + if (CV_MAT_ELEM(*f,uchar,i,j)==INSIDE) { + dist = min4(FastMarching_solve(i-1,j,i,j-1,f,t), + FastMarching_solve(i+1,j,i,j-1,f,t), + FastMarching_solve(i-1,j,i,j+1,f,t), + FastMarching_solve(i+1,j,i,j+1,f,t)); + CV_MAT_ELEM(*t,float,i,j) = dist; + + for (color=0; color<=2; color++) { + CvPoint2D32f gradI,gradT,r; + float Ia=0,Jx=0,Jy=0,s=1.0e-20f,w,dst,lev,dir,sat; + + if 
(CV_MAT_ELEM(*f,uchar,i,j+1)!=INSIDE) { + if (CV_MAT_ELEM(*f,uchar,i,j-1)!=INSIDE) { + gradT.x=(float)((CV_MAT_ELEM(*t,float,i,j+1)-CV_MAT_ELEM(*t,float,i,j-1)))*0.5f; + } else { + gradT.x=(float)((CV_MAT_ELEM(*t,float,i,j+1)-CV_MAT_ELEM(*t,float,i,j))); + } + } else { + if (CV_MAT_ELEM(*f,uchar,i,j-1)!=INSIDE) { + gradT.x=(float)((CV_MAT_ELEM(*t,float,i,j)-CV_MAT_ELEM(*t,float,i,j-1))); + } else { + gradT.x=0; + } + } + if (CV_MAT_ELEM(*f,uchar,i+1,j)!=INSIDE) { + if (CV_MAT_ELEM(*f,uchar,i-1,j)!=INSIDE) { + gradT.y=(float)((CV_MAT_ELEM(*t,float,i+1,j)-CV_MAT_ELEM(*t,float,i-1,j)))*0.5f; + } else { + gradT.y=(float)((CV_MAT_ELEM(*t,float,i+1,j)-CV_MAT_ELEM(*t,float,i,j))); + } + } else { + if (CV_MAT_ELEM(*f,uchar,i-1,j)!=INSIDE) { + gradT.y=(float)((CV_MAT_ELEM(*t,float,i,j)-CV_MAT_ELEM(*t,float,i-1,j))); + } else { + gradT.y=0; + } + } + for (k=i-range; k<=i+range; k++) { + int km=k-1+(k==1),kp=k-1-(k==t->rows-2); + for (l=j-range; l<=j+range; l++) { + int lm=l-1+(l==1),lp=l-1-(l==t->cols-2); + if (k>0&&l>0&&krows-1&&lcols-1) { + if ((CV_MAT_ELEM(*f,uchar,k,l)!=INSIDE)&& + ((l-j)*(l-j)+(k-i)*(k-i)<=range*range)) { + r.y = (float)(i-k); + r.x = (float)(j-l); + + dst = (float)(1./(VectorLength(r)*sqrt((double)VectorLength(r)))); + lev = (float)(1./(1+fabs(CV_MAT_ELEM(*t,float,k,l)-CV_MAT_ELEM(*t,float,i,j)))); + + dir=VectorScalMult(r,gradT); + if (fabs(dir)<=0.01) dir=0.000001f; + w = (float)fabs(dst*lev*dir); + + if (CV_MAT_ELEM(*f,uchar,k,l+1)!=INSIDE) { + if (CV_MAT_ELEM(*f,uchar,k,l-1)!=INSIDE) { + gradI.x=(float)((CV_MAT_3COLOR_ELEM(*out,uchar,km,lp+1,color)-CV_MAT_3COLOR_ELEM(*out,uchar,km,lm-1,color)))*2.0f; + } else { + gradI.x=(float)((CV_MAT_3COLOR_ELEM(*out,uchar,km,lp+1,color)-CV_MAT_3COLOR_ELEM(*out,uchar,km,lm,color))); + } + } else { + if (CV_MAT_ELEM(*f,uchar,k,l-1)!=INSIDE) { + gradI.x=(float)((CV_MAT_3COLOR_ELEM(*out,uchar,km,lp,color)-CV_MAT_3COLOR_ELEM(*out,uchar,km,lm-1,color))); + } else { + gradI.x=0; + } + } + if 
(CV_MAT_ELEM(*f,uchar,k+1,l)!=INSIDE) { + if (CV_MAT_ELEM(*f,uchar,k-1,l)!=INSIDE) { + gradI.y=(float)((CV_MAT_3COLOR_ELEM(*out,uchar,kp+1,lm,color)-CV_MAT_3COLOR_ELEM(*out,uchar,km-1,lm,color)))*2.0f; + } else { + gradI.y=(float)((CV_MAT_3COLOR_ELEM(*out,uchar,kp+1,lm,color)-CV_MAT_3COLOR_ELEM(*out,uchar,km,lm,color))); + } + } else { + if (CV_MAT_ELEM(*f,uchar,k-1,l)!=INSIDE) { + gradI.y=(float)((CV_MAT_3COLOR_ELEM(*out,uchar,kp,lm,color)-CV_MAT_3COLOR_ELEM(*out,uchar,km-1,lm,color))); + } else { + gradI.y=0; + } + } + Ia += (float)w * (float)(CV_MAT_3COLOR_ELEM(*out,uchar,km,lm,color)); + Jx -= (float)w * (float)(gradI.x*r.x); + Jy -= (float)w * (float)(gradI.y*r.y); + s += w; + } + } + } + } + sat = (float)((Ia/s+(Jx+Jy)/(sqrt(Jx*Jx+Jy*Jy)+1.0e-20f)+0.5f)); + { + int isat = cvRound(sat); + CV_MAT_3COLOR_ELEM(*out,uchar,i-1,j-1,color) = CV_CAST_8U(isat); + } + } + + CV_MAT_ELEM(*f,uchar,i,j) = BAND; + Heap->Push(i,j,dist); + } + } + } + + } else if (CV_MAT_CN(out->type)==1) { + + while (Heap->Pop(&ii,&jj)) { + + CV_MAT_ELEM(*f,uchar,ii,jj) = KNOWN; + for(q=0; q<4; q++) { + if (q==0) {i=ii-1; j=jj;} + else if(q==1) {i=ii; j=jj-1;} + else if(q==2) {i=ii+1; j=jj;} + else if(q==3) {i=ii; j=jj+1;} + if ((i<=1)||(j<=1)||(i>t->rows-1)||(j>t->cols-1)) continue; + + if (CV_MAT_ELEM(*f,uchar,i,j)==INSIDE) { + dist = min4(FastMarching_solve(i-1,j,i,j-1,f,t), + FastMarching_solve(i+1,j,i,j-1,f,t), + FastMarching_solve(i-1,j,i,j+1,f,t), + FastMarching_solve(i+1,j,i,j+1,f,t)); + CV_MAT_ELEM(*t,float,i,j) = dist; + + for (color=0; color<=0; color++) { + CvPoint2D32f gradI,gradT,r; + float Ia=0,Jx=0,Jy=0,s=1.0e-20f,w,dst,lev,dir,sat; + + if (CV_MAT_ELEM(*f,uchar,i,j+1)!=INSIDE) { + if (CV_MAT_ELEM(*f,uchar,i,j-1)!=INSIDE) { + gradT.x=(float)((CV_MAT_ELEM(*t,float,i,j+1)-CV_MAT_ELEM(*t,float,i,j-1)))*0.5f; + } else { + gradT.x=(float)((CV_MAT_ELEM(*t,float,i,j+1)-CV_MAT_ELEM(*t,float,i,j))); + } + } else { + if (CV_MAT_ELEM(*f,uchar,i,j-1)!=INSIDE) { + 
gradT.x=(float)((CV_MAT_ELEM(*t,float,i,j)-CV_MAT_ELEM(*t,float,i,j-1))); + } else { + gradT.x=0; + } + } + if (CV_MAT_ELEM(*f,uchar,i+1,j)!=INSIDE) { + if (CV_MAT_ELEM(*f,uchar,i-1,j)!=INSIDE) { + gradT.y=(float)((CV_MAT_ELEM(*t,float,i+1,j)-CV_MAT_ELEM(*t,float,i-1,j)))*0.5f; + } else { + gradT.y=(float)((CV_MAT_ELEM(*t,float,i+1,j)-CV_MAT_ELEM(*t,float,i,j))); + } + } else { + if (CV_MAT_ELEM(*f,uchar,i-1,j)!=INSIDE) { + gradT.y=(float)((CV_MAT_ELEM(*t,float,i,j)-CV_MAT_ELEM(*t,float,i-1,j))); + } else { + gradT.y=0; + } + } + for (k=i-range; k<=i+range; k++) { + int km=k-1+(k==1),kp=k-1-(k==t->rows-2); + for (l=j-range; l<=j+range; l++) { + int lm=l-1+(l==1),lp=l-1-(l==t->cols-2); + if (k>0&&l>0&&krows-1&&lcols-1) { + if ((CV_MAT_ELEM(*f,uchar,k,l)!=INSIDE)&& + ((l-j)*(l-j)+(k-i)*(k-i)<=range*range)) { + r.y = (float)(i-k); + r.x = (float)(j-l); + + dst = (float)(1./(VectorLength(r)*sqrt(VectorLength(r)))); + lev = (float)(1./(1+fabs(CV_MAT_ELEM(*t,float,k,l)-CV_MAT_ELEM(*t,float,i,j)))); + + dir=VectorScalMult(r,gradT); + if (fabs(dir)<=0.01) dir=0.000001f; + w = (float)fabs(dst*lev*dir); + + if (CV_MAT_ELEM(*f,uchar,k,l+1)!=INSIDE) { + if (CV_MAT_ELEM(*f,uchar,k,l-1)!=INSIDE) { + gradI.x=(float)((CV_MAT_ELEM(*out,uchar,km,lp+1)-CV_MAT_ELEM(*out,uchar,km,lm-1)))*2.0f; + } else { + gradI.x=(float)((CV_MAT_ELEM(*out,uchar,km,lp+1)-CV_MAT_ELEM(*out,uchar,km,lm))); + } + } else { + if (CV_MAT_ELEM(*f,uchar,k,l-1)!=INSIDE) { + gradI.x=(float)((CV_MAT_ELEM(*out,uchar,km,lp)-CV_MAT_ELEM(*out,uchar,km,lm-1))); + } else { + gradI.x=0; + } + } + if (CV_MAT_ELEM(*f,uchar,k+1,l)!=INSIDE) { + if (CV_MAT_ELEM(*f,uchar,k-1,l)!=INSIDE) { + gradI.y=(float)((CV_MAT_ELEM(*out,uchar,kp+1,lm)-CV_MAT_ELEM(*out,uchar,km-1,lm)))*2.0f; + } else { + gradI.y=(float)((CV_MAT_ELEM(*out,uchar,kp+1,lm)-CV_MAT_ELEM(*out,uchar,km,lm))); + } + } else { + if (CV_MAT_ELEM(*f,uchar,k-1,l)!=INSIDE) { + gradI.y=(float)((CV_MAT_ELEM(*out,uchar,kp,lm)-CV_MAT_ELEM(*out,uchar,km-1,lm))); + } else { + 
gradI.y=0; + } + } + Ia += (float)w * (float)(CV_MAT_ELEM(*out,uchar,km,lm)); + Jx -= (float)w * (float)(gradI.x*r.x); + Jy -= (float)w * (float)(gradI.y*r.y); + s += w; + } + } + } + } + sat = (float)((Ia/s+(Jx+Jy)/(sqrt(Jx*Jx+Jy*Jy)+1.0e-20f)+0.5f)); + { + int isat = cvRound(sat); + CV_MAT_ELEM(*out,uchar,i-1,j-1) = CV_CAST_8U(isat); + } + } + + CV_MAT_ELEM(*f,uchar,i,j) = BAND; + Heap->Push(i,j,dist); + } + } + } + } +} + + +static void +icvNSInpaintFMM(const CvMat *f, CvMat *t, CvMat *out, int range, CvPriorityQueueFloat *Heap) { + int i = 0, j = 0, ii = 0, jj = 0, k, l, q, color = 0; + float dist; + + if (CV_MAT_CN(out->type)==3) { + + while (Heap->Pop(&ii,&jj)) { + + CV_MAT_ELEM(*f,uchar,ii,jj) = KNOWN; + for(q=0; q<4; q++) { + if (q==0) {i=ii-1; j=jj;} + else if(q==1) {i=ii; j=jj-1;} + else if(q==2) {i=ii+1; j=jj;} + else if(q==3) {i=ii; j=jj+1;} + if ((i<=1)||(j<=1)||(i>t->rows-1)||(j>t->cols-1)) continue; + + if (CV_MAT_ELEM(*f,uchar,i,j)==INSIDE) { + dist = min4(FastMarching_solve(i-1,j,i,j-1,f,t), + FastMarching_solve(i+1,j,i,j-1,f,t), + FastMarching_solve(i-1,j,i,j+1,f,t), + FastMarching_solve(i+1,j,i,j+1,f,t)); + CV_MAT_ELEM(*t,float,i,j) = dist; + + for (color=0; color<=2; color++) { + CvPoint2D32f gradI,r; + float Ia=0,s=1.0e-20f,w,dst,dir; + + for (k=i-range; k<=i+range; k++) { + int km=k-1+(k==1),kp=k-1-(k==f->rows-2); + for (l=j-range; l<=j+range; l++) { + int lm=l-1+(l==1),lp=l-1-(l==f->cols-2); + if (k>0&&l>0&&krows-1&&lcols-1) { + if ((CV_MAT_ELEM(*f,uchar,k,l)!=INSIDE)&& + ((l-j)*(l-j)+(k-i)*(k-i)<=range*range)) { + r.y=(float)(k-i); + r.x=(float)(l-j); + + dst = 1/(VectorLength(r)*VectorLength(r)+1); + + if (CV_MAT_ELEM(*f,uchar,k+1,l)!=INSIDE) { + if (CV_MAT_ELEM(*f,uchar,k-1,l)!=INSIDE) { + gradI.x=(float)(abs(CV_MAT_3COLOR_ELEM(*out,uchar,kp+1,lm,color)-CV_MAT_3COLOR_ELEM(*out,uchar,kp,lm,color))+ + abs(CV_MAT_3COLOR_ELEM(*out,uchar,kp,lm,color)-CV_MAT_3COLOR_ELEM(*out,uchar,km-1,lm,color))); + } else { + 
gradI.x=(float)(abs(CV_MAT_3COLOR_ELEM(*out,uchar,kp+1,lm,color)-CV_MAT_3COLOR_ELEM(*out,uchar,kp,lm,color)))*2.0f; + } + } else { + if (CV_MAT_ELEM(*f,uchar,k-1,l)!=INSIDE) { + gradI.x=(float)(abs(CV_MAT_3COLOR_ELEM(*out,uchar,kp,lm,color)-CV_MAT_3COLOR_ELEM(*out,uchar,km-1,lm,color)))*2.0f; + } else { + gradI.x=0; + } + } + if (CV_MAT_ELEM(*f,uchar,k,l+1)!=INSIDE) { + if (CV_MAT_ELEM(*f,uchar,k,l-1)!=INSIDE) { + gradI.y=(float)(abs(CV_MAT_3COLOR_ELEM(*out,uchar,km,lp+1,color)-CV_MAT_3COLOR_ELEM(*out,uchar,km,lm,color))+ + abs(CV_MAT_3COLOR_ELEM(*out,uchar,km,lm,color)-CV_MAT_3COLOR_ELEM(*out,uchar,km,lm-1,color))); + } else { + gradI.y=(float)(abs(CV_MAT_3COLOR_ELEM(*out,uchar,km,lp+1,color)-CV_MAT_3COLOR_ELEM(*out,uchar,km,lm,color)))*2.0f; + } + } else { + if (CV_MAT_ELEM(*f,uchar,k,l-1)!=INSIDE) { + gradI.y=(float)(abs(CV_MAT_3COLOR_ELEM(*out,uchar,km,lm,color)-CV_MAT_3COLOR_ELEM(*out,uchar,km,lm-1,color)))*2.0f; + } else { + gradI.y=0; + } + } + + gradI.x=-gradI.x; + dir=VectorScalMult(r,gradI); + + if (fabs(dir)<=0.01) { + dir=0.000001f; + } else { + dir = (float)fabs(VectorScalMult(r,gradI)/sqrt(VectorLength(r)*VectorLength(gradI))); + } + w = dst*dir; + Ia += (float)w * (float)(CV_MAT_3COLOR_ELEM(*out,uchar,km,lm,color)); + s += w; + } + } + } + } + { + int out_val = cvRound((double)Ia/s); + CV_MAT_3COLOR_ELEM(*out,uchar,i-1,j-1,color) = CV_CAST_8U(out_val); + } + } + + CV_MAT_ELEM(*f,uchar,i,j) = BAND; + Heap->Push(i,j,dist); + } + } + } + + } else if (CV_MAT_CN(out->type)==1) { + + while (Heap->Pop(&ii,&jj)) { + + CV_MAT_ELEM(*f,uchar,ii,jj) = KNOWN; + for(q=0; q<4; q++) { + if (q==0) {i=ii-1; j=jj;} + else if(q==1) {i=ii; j=jj-1;} + else if(q==2) {i=ii+1; j=jj;} + else if(q==3) {i=ii; j=jj+1;} + if ((i<=1)||(j<=1)||(i>t->rows-1)||(j>t->cols-1)) continue; + + if (CV_MAT_ELEM(*f,uchar,i,j)==INSIDE) { + dist = min4(FastMarching_solve(i-1,j,i,j-1,f,t), + FastMarching_solve(i+1,j,i,j-1,f,t), + FastMarching_solve(i-1,j,i,j+1,f,t), + 
FastMarching_solve(i+1,j,i,j+1,f,t)); + CV_MAT_ELEM(*t,float,i,j) = dist; + + { + CvPoint2D32f gradI,r; + float Ia=0,s=1.0e-20f,w,dst,dir; + + for (k=i-range; k<=i+range; k++) { + int km=k-1+(k==1),kp=k-1-(k==t->rows-2); + for (l=j-range; l<=j+range; l++) { + int lm=l-1+(l==1),lp=l-1-(l==t->cols-2); + if (k>0&&l>0&&krows-1&&lcols-1) { + if ((CV_MAT_ELEM(*f,uchar,k,l)!=INSIDE)&& + ((l-j)*(l-j)+(k-i)*(k-i)<=range*range)) { + r.y=(float)(i-k); + r.x=(float)(j-l); + + dst = 1/(VectorLength(r)*VectorLength(r)+1); + + if (CV_MAT_ELEM(*f,uchar,k+1,l)!=INSIDE) { + if (CV_MAT_ELEM(*f,uchar,k-1,l)!=INSIDE) { + gradI.x=(float)(abs(CV_MAT_ELEM(*out,uchar,kp+1,lm)-CV_MAT_ELEM(*out,uchar,kp,lm))+ + abs(CV_MAT_ELEM(*out,uchar,kp,lm)-CV_MAT_ELEM(*out,uchar,km-1,lm))); + } else { + gradI.x=(float)(abs(CV_MAT_ELEM(*out,uchar,kp+1,lm)-CV_MAT_ELEM(*out,uchar,kp,lm)))*2.0f; + } + } else { + if (CV_MAT_ELEM(*f,uchar,k-1,l)!=INSIDE) { + gradI.x=(float)(abs(CV_MAT_ELEM(*out,uchar,kp,lm)-CV_MAT_ELEM(*out,uchar,km-1,lm)))*2.0f; + } else { + gradI.x=0; + } + } + if (CV_MAT_ELEM(*f,uchar,k,l+1)!=INSIDE) { + if (CV_MAT_ELEM(*f,uchar,k,l-1)!=INSIDE) { + gradI.y=(float)(abs(CV_MAT_ELEM(*out,uchar,km,lp+1)-CV_MAT_ELEM(*out,uchar,km,lm))+ + abs(CV_MAT_ELEM(*out,uchar,km,lm)-CV_MAT_ELEM(*out,uchar,km,lm-1))); + } else { + gradI.y=(float)(abs(CV_MAT_ELEM(*out,uchar,km,lp+1)-CV_MAT_ELEM(*out,uchar,km,lm)))*2.0f; + } + } else { + if (CV_MAT_ELEM(*f,uchar,k,l-1)!=INSIDE) { + gradI.y=(float)(abs(CV_MAT_ELEM(*out,uchar,km,lm)-CV_MAT_ELEM(*out,uchar,km,lm-1)))*2.0f; + } else { + gradI.y=0; + } + } + + gradI.x=-gradI.x; + dir=VectorScalMult(r,gradI); + + if (fabs(dir)<=0.01) { + dir=0.000001f; + } else { + dir = (float)fabs(VectorScalMult(r,gradI)/sqrt(VectorLength(r)*VectorLength(gradI))); + } + w = dst*dir; + Ia += (float)w * (float)(CV_MAT_ELEM(*out,uchar,km,lm)); + s += w; + } + } + } + } + { + int out_val = cvRound((double)Ia/s); + CV_MAT_ELEM(*out,uchar,i-1,j-1) = CV_CAST_8U(out_val); + } + } + + 
CV_MAT_ELEM(*f,uchar,i,j) = BAND; + Heap->Push(i,j,dist); + } + } + } + + } +} + +#define SET_BORDER1_C1(image,type,value) {\ + int i,j;\ + for(j=0; jcols; j++) {\ + CV_MAT_ELEM(*image,type,0,j) = value;\ + }\ + for (i=1; irows-1; i++) {\ + CV_MAT_ELEM(*image,type,i,0) = CV_MAT_ELEM(*image,type,i,image->cols-1) = value;\ + }\ + for(j=0; jcols; j++) {\ + CV_MAT_ELEM(*image,type,erows-1,j) = value;\ + }\ + } + +#define COPY_MASK_BORDER1_C1(src,dst,type) {\ + int i,j;\ + for (i=0; irows; i++) {\ + for(j=0; jcols; j++) {\ + if (CV_MAT_ELEM(*src,type,i,j)!=0)\ + CV_MAT_ELEM(*dst,type,i+1,j+1) = INSIDE;\ + }\ + }\ + } + + +CV_IMPL void +cvInpaint( const CvArr* _input_img, const CvArr* _inpaint_mask, CvArr* _output_img, + double inpaintRange, int flags ) +{ + cv::Ptr mask, band, f, t, out; + cv::Ptr Heap, Out; + cv::Ptr el_cross, el_range; + + CvMat input_hdr, mask_hdr, output_hdr; + CvMat* input_img, *inpaint_mask, *output_img; + int range=cvRound(inpaintRange); + int erows, ecols; + + input_img = cvGetMat( _input_img, &input_hdr ); + inpaint_mask = cvGetMat( _inpaint_mask, &mask_hdr ); + output_img = cvGetMat( _output_img, &output_hdr ); + + if( !CV_ARE_SIZES_EQ(input_img,output_img) || !CV_ARE_SIZES_EQ(input_img,inpaint_mask)) + CV_Error( CV_StsUnmatchedSizes, "All the input and output images must have the same size" ); + + if( (CV_MAT_TYPE(input_img->type) != CV_8UC1 && + CV_MAT_TYPE(input_img->type) != CV_8UC3) || + !CV_ARE_TYPES_EQ(input_img,output_img) ) + CV_Error( CV_StsUnsupportedFormat, + "Only 8-bit 1-channel and 3-channel input/output images are supported" ); + + if( CV_MAT_TYPE(inpaint_mask->type) != CV_8UC1 ) + CV_Error( CV_StsUnsupportedFormat, "The mask must be 8-bit 1-channel image" ); + + range = MAX(range,1); + range = MIN(range,100); + + ecols = input_img->cols + 2; + erows = input_img->rows + 2; + + f = cvCreateMat(erows, ecols, CV_8UC1); + t = cvCreateMat(erows, ecols, CV_32FC1); + band = cvCreateMat(erows, ecols, CV_8UC1); + mask = 
cvCreateMat(erows, ecols, CV_8UC1); + el_cross = cvCreateStructuringElementEx(3,3,1,1,CV_SHAPE_CROSS,NULL); + + cvCopy( input_img, output_img ); + cvSet(mask,cvScalar(KNOWN,0,0,0)); + COPY_MASK_BORDER1_C1(inpaint_mask,mask,uchar); + SET_BORDER1_C1(mask,uchar,0); + cvSet(f,cvScalar(KNOWN,0,0,0)); + cvSet(t,cvScalar(1.0e6f,0,0,0)); + cvDilate(mask,band,el_cross,1); // image with narrow band + Heap=new CvPriorityQueueFloat; + if (!Heap->Init(band)) + return; + cvSub(band,mask,band,NULL); + SET_BORDER1_C1(band,uchar,0); + if (!Heap->Add(band)) + return; + cvSet(f,cvScalar(BAND,0,0,0),band); + cvSet(f,cvScalar(INSIDE,0,0,0),mask); + cvSet(t,cvScalar(0,0,0,0),band); + + if( flags == CV_INPAINT_TELEA ) + { + out = cvCreateMat(erows, ecols, CV_8UC1); + el_range = cvCreateStructuringElementEx(2*range+1,2*range+1, + range,range,CV_SHAPE_RECT,NULL); + cvDilate(mask,out,el_range,1); + cvSub(out,mask,out,NULL); + Out=new CvPriorityQueueFloat; + if (!Out->Init(out)) + return; + if (!Out->Add(band)) + return; + cvSub(out,band,out,NULL); + SET_BORDER1_C1(out,uchar,0); + icvCalcFMM(out,t,Out,true); + icvTeleaInpaintFMM(mask,t,output_img,range,Heap); + } + else if (flags == CV_INPAINT_NS) { + icvNSInpaintFMM(mask,t,output_img,range,Heap); + } else { + CV_Error( CV_StsBadArg, "The flags argument must be one of CV_INPAINT_TELEA or CV_INPAINT_NS" ); + } +} + +void cv::inpaint( InputArray _src, InputArray _mask, OutputArray _dst, + double inpaintRange, int flags ) +{ + Mat src = _src.getMat(), mask = _mask.getMat(); + _dst.create( src.size(), src.type() ); + CvMat c_src = src, c_mask = mask, c_dst = _dst.getMat(); + cvInpaint( &c_src, &c_mask, &c_dst, inpaintRange, flags ); +} diff --git a/opencv/imgproc/kdtree.cpp b/opencv/imgproc/kdtree.cpp new file mode 100644 index 0000000..2cfed90 --- /dev/null +++ b/opencv/imgproc/kdtree.cpp @@ -0,0 +1,241 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, 
COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2008, Xavier Delacour, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +// 2008-05-13, Xavier Delacour + +#include "precomp.hpp" + +#if !defined _MSC_VER || defined __ICL || _MSC_VER >= 1300 + +#include "_kdtree.hpp" +#include "_featuretree.h" + +#if _MSC_VER >= 1400 +#pragma warning(disable:4996) // suppress "function call with parameters may be unsafe" in std::copy +#endif + +class CvKDTreeWrap : public CvFeatureTree { + template + struct deref { + typedef __scalartype scalar_type; + typedef double accum_type; + + CvMat* mat; + deref(CvMat* _mat) : mat(_mat) { + assert(CV_ELEM_SIZE1(__cvtype) == sizeof(__scalartype)); + } + scalar_type operator() (int i, int j) const { + return *((scalar_type*)(mat->data.ptr + i * mat->step) + j); + } + }; + +#define dispatch_cvtype(mat, c) \ + switch (CV_MAT_DEPTH((mat)->type)) { \ + case CV_32F: \ + { typedef CvKDTree > tree_type; c; break; } \ + case CV_64F: \ + { typedef CvKDTree > tree_type; c; break; } \ + default: assert(0); \ + } + + CvMat* mat; + void* data; + + template + void find_nn(const CvMat* d, int k, int emax, CvMat* results, CvMat* dist) { + __treetype* tr = (__treetype*) data; + const uchar* dptr = d->data.ptr; + uchar* resultsptr = results->data.ptr; + uchar* distptr = dist->data.ptr; + typename __treetype::bbf_nn_pqueue nn; + + assert(d->cols == tr->dims()); + assert(results->rows == d->rows); + assert(results->rows == dist->rows); + assert(results->cols == k); + assert(dist->cols == k); + + for (int j = 0; j < d->rows; ++j) { + const typename __treetype::scalar_type* dj = + (const typename __treetype::scalar_type*) dptr; + + int* resultsj = (int*) resultsptr; + double* distj = (double*) distptr; + tr->find_nn_bbf(dj, k, emax, nn); + + assert((int)nn.size() <= k); + for (unsigned int j = 0; j < nn.size(); ++j) { + *resultsj++ = *nn[j].p; + *distj++ = nn[j].dist; + } + std::fill(resultsj, resultsj + k - nn.size(), -1); + std::fill(distj, distj + k - nn.size(), 0); + + dptr += d->step; + resultsptr += results->step; + distptr += dist->step; + } + } + + template + int 
find_ortho_range(CvMat* bounds_min, CvMat* bounds_max, + CvMat* results) { + int rn = results->rows * results->cols; + std::vector inbounds; + dispatch_cvtype(mat, ((__treetype*)data)-> + find_ortho_range((typename __treetype::scalar_type*)bounds_min->data.ptr, + (typename __treetype::scalar_type*)bounds_max->data.ptr, + inbounds)); + std::copy(inbounds.begin(), + inbounds.begin() + std::min((int)inbounds.size(), rn), + (int*) results->data.ptr); + return (int)inbounds.size(); + } + + CvKDTreeWrap(const CvKDTreeWrap& x); + CvKDTreeWrap& operator= (const CvKDTreeWrap& rhs); +public: + CvKDTreeWrap(CvMat* _mat) : mat(_mat) { + // * a flag parameter should tell us whether + // * (a) user ensures *mat outlives *this and is unchanged, + // * (b) we take reference and user ensures mat is unchanged, + // * (c) we copy data, (d) we own and release data. + + std::vector tmp(mat->rows); + for (unsigned int j = 0; j < tmp.size(); ++j) + tmp[j] = j; + + dispatch_cvtype(mat, data = new tree_type + (&tmp[0], &tmp[0] + tmp.size(), mat->cols, + tree_type::deref_type(mat))); + } + ~CvKDTreeWrap() { + dispatch_cvtype(mat, delete (tree_type*) data); + } + + int dims() { + int d = 0; + dispatch_cvtype(mat, d = ((tree_type*) data)->dims()); + return d; + } + int type() { + return mat->type; + } + + void FindFeatures(const CvMat* desc, int k, int emax, CvMat* results, CvMat* dist) { + cv::Ptr tmp_desc; + + if (desc->cols != dims()) + CV_Error(CV_StsUnmatchedSizes, "desc columns be equal feature dimensions"); + if (results->rows != desc->rows && results->cols != k) + CV_Error(CV_StsUnmatchedSizes, "results and desc must be same height"); + if (dist->rows != desc->rows && dist->cols != k) + CV_Error(CV_StsUnmatchedSizes, "dist and desc must be same height"); + if (CV_MAT_TYPE(results->type) != CV_32SC1) + CV_Error(CV_StsUnsupportedFormat, "results must be CV_32SC1"); + if (CV_MAT_TYPE(dist->type) != CV_64FC1) + CV_Error(CV_StsUnsupportedFormat, "dist must be CV_64FC1"); + + if 
(CV_MAT_TYPE(type()) != CV_MAT_TYPE(desc->type)) { + tmp_desc = cvCreateMat(desc->rows, desc->cols, type()); + cvConvert(desc, tmp_desc); + desc = tmp_desc; + } + + assert(CV_MAT_TYPE(desc->type) == CV_MAT_TYPE(mat->type)); + assert(CV_MAT_TYPE(dist->type) == CV_64FC1); + assert(CV_MAT_TYPE(results->type) == CV_32SC1); + + dispatch_cvtype(mat, find_nn + (desc, k, emax, results, dist)); + } + int FindOrthoRange(CvMat* bounds_min, CvMat* bounds_max, + CvMat* results) { + bool free_bounds = false; + int count = -1; + + if (bounds_min->cols * bounds_min->rows != dims() || + bounds_max->cols * bounds_max->rows != dims()) + CV_Error(CV_StsUnmatchedSizes, "bounds_{min,max} must 1 x dims or dims x 1"); + if (CV_MAT_TYPE(bounds_min->type) != CV_MAT_TYPE(bounds_max->type)) + CV_Error(CV_StsUnmatchedFormats, "bounds_{min,max} must have same type"); + if (CV_MAT_TYPE(results->type) != CV_32SC1) + CV_Error(CV_StsUnsupportedFormat, "results must be CV_32SC1"); + + if (CV_MAT_TYPE(bounds_min->type) != CV_MAT_TYPE(type())) { + free_bounds = true; + + CvMat* old_bounds_min = bounds_min; + bounds_min = cvCreateMat(bounds_min->rows, bounds_min->cols, type()); + cvConvert(old_bounds_min, bounds_min); + + CvMat* old_bounds_max = bounds_max; + bounds_max = cvCreateMat(bounds_max->rows, bounds_max->cols, type()); + cvConvert(old_bounds_max, bounds_max); + } + + assert(CV_MAT_TYPE(bounds_min->type) == CV_MAT_TYPE(mat->type)); + assert(CV_MAT_TYPE(bounds_min->type) == CV_MAT_TYPE(bounds_max->type)); + assert(bounds_min->rows * bounds_min->cols == dims()); + assert(bounds_max->rows * bounds_max->cols == dims()); + + dispatch_cvtype(mat, count = find_ortho_range + (bounds_min, bounds_max,results)); + + if (free_bounds) { + cvReleaseMat(&bounds_min); + cvReleaseMat(&bounds_max); + } + + return count; + } +}; + +CvFeatureTree* cvCreateKDTree(CvMat* desc) { + + if (CV_MAT_TYPE(desc->type) != CV_32FC1 && + CV_MAT_TYPE(desc->type) != CV_64FC1) + CV_Error(CV_StsUnsupportedFormat, "descriptors must 
be either CV_32FC1 or CV_64FC1"); + + return new CvKDTreeWrap(desc); +} + +#endif diff --git a/opencv/imgproc/linefit.cpp b/opencv/imgproc/linefit.cpp new file mode 100644 index 0000000..1d237cb --- /dev/null +++ b/opencv/imgproc/linefit.cpp @@ -0,0 +1,719 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ +#include "precomp.hpp" + +static const double eps = 1e-6; + +static CvStatus +icvFitLine2D_wods( CvPoint2D32f * points, int _count, float *weights, float *line ) +{ + double x = 0, y = 0, x2 = 0, y2 = 0, xy = 0, w = 0; + double dx2, dy2, dxy; + int i; + int count = _count; + float t; + + /* Calculating the average of x and y... */ + + if( weights == 0 ) + { + for( i = 0; i < count; i += 1 ) + { + x += points[i].x; + y += points[i].y; + x2 += points[i].x * points[i].x; + y2 += points[i].y * points[i].y; + xy += points[i].x * points[i].y; + } + w = (float) count; + } + else + { + for( i = 0; i < count; i += 1 ) + { + x += weights[i] * points[i].x; + y += weights[i] * points[i].y; + x2 += weights[i] * points[i].x * points[i].x; + y2 += weights[i] * points[i].y * points[i].y; + xy += weights[i] * points[i].x * points[i].y; + w += weights[i]; + } + } + + x /= w; + y /= w; + x2 /= w; + y2 /= w; + xy /= w; + + dx2 = x2 - x * x; + dy2 = y2 - y * y; + dxy = xy - x * y; + + t = (float) atan2( 2 * dxy, dx2 - dy2 ) / 2; + line[0] = (float) cos( t ); + line[1] = (float) sin( t ); + + line[2] = (float) x; + line[3] = (float) y; + + return CV_NO_ERR; +} + +static CvStatus +icvFitLine3D_wods( CvPoint3D32f * points, int count, float *weights, float *line ) +{ + int i; + float w0 = 0; + float x0 = 0, y0 = 0, z0 = 0; + float x2 = 0, y2 = 0, z2 = 0, xy = 0, yz = 0, xz = 0; + float dx2, dy2, dz2, dxy, dxz, dyz; + float *v; + float n; + 
float det[9], evc[9], evl[3]; + + memset( evl, 0, 3*sizeof(evl[0])); + memset( evc, 0, 9*sizeof(evl[0])); + + if( weights ) + { + for( i = 0; i < count; i++ ) + { + float x = points[i].x; + float y = points[i].y; + float z = points[i].z; + float w = weights[i]; + + + x2 += x * x * w; + xy += x * y * w; + xz += x * z * w; + y2 += y * y * w; + yz += y * z * w; + z2 += z * z * w; + x0 += x * w; + y0 += y * w; + z0 += z * w; + w0 += w; + } + } + else + { + for( i = 0; i < count; i++ ) + { + float x = points[i].x; + float y = points[i].y; + float z = points[i].z; + + x2 += x * x; + xy += x * y; + xz += x * z; + y2 += y * y; + yz += y * z; + z2 += z * z; + x0 += x; + y0 += y; + z0 += z; + } + w0 = (float) count; + } + + x2 /= w0; + xy /= w0; + xz /= w0; + y2 /= w0; + yz /= w0; + z2 /= w0; + + x0 /= w0; + y0 /= w0; + z0 /= w0; + + dx2 = x2 - x0 * x0; + dxy = xy - x0 * y0; + dxz = xz - x0 * z0; + dy2 = y2 - y0 * y0; + dyz = yz - y0 * z0; + dz2 = z2 - z0 * z0; + + det[0] = dz2 + dy2; + det[1] = -dxy; + det[2] = -dxz; + det[3] = det[1]; + det[4] = dx2 + dz2; + det[5] = -dyz; + det[6] = det[2]; + det[7] = det[5]; + det[8] = dy2 + dx2; + + /* Searching for a eigenvector of det corresponding to the minimal eigenvalue */ +#if 1 + { + CvMat _det = cvMat( 3, 3, CV_32F, det ); + CvMat _evc = cvMat( 3, 3, CV_32F, evc ); + CvMat _evl = cvMat( 3, 1, CV_32F, evl ); + cvEigenVV( &_det, &_evc, &_evl, 0 ); + i = evl[0] < evl[1] ? (evl[0] < evl[2] ? 0 : 2) : (evl[1] < evl[2] ? 
1 : 2); + } +#else + { + CvMat _det = cvMat( 3, 3, CV_32F, det ); + CvMat _evc = cvMat( 3, 3, CV_32F, evc ); + CvMat _evl = cvMat( 1, 3, CV_32F, evl ); + + cvSVD( &_det, &_evl, &_evc, 0, CV_SVD_MODIFY_A+CV_SVD_U_T ); + } + i = 2; +#endif + v = &evc[i * 3]; + n = (float) sqrt( (double)v[0] * v[0] + (double)v[1] * v[1] + (double)v[2] * v[2] ); + n = (float)MAX(n, eps); + line[0] = v[0] / n; + line[1] = v[1] / n; + line[2] = v[2] / n; + line[3] = x0; + line[4] = y0; + line[5] = z0; + + return CV_NO_ERR; +} + +static double +icvCalcDist2D( CvPoint2D32f * points, int count, float *_line, float *dist ) +{ + int j; + float px = _line[2], py = _line[3]; + float nx = _line[1], ny = -_line[0]; + double sum_dist = 0.; + + for( j = 0; j < count; j++ ) + { + float x, y; + + x = points[j].x - px; + y = points[j].y - py; + + dist[j] = (float) fabs( nx * x + ny * y ); + sum_dist += dist[j]; + } + + return sum_dist; +} + +static double +icvCalcDist3D( CvPoint3D32f * points, int count, float *_line, float *dist ) +{ + int j; + float px = _line[3], py = _line[4], pz = _line[5]; + float vx = _line[0], vy = _line[1], vz = _line[2]; + double sum_dist = 0.; + + for( j = 0; j < count; j++ ) + { + float x, y, z; + double p1, p2, p3; + + x = points[j].x - px; + y = points[j].y - py; + z = points[j].z - pz; + + p1 = vy * z - vz * y; + p2 = vz * x - vx * z; + p3 = vx * y - vy * x; + + dist[j] = (float) sqrt( p1*p1 + p2*p2 + p3*p3 ); + sum_dist += dist[j]; + } + + return sum_dist; +} + +static void +icvWeightL1( float *d, int count, float *w ) +{ + int i; + + for( i = 0; i < count; i++ ) + { + double t = fabs( (double) d[i] ); + w[i] = (float)(1. / MAX(t, eps)); + } +} + +static void +icvWeightL12( float *d, int count, float *w ) +{ + int i; + + for( i = 0; i < count; i++ ) + { + w[i] = 1.0f / (float) sqrt( 1 + (double) (d[i] * d[i] * 0.5) ); + } +} + + +static void +icvWeightHuber( float *d, int count, float *w, float _c ) +{ + int i; + const float c = _c <= 0 ? 
1.345f : _c; + + for( i = 0; i < count; i++ ) + { + if( d[i] < c ) + w[i] = 1.0f; + else + w[i] = c/d[i]; + } +} + + +static void +icvWeightFair( float *d, int count, float *w, float _c ) +{ + int i; + const float c = _c == 0 ? 1 / 1.3998f : 1 / _c; + + for( i = 0; i < count; i++ ) + { + w[i] = 1 / (1 + d[i] * c); + } +} + +static void +icvWeightWelsch( float *d, int count, float *w, float _c ) +{ + int i; + const float c = _c == 0 ? 1 / 2.9846f : 1 / _c; + + for( i = 0; i < count; i++ ) + { + w[i] = (float) exp( -d[i] * d[i] * c * c ); + } +} + + +/* Takes an array of 2D points, type of distance (including user-defined +distance specified by callbacks, fills the array of four floats with line +parameters A, B, C, D, where (A, B) is the normalized direction vector, +(C, D) is the point that belongs to the line. */ + +static CvStatus icvFitLine2D( CvPoint2D32f * points, int count, int dist, + float _param, float reps, float aeps, float *line ) +{ + double EPS = count*FLT_EPSILON; + void (*calc_weights) (float *, int, float *) = 0; + void (*calc_weights_param) (float *, int, float *, float) = 0; + float *w; /* weights */ + float *r; /* square distances */ + int i, j, k; + float _line[6], _lineprev[6]; + float rdelta = reps != 0 ? reps : 1.0f; + float adelta = aeps != 0 ? 
aeps : 0.01f; + double min_err = DBL_MAX, err = 0; + CvRNG rng = cvRNG(-1); + + memset( line, 0, 4*sizeof(line[0]) ); + + switch (dist) + { + case CV_DIST_L2: + return icvFitLine2D_wods( points, count, 0, line ); + + case CV_DIST_L1: + calc_weights = icvWeightL1; + break; + + case CV_DIST_L12: + calc_weights = icvWeightL12; + break; + + case CV_DIST_FAIR: + calc_weights_param = icvWeightFair; + break; + + case CV_DIST_WELSCH: + calc_weights_param = icvWeightWelsch; + break; + + case CV_DIST_HUBER: + calc_weights_param = icvWeightHuber; + break; + + /*case CV_DIST_USER: + calc_weights = (void ( * )(float *, int, float *)) _PFP.fp; + break;*/ + + default: + return CV_BADFACTOR_ERR; + } + + w = (float *) cvAlloc( count * sizeof( float )); + r = (float *) cvAlloc( count * sizeof( float )); + + for( k = 0; k < 20; k++ ) + { + int first = 1; + for( i = 0; i < count; i++ ) + w[i] = 0.f; + + for( i = 0; i < MIN(count,10); ) + { + j = cvRandInt(&rng) % count; + if( w[j] < FLT_EPSILON ) + { + w[j] = 1.f; + i++; + } + } + + icvFitLine2D_wods( points, count, w, _line ); + for( i = 0; i < 30; i++ ) + { + double sum_w = 0; + + if( first ) + { + first = 0; + } + else + { + double t = _line[0] * _lineprev[0] + _line[1] * _lineprev[1]; + t = MAX(t,-1.); + t = MIN(t,1.); + if( fabs(acos(t)) < adelta ) + { + float x, y, d; + + x = (float) fabs( _line[2] - _lineprev[2] ); + y = (float) fabs( _line[3] - _lineprev[3] ); + + d = x > y ? 
x : y; + if( d < rdelta ) + break; + } + } + /* calculate distances */ + err = icvCalcDist2D( points, count, _line, r ); + if( err < EPS ) + break; + + /* calculate weights */ + if( calc_weights ) + calc_weights( r, count, w ); + else + calc_weights_param( r, count, w, _param ); + + for( j = 0; j < count; j++ ) + sum_w += w[j]; + + if( fabs(sum_w) > FLT_EPSILON ) + { + sum_w = 1./sum_w; + for( j = 0; j < count; j++ ) + w[j] = (float)(w[j]*sum_w); + } + else + { + for( j = 0; j < count; j++ ) + w[j] = 1.f; + } + + /* save the line parameters */ + memcpy( _lineprev, _line, 4 * sizeof( float )); + + /* Run again... */ + icvFitLine2D_wods( points, count, w, _line ); + } + + if( err < min_err ) + { + min_err = err; + memcpy( line, _line, 4 * sizeof(line[0])); + if( err < EPS ) + break; + } + } + + cvFree( &w ); + cvFree( &r ); + return CV_OK; +} + + +/* Takes an array of 3D points, type of distance (including user-defined +distance specified by callbacks, fills the array of four floats with line +parameters A, B, C, D, E, F, where (A, B, C) is the normalized direction vector, +(D, E, F) is the point that belongs to the line. */ + +static CvStatus +icvFitLine3D( CvPoint3D32f * points, int count, int dist, + float _param, float reps, float aeps, float *line ) +{ + double EPS = count*FLT_EPSILON; + void (*calc_weights) (float *, int, float *) = 0; + void (*calc_weights_param) (float *, int, float *, float) = 0; + float *w; /* weights */ + float *r; /* square distances */ + int i, j, k; + float _line[6]={0,0,0,0,0,0}, _lineprev[6]={0,0,0,0,0,0}; + float rdelta = reps != 0 ? reps : 1.0f; + float adelta = aeps != 0 ? 
aeps : 0.01f; + double min_err = DBL_MAX, err = 0; + CvRNG rng = cvRNG(-1); + + switch (dist) + { + case CV_DIST_L2: + return icvFitLine3D_wods( points, count, 0, line ); + + case CV_DIST_L1: + calc_weights = icvWeightL1; + break; + + case CV_DIST_L12: + calc_weights = icvWeightL12; + break; + + case CV_DIST_FAIR: + calc_weights_param = icvWeightFair; + break; + + case CV_DIST_WELSCH: + calc_weights_param = icvWeightWelsch; + break; + + case CV_DIST_HUBER: + calc_weights_param = icvWeightHuber; + break; + + /*case CV_DIST_USER: + _PFP.p = param; + calc_weights = (void ( * )(float *, int, float *)) _PFP.fp; + break;*/ + + default: + return CV_BADFACTOR_ERR; + } + + w = (float *) cvAlloc( count * sizeof( float )); + r = (float *) cvAlloc( count * sizeof( float )); + + for( k = 0; k < 20; k++ ) + { + int first = 1; + for( i = 0; i < count; i++ ) + w[i] = 0.f; + + for( i = 0; i < MIN(count,10); ) + { + j = cvRandInt(&rng) % count; + if( w[j] < FLT_EPSILON ) + { + w[j] = 1.f; + i++; + } + } + + icvFitLine3D_wods( points, count, w, _line ); + for( i = 0; i < 30; i++ ) + { + double sum_w = 0; + + if( first ) + { + first = 0; + } + else + { + double t = _line[0] * _lineprev[0] + _line[1] * _lineprev[1] + _line[2] * _lineprev[2]; + t = MAX(t,-1.); + t = MIN(t,1.); + if( fabs(acos(t)) < adelta ) + { + float x, y, z, ax, ay, az, dx, dy, dz, d; + + x = _line[3] - _lineprev[3]; + y = _line[4] - _lineprev[4]; + z = _line[5] - _lineprev[5]; + ax = _line[0] - _lineprev[0]; + ay = _line[1] - _lineprev[1]; + az = _line[2] - _lineprev[2]; + dx = (float) fabs( y * az - z * ay ); + dy = (float) fabs( z * ax - x * az ); + dz = (float) fabs( x * ay - y * ax ); + + d = dx > dy ? (dx > dz ? dx : dz) : (dy > dz ? 
dy : dz); + if( d < rdelta ) + break; + } + } + /* calculate distances */ + if( icvCalcDist3D( points, count, _line, r ) < FLT_EPSILON*count ) + break; + + /* calculate weights */ + if( calc_weights ) + calc_weights( r, count, w ); + else + calc_weights_param( r, count, w, _param ); + + for( j = 0; j < count; j++ ) + sum_w += w[j]; + + if( fabs(sum_w) > FLT_EPSILON ) + { + sum_w = 1./sum_w; + for( j = 0; j < count; j++ ) + w[j] = (float)(w[j]*sum_w); + } + else + { + for( j = 0; j < count; j++ ) + w[j] = 1.f; + } + + /* save the line parameters */ + memcpy( _lineprev, _line, 6 * sizeof( float )); + + /* Run again... */ + icvFitLine3D_wods( points, count, w, _line ); + } + + if( err < min_err ) + { + min_err = err; + memcpy( line, _line, 6 * sizeof(line[0])); + if( err < EPS ) + break; + } + } + + // Return... + cvFree( &w ); + cvFree( &r ); + return CV_OK; +} + + +CV_IMPL void +cvFitLine( const CvArr* array, int dist, double param, + double reps, double aeps, float *line ) +{ + cv::AutoBuffer buffer; + + schar* points = 0; + union { CvContour contour; CvSeq seq; } header; + CvSeqBlock block; + CvSeq* ptseq = (CvSeq*)array; + int type; + + if( !line ) + CV_Error( CV_StsNullPtr, "NULL pointer to line parameters" ); + + if( CV_IS_SEQ(ptseq) ) + { + type = CV_SEQ_ELTYPE(ptseq); + if( ptseq->total == 0 ) + CV_Error( CV_StsBadSize, "The sequence has no points" ); + if( (type!=CV_32FC2 && type!=CV_32FC3 && type!=CV_32SC2 && type!=CV_32SC3) || + CV_ELEM_SIZE(type) != ptseq->elem_size ) + CV_Error( CV_StsUnsupportedFormat, + "Input sequence must consist of 2d points or 3d points" ); + } + else + { + CvMat* mat = (CvMat*)array; + type = CV_MAT_TYPE(mat->type); + if( !CV_IS_MAT(mat)) + CV_Error( CV_StsBadArg, "Input array is not a sequence nor matrix" ); + + if( !CV_IS_MAT_CONT(mat->type) || + (type!=CV_32FC2 && type!=CV_32FC3 && type!=CV_32SC2 && type!=CV_32SC3) || + (mat->width != 1 && mat->height != 1)) + CV_Error( CV_StsBadArg, + "Input array must be 1d continuous array 
of 2d or 3d points" ); + + ptseq = cvMakeSeqHeaderForArray( + CV_SEQ_KIND_GENERIC|type, sizeof(CvContour), CV_ELEM_SIZE(type), mat->data.ptr, + mat->width + mat->height - 1, &header.seq, &block ); + } + + if( reps < 0 || aeps < 0 ) + CV_Error( CV_StsOutOfRange, "Both reps and aeps must be non-negative" ); + + if( CV_MAT_DEPTH(type) == CV_32F && ptseq->first->next == ptseq->first ) + { + /* no need to copy data in this case */ + points = ptseq->first->data; + } + else + { + buffer.allocate(ptseq->total*CV_ELEM_SIZE(type)); + points = buffer; + cvCvtSeqToArray( ptseq, points, CV_WHOLE_SEQ ); + + if( CV_MAT_DEPTH(type) != CV_32F ) + { + int i, total = ptseq->total*CV_MAT_CN(type); + assert( CV_MAT_DEPTH(type) == CV_32S ); + + for( i = 0; i < total; i++ ) + ((float*)points)[i] = (float)((int*)points)[i]; + } + } + + if( dist == CV_DIST_USER ) + CV_Error( CV_StsBadArg, "User-defined distance is not allowed" ); + + if( CV_MAT_CN(type) == 2 ) + { + IPPI_CALL( icvFitLine2D( (CvPoint2D32f*)points, ptseq->total, + dist, (float)param, (float)reps, (float)aeps, line )); + } + else + { + IPPI_CALL( icvFitLine3D( (CvPoint3D32f*)points, ptseq->total, + dist, (float)param, (float)reps, (float)aeps, line )); + } +} + +/* End of file. */ diff --git a/opencv/imgproc/lsh.cpp b/opencv/imgproc/lsh.cpp new file mode 100644 index 0000000..75e1016 --- /dev/null +++ b/opencv/imgproc/lsh.cpp @@ -0,0 +1,454 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2009, Xavier Delacour, all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +// 2009-01-12, Xavier Delacour + + +// * hash perf could be improved +// * in particular, implement integer only (converted fixed from float input) + +// * number of hash functions could be reduced (andoni paper) + +// * redundant distance computations could be suppressed + +// * rework CvLSHOperations interface-- move some of the loops into it to +// * allow efficient async storage + + +// Datar, M., Immorlica, N., Indyk, P., and Mirrokni, V. S. 2004. 
Locality-sensitive hashing +// scheme based on p-stable distributions. In Proceedings of the Twentieth Annual Symposium on +// Computational Geometry (Brooklyn, New York, USA, June 08 - 11, 2004). SCG '04. ACM, New York, +// NY, 253-262. DOI= http://doi.acm.org/10.1145/997817.997857 + +#include "precomp.hpp" +#include +#include +#include +#include + +template +class memory_hash_ops : public CvLSHOperations { + int d; + std::vector data; + std::vector free_data; + struct node { + int i, h2, next; + }; + std::vector nodes; + std::vector free_nodes; + std::vector bins; + +public: + memory_hash_ops(int _d, int n) : d(_d) { + bins.resize(n, -1); + } + + virtual int vector_add(const void* _p) { + const T* p = (const T*)_p; + int i; + if (free_data.empty()) { + i = (int)data.size(); + data.insert(data.end(), d, 0); + } else { + i = free_data.end()[-1]; + free_data.pop_back(); + } + std::copy(p, p + d, data.begin() + i); + return i / d; + } + virtual void vector_remove(int i) { + free_data.push_back(i * d); + } + virtual const void* vector_lookup(int i) { + return &data[i * d]; + } + virtual void vector_reserve(int n) { + data.reserve(n * d); + } + virtual unsigned int vector_count() { + return (unsigned)(data.size() / d - free_data.size()); + } + + virtual void hash_insert(lsh_hash h, int /*l*/, int i) { + int ii; + if (free_nodes.empty()) { + ii = (int)nodes.size(); + nodes.push_back(node()); + } else { + ii = free_nodes.end()[-1]; + free_nodes.pop_back(); + } + node& n = nodes[ii]; + int h1 = (int)(h.h1 % bins.size()); + n.i = i; + n.h2 = h.h2; + n.next = bins[h1]; + bins[h1] = ii; + } + virtual void hash_remove(lsh_hash h, int /*l*/, int i) { + int h1 = (int)(h.h1 % bins.size()); + for (int ii = bins[h1], iin, iip = -1; ii != -1; iip = ii, ii = iin) { + iin = nodes[ii].next; + if (nodes[ii].h2 == h.h2 && nodes[ii].i == i) { + free_nodes.push_back(ii); + if (iip == -1) + bins[h1] = iin; + else + nodes[iip].next = iin; + } + } + } + virtual int hash_lookup(lsh_hash h, 
int /*l*/, int* ret_i, int ret_i_max) { + int h1 = (int)(h.h1 % bins.size()); + int k = 0; + for (int ii = bins[h1]; ii != -1 && k < ret_i_max; ii = nodes[ii].next) + if (nodes[ii].h2 == h.h2) + ret_i[k++] = nodes[ii].i; + return k; + } +}; + +template +class pstable_l2_func { + CvMat *a, *b, *r1, *r2; + int d, k; + double r; + pstable_l2_func(const pstable_l2_func& x); + pstable_l2_func& operator= (const pstable_l2_func& rhs); +public: + typedef T scalar_type; + typedef T accum_type; + pstable_l2_func(int _d, int _k, double _r, CvRNG& rng) + : d(_d), k(_k), r(_r) { + assert(sizeof(T) == CV_ELEM_SIZE1(cvtype)); + a = cvCreateMat(k, d, cvtype); + b = cvCreateMat(k, 1, cvtype); + r1 = cvCreateMat(k, 1, CV_32SC1); + r2 = cvCreateMat(k, 1, CV_32SC1); + cvRandArr(&rng, a, CV_RAND_NORMAL, cvScalar(0), cvScalar(1)); + cvRandArr(&rng, b, CV_RAND_UNI, cvScalar(0), cvScalar(r)); + cvRandArr(&rng, r1, CV_RAND_UNI, + cvScalar(std::numeric_limits::min()), + cvScalar(std::numeric_limits::max())); + cvRandArr(&rng, r2, CV_RAND_UNI, + cvScalar(std::numeric_limits::min()), + cvScalar(std::numeric_limits::max())); + } + ~pstable_l2_func() { + cvReleaseMat(&a); + cvReleaseMat(&b); + cvReleaseMat(&r1); + cvReleaseMat(&r2); + } + + // * factor all L functions into this (reduces number of matrices to 4 total; + // * simpler syntax in lsh_table). give parameter l here that tells us which + // * row to use etc. 
+ + lsh_hash operator() (const T* x) const { + const T* aj = (const T*)a->data.ptr; + const T* bj = (const T*)b->data.ptr; + + lsh_hash h; + h.h1 = h.h2 = 0; + for (int j = 0; j < k; ++j) { + accum_type s = 0; + for (int jj = 0; jj < d; ++jj) + s += aj[jj] * x[jj]; + s += *bj; + s = accum_type(s/r); + int si = int(s); + h.h1 += r1->data.i[j] * si; + h.h2 += r2->data.i[j] * si; + + aj += d; + bj++; + } + return h; + } + accum_type distance(const T* p, const T* q) const { + accum_type s = 0; + for (int j = 0; j < d; ++j) { + accum_type d1 = p[j] - q[j]; + s += d1 * d1; + } + return s; + } +}; + +template +class lsh_table { +public: + typedef typename H::scalar_type scalar_type; + typedef typename H::accum_type accum_type; +private: + std::vector g; + CvLSHOperations* ops; + int d, L, k; + double r; + + static accum_type comp_dist(const std::pair& x, + const std::pair& y) { + return x.second < y.second; + } + + lsh_table(const lsh_table& x); + lsh_table& operator= (const lsh_table& rhs); +public: + lsh_table(CvLSHOperations* _ops, int _d, int Lval, int _k, double _r, CvRNG& rng) + : ops(_ops), d(_d), L(Lval), k(_k), r(_r) { + g.resize(L); + for (int j = 0; j < L; ++j) + g[j] = new H(d, k, r, rng); + } + ~lsh_table() { + for (int j = 0; j < L; ++j) + delete g[j]; + delete ops; + } + + int dims() const { + return d; + } + unsigned int size() const { + return ops->vector_count(); + } + + void add(const scalar_type* data, int n, int* ret_indices = 0) { + for (int j=0;jvector_add(x); + if (ret_indices) + ret_indices[j] = i; + + for (int l = 0; l < L; ++l) { + lsh_hash h = (*g[l])(x); + ops->hash_insert(h, l, i); + } + } + } + void remove(const int* indices, int n) { + for (int j = 0; j < n; ++j) { + int i = indices[n]; + const scalar_type* x = (const scalar_type*)ops->vector_lookup(i); + + for (int l = 0; l < L; ++l) { + lsh_hash h = (*g[l])(x); + ops->hash_remove(h, l, i); + } + ops->vector_remove(i); + } + } + void query(const scalar_type* q, int k0, int emax, double* 
dist, int* results) { + cv::AutoBuffer tmp(emax); + typedef std::pair dr_type; // * swap int and accum_type here, for naming consistency + cv::AutoBuffer dr(k0); + int k1 = 0; + + // * handle k0 >= emax, in which case don't track max distance + + for (int l = 0; l < L && emax > 0; ++l) { + lsh_hash h = (*g[l])(q); + int m = ops->hash_lookup(h, l, tmp, emax); + for (int j = 0; j < m && emax > 0; ++j, --emax) { + int i = tmp[j]; + const scalar_type* p = (const scalar_type*)ops->vector_lookup(i); + accum_type pd = (*g[l]).distance(p, q); + if (k1 < k0) { + dr[k1++] = std::make_pair(i, pd); + std::push_heap(&dr[0], &dr[k1], comp_dist); + } else if (pd < dr[0].second) { + std::pop_heap(&dr[0], &dr[k0], comp_dist); + dr[k0 - 1] = std::make_pair(i, pd); + std::push_heap(&dr[0], &dr[k0], comp_dist); + } + } + } + + for (int j = 0; j < k1; ++j) + dist[j] = dr[j].second, results[j] = dr[j].first; + std::fill(dist + k1, dist + k0, 0); + std::fill(results + k1, results + k0, -1); + } + void query(const scalar_type* data, int n, int k0, int emax, double* dist, int* results) { + for (int j = 0; j < n; ++j) { + query(data, k0, emax, dist, results); + data += d; // * this may not agree with step for some scalar_types + dist += k0; + results += k0; + } + } +}; + +typedef lsh_table > lsh_pstable_l2_32f; +typedef lsh_table > lsh_pstable_l2_64f; + +struct CvLSH { + int type; + union { + lsh_pstable_l2_32f* lsh_32f; + lsh_pstable_l2_64f* lsh_64f; + } u; +}; + +CvLSH* cvCreateLSH(CvLSHOperations* ops, int d, int L, int k, int type, double r, int64 seed) { + CvLSH* lsh = 0; + CvRNG rng = cvRNG(seed); + + if (type != CV_32FC1 && type != CV_64FC1) + CV_Error(CV_StsUnsupportedFormat, "vectors must be either CV_32FC1 or CV_64FC1"); + lsh = new CvLSH; + lsh->type = type; + switch (type) { + case CV_32FC1: lsh->u.lsh_32f = new lsh_pstable_l2_32f(ops, d, L, k, r, rng); break; + case CV_64FC1: lsh->u.lsh_64f = new lsh_pstable_l2_64f(ops, d, L, k, r, rng); break; + } + + return lsh; +} + +CvLSH* 
cvCreateMemoryLSH(int d, int n, int L, int k, int type, double r, int64 seed) { + CvLSHOperations* ops = 0; + + switch (type) { + case CV_32FC1: ops = new memory_hash_ops(d,n); break; + case CV_64FC1: ops = new memory_hash_ops(d,n); break; + } + return cvCreateLSH(ops, d, L, k, type, r, seed); +} + +void cvReleaseLSH(CvLSH** lsh) { + switch ((*lsh)->type) { + case CV_32FC1: delete (*lsh)->u.lsh_32f; break; + case CV_64FC1: delete (*lsh)->u.lsh_64f; break; + default: assert(0); + } + delete *lsh; + *lsh = 0; +} + +unsigned int LSHSize(CvLSH* lsh) { + switch (lsh->type) { + case CV_32FC1: return lsh->u.lsh_32f->size(); break; + case CV_64FC1: return lsh->u.lsh_64f->size(); break; + default: assert(0); + } + return 0; +} + + +void cvLSHAdd(CvLSH* lsh, const CvMat* data, CvMat* indices) { + int dims, n; + int* ret_indices = 0; + + switch (lsh->type) { + case CV_32FC1: dims = lsh->u.lsh_32f->dims(); break; + case CV_64FC1: dims = lsh->u.lsh_64f->dims(); break; + default: assert(0); return; + } + + n = data->rows; + + if (dims != data->cols) + CV_Error(CV_StsBadSize, "data must be n x d, where d is what was used to construct LSH"); + + if (CV_MAT_TYPE(data->type) != lsh->type) + CV_Error(CV_StsUnsupportedFormat, "type of data and constructed LSH must agree"); + if (indices) { + if (CV_MAT_TYPE(indices->type) != CV_32SC1) + CV_Error(CV_StsUnsupportedFormat, "indices must be CV_32SC1"); + if (indices->rows * indices->cols != n) + CV_Error(CV_StsBadSize, "indices must be n x 1 or 1 x n for n x d data"); + ret_indices = indices->data.i; + } + + switch (lsh->type) { + case CV_32FC1: lsh->u.lsh_32f->add(data->data.fl, n, ret_indices); break; + case CV_64FC1: lsh->u.lsh_64f->add(data->data.db, n, ret_indices); break; + default: assert(0); return; + } +} + +void cvLSHRemove(CvLSH* lsh, const CvMat* indices) { + int n; + + if (CV_MAT_TYPE(indices->type) != CV_32SC1) + CV_Error(CV_StsUnsupportedFormat, "indices must be CV_32SC1"); + n = indices->rows * indices->cols; + switch 
/* For each of the n rows (query vectors) of `data`, finds up to k approximate
   nearest neighbours in the index, examining at most emax candidates per
   query.  Writes squared distances into `dist` (n x k, CV_64FC1) and the
   matching vector indices into `indices` (n x k, CV_32SC1); unfilled slots
   are padded by the underlying query (0 distance, -1 index). */
void cvLSHQuery(CvLSH* lsh, const CvMat* data, CvMat* indices, CvMat* dist, int k, int emax) {
  int dims;

  /* dimensionality the index was constructed with */
  switch (lsh->type) {
  case CV_32FC1: dims = lsh->u.lsh_32f->dims(); break;
  case CV_64FC1: dims = lsh->u.lsh_64f->dims(); break;
  default: assert(0); return;
  }

  /* validate all arguments before dispatching */
  if (k<1)
    CV_Error(CV_StsOutOfRange, "k must be positive");
  if (CV_MAT_TYPE(data->type) != lsh->type)
    CV_Error(CV_StsUnsupportedFormat, "type of data and constructed LSH must agree");
  if (dims != data->cols)
    CV_Error(CV_StsBadSize, "data must be n x d, where d is what was used to construct LSH");
  if (dist->rows != data->rows || dist->cols != k)
    CV_Error(CV_StsBadSize, "dist must be n x k for n x d data");
  if (dist->rows != indices->rows || dist->cols != indices->cols)
    CV_Error(CV_StsBadSize, "dist and indices must be same size");
  if (CV_MAT_TYPE(dist->type) != CV_64FC1)
    CV_Error(CV_StsUnsupportedFormat, "dist must be CV_64FC1");
  if (CV_MAT_TYPE(indices->type) != CV_32SC1)
    CV_Error(CV_StsUnsupportedFormat, "indices must be CV_32SC1");

  /* dispatch on element type to the concrete table */
  switch (lsh->type) {
  case CV_32FC1: lsh->u.lsh_32f->query(data->data.fl, data->rows,
                                       k, emax, dist->data.db, indices->data.i); break;
  case CV_64FC1: lsh->u.lsh_64f->query(data->data.db, data->rows,
                                       k, emax, dist->data.db, indices->data.i); break;
  default: assert(0); return;
  }
}
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ +#include "precomp.hpp" + +/*F/////////////////////////////////////////////////////////////////////////////////////// +// Name: cvMatchContours +// Purpose: +// Calculates matching of the two contours +// Context: +// Parameters: +// contour_1 - pointer to the first input contour object. +// contour_2 - pointer to the second input contour object. +// method - method for the matching calculation +// (now CV_IPPI_CONTOURS_MATCH_I1, CV_CONTOURS_MATCH_I2 or +// CV_CONTOURS_MATCH_I3 only ) +// rezult - output calculated measure +// +//F*/ +CV_IMPL double +cvMatchShapes( const void* contour1, const void* contour2, + int method, double /*parameter*/ ) +{ + CvMoments moments; + CvHuMoments huMoments; + double ma[7], mb[7]; + int i, sma, smb; + double eps = 1.e-5; + double mmm; + double result = 0; + + if( !contour1 || !contour2 ) + CV_Error( CV_StsNullPtr, "" ); + + // calculate moments of the first shape + cvMoments( contour1, &moments ); + cvGetHuMoments( &moments, &huMoments ); + + ma[0] = huMoments.hu1; + ma[1] = huMoments.hu2; + ma[2] = huMoments.hu3; + ma[3] = huMoments.hu4; + ma[4] = huMoments.hu5; + ma[5] = huMoments.hu6; + ma[6] = huMoments.hu7; + + + // calculate moments of the second shape + cvMoments( contour2, &moments ); + cvGetHuMoments( &moments, &huMoments ); + + mb[0] = huMoments.hu1; + mb[1] = huMoments.hu2; + mb[2] = huMoments.hu3; + mb[3] = huMoments.hu4; + mb[4] = huMoments.hu5; + mb[5] = huMoments.hu6; + mb[6] = huMoments.hu7; + + switch (method) + { + case 1: + { + for( i = 0; i < 7; i++ ) + { + double ama = fabs( ma[i] ); + double amb = fabs( mb[i] ); + + if( ma[i] > 0 ) + sma = 1; + else if( ma[i] < 0 ) + sma = -1; + else + sma = 0; + if( mb[i] > 0 ) + smb = 1; + else if( mb[i] < 0 ) + smb = -1; + else + smb = 0; + + if( ama > eps && amb > eps ) + { + ama = 1. / (sma * log10( ama )); + amb = 1. 
/ (smb * log10( amb )); + result += fabs( -ama + amb ); + } + } + break; + } + + case 2: + { + for( i = 0; i < 7; i++ ) + { + double ama = fabs( ma[i] ); + double amb = fabs( mb[i] ); + + if( ma[i] > 0 ) + sma = 1; + else if( ma[i] < 0 ) + sma = -1; + else + sma = 0; + if( mb[i] > 0 ) + smb = 1; + else if( mb[i] < 0 ) + smb = -1; + else + smb = 0; + + if( ama > eps && amb > eps ) + { + ama = sma * log10( ama ); + amb = smb * log10( amb ); + result += fabs( -ama + amb ); + } + } + break; + } + + case 3: + { + for( i = 0; i < 7; i++ ) + { + double ama = fabs( ma[i] ); + double amb = fabs( mb[i] ); + + if( ma[i] > 0 ) + sma = 1; + else if( ma[i] < 0 ) + sma = -1; + else + sma = 0; + if( mb[i] > 0 ) + smb = 1; + else if( mb[i] < 0 ) + smb = -1; + else + smb = 0; + + if( ama > eps && amb > eps ) + { + ama = sma * log10( ama ); + amb = smb * log10( amb ); + mmm = fabs( (ama - amb) / ama ); + if( result < mmm ) + result = mmm; + } + } + break; + } + default: + CV_Error( CV_StsBadArg, "Unknown comparison method" ); + } + + return result; +} + + +/* End of file. */ diff --git a/opencv/imgproc/moments.cpp b/opencv/imgproc/moments.cpp new file mode 100644 index 0000000..aa1a569 --- /dev/null +++ b/opencv/imgproc/moments.cpp @@ -0,0 +1,651 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. 
// The function calculates center of gravity and the central second order moments.
// Given the raw moments m00..m03 already stored in *moments, it fills in
// inv_sqrt_m00 (0 when m00 is ~0) and the central moments mu20..mu03, computed
// about the centroid (cx, cy) = (m10/m00, m01/m00).
static void icvCompleteMomentState( CvMoments* moments )
{
    double cx = 0, cy = 0;   // centroid; stays (0,0) for a degenerate m00
    double mu20, mu11, mu02;

    assert( moments != 0 );
    moments->inv_sqrt_m00 = 0;

    if( fabs(moments->m00) > DBL_EPSILON )
    {
        double inv_m00 = 1. / moments->m00;
        cx = moments->m10 * inv_m00;
        cy = moments->m01 * inv_m00;
        // fabs guards against a negative m00 (possible for contour moments)
        moments->inv_sqrt_m00 = std::sqrt( fabs(inv_m00) );
    }

    // second-order central moments
    // mu20 = m20 - m10*cx
    mu20 = moments->m20 - moments->m10 * cx;
    // mu11 = m11 - m10*cy
    mu11 = moments->m11 - moments->m10 * cy;
    // mu02 = m02 - m01*cy
    mu02 = moments->m02 - moments->m01 * cy;

    moments->mu20 = mu20;
    moments->mu11 = mu11;
    moments->mu02 = mu02;

    // third-order central moments
    // mu30 = m30 - cx*(3*mu20 + cx*m10)
    moments->mu30 = moments->m30 - cx * (3 * mu20 + cx * moments->m10);
    mu11 += mu11;   // local mu11 now holds 2*mu11 for the two formulas below
    // mu21 = m21 - cx*(2*mu11 + cx*m01) - cy*mu20
    moments->mu21 = moments->m21 - cx * (mu11 + cx * moments->m01) - cy * mu20;
    // mu12 = m12 - cy*(2*mu11 + cy*m10) - cx*mu02
    moments->mu12 = moments->m12 - cy * (mu11 + cy * moments->m10) - cx * mu02;
    // mu03 = m03 - cy*(3*mu02 + cy*m01)
    moments->mu03 = moments->m03 - cy * (3 * mu02 + cy * moments->m01);
}
+ xii_1 = xi_1 + xi; + yii_1 = yi_1 + yi; + + a00 += dxy; + a10 += dxy * xii_1; + a01 += dxy * yii_1; + a20 += dxy * (xi_1 * xii_1 + xi2); + a11 += dxy * (xi_1 * (yii_1 + yi_1) + xi * (yii_1 + yi)); + a02 += dxy * (yi_1 * yii_1 + yi2); + a30 += dxy * xii_1 * (xi_12 + xi2); + a03 += dxy * yii_1 * (yi_12 + yi2); + a21 += + dxy * (xi_12 * (3 * yi_1 + yi) + 2 * xi * xi_1 * yii_1 + + xi2 * (yi_1 + 3 * yi)); + a12 += + dxy * (yi_12 * (3 * xi_1 + xi) + 2 * yi * yi_1 * xii_1 + + yi2 * (xi_1 + 3 * xi)); + + xi_1 = xi; + yi_1 = yi; + xi_12 = xi2; + yi_12 = yi2; + } + + double db1_2, db1_6, db1_12, db1_24, db1_20, db1_60; + + if( fabs(a00) > FLT_EPSILON ) + { + if( a00 > 0 ) + { + db1_2 = 0.5; + db1_6 = 0.16666666666666666666666666666667; + db1_12 = 0.083333333333333333333333333333333; + db1_24 = 0.041666666666666666666666666666667; + db1_20 = 0.05; + db1_60 = 0.016666666666666666666666666666667; + } + else + { + db1_2 = -0.5; + db1_6 = -0.16666666666666666666666666666667; + db1_12 = -0.083333333333333333333333333333333; + db1_24 = -0.041666666666666666666666666666667; + db1_20 = -0.05; + db1_60 = -0.016666666666666666666666666666667; + } + + // spatial moments + moments->m00 = a00 * db1_2; + moments->m10 = a10 * db1_6; + moments->m01 = a01 * db1_6; + moments->m20 = a20 * db1_12; + moments->m11 = a11 * db1_24; + moments->m02 = a02 * db1_12; + moments->m30 = a30 * db1_20; + moments->m21 = a21 * db1_60; + moments->m12 = a12 * db1_60; + moments->m03 = a03 * db1_20; + + icvCompleteMomentState( moments ); + } + } +} + + +/****************************************************************************************\ +* Spatial Raster Moments * +\****************************************************************************************/ + +template +static void momentsInTile( const cv::Mat& img, double* moments ) +{ + cv::Size size = img.size(); + int x, y; + MT mom[10] = {0,0,0,0,0,0,0,0,0,0}; + + for( y = 0; y < size.height; y++ ) + { + const T* ptr = (const T*)(img.data + y*img.step); + 
WT x0 = 0, x1 = 0, x2 = 0; + MT x3 = 0; + + for( x = 0; x < size.width; x++ ) + { + WT p = ptr[x]; + WT xp = x * p, xxp; + + x0 += p; + x1 += xp; + xxp = xp * x; + x2 += xxp; + x3 += xxp * x; + } + + WT py = y * x0, sy = y*y; + + mom[9] += ((MT)py) * sy; // m03 + mom[8] += ((MT)x1) * sy; // m12 + mom[7] += ((MT)x2) * y; // m21 + mom[6] += x3; // m30 + mom[5] += x0 * sy; // m02 + mom[4] += x1 * y; // m11 + mom[3] += x2; // m20 + mom[2] += py; // m01 + mom[1] += x1; // m10 + mom[0] += x0; // m00 + } + + for( x = 0; x < 10; x++ ) + moments[x] = (double)mom[x]; +} + + +#if CV_SSE2 + +template<> void momentsInTile( const cv::Mat& img, double* moments ) +{ + typedef uchar T; + typedef int WT; + typedef int MT; + cv::Size size = img.size(); + int x, y; + MT mom[10] = {0,0,0,0,0,0,0,0,0,0}; + bool useSIMD = cv::checkHardwareSupport(CV_CPU_SSE2); + + for( y = 0; y < size.height; y++ ) + { + const T* ptr = img.ptr(y); + int x0 = 0, x1 = 0, x2 = 0, x3 = 0, x = 0; + + if( useSIMD ) + { + __m128i qx_init = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7); + __m128i dx = _mm_set1_epi16(8); + __m128i z = _mm_setzero_si128(), qx0 = z, qx1 = z, qx2 = z, qx3 = z, qx = qx_init; + + for( ; x <= size.width - 8; x += 8 ) + { + __m128i p = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(ptr + x)), z); + qx0 = _mm_add_epi32(qx0, _mm_sad_epu8(p, z)); + __m128i px = _mm_mullo_epi16(p, qx); + __m128i sx = _mm_mullo_epi16(qx, qx); + qx1 = _mm_add_epi32(qx1, _mm_madd_epi16(p, qx)); + qx2 = _mm_add_epi32(qx2, _mm_madd_epi16(p, sx)); + qx3 = _mm_add_epi32(qx3, _mm_madd_epi16(px, sx)); + + qx = _mm_add_epi16(qx, dx); + } + int CV_DECL_ALIGNED(16) buf[4]; + _mm_store_si128((__m128i*)buf, qx0); + x0 = buf[0] + buf[1] + buf[2] + buf[3]; + _mm_store_si128((__m128i*)buf, qx1); + x1 = buf[0] + buf[1] + buf[2] + buf[3]; + _mm_store_si128((__m128i*)buf, qx2); + x2 = buf[0] + buf[1] + buf[2] + buf[3]; + _mm_store_si128((__m128i*)buf, qx3); + x3 = buf[0] + buf[1] + buf[2] + buf[3]; + } + + for( ; x < size.width; 
x++ ) + { + WT p = ptr[x]; + WT xp = x * p, xxp; + + x0 += p; + x1 += xp; + xxp = xp * x; + x2 += xxp; + x3 += xxp * x; + } + + WT py = y * x0, sy = y*y; + + mom[9] += ((MT)py) * sy; // m03 + mom[8] += ((MT)x1) * sy; // m12 + mom[7] += ((MT)x2) * y; // m21 + mom[6] += x3; // m30 + mom[5] += x0 * sy; // m02 + mom[4] += x1 * y; // m11 + mom[3] += x2; // m20 + mom[2] += py; // m01 + mom[1] += x1; // m10 + mom[0] += x0; // m00 + } + + for( x = 0; x < 10; x++ ) + moments[x] = (double)mom[x]; +} + +#endif + +typedef void (*CvMomentsInTileFunc)(const cv::Mat& img, double* moments); + +CV_IMPL void cvMoments( const void* array, CvMoments* moments, int binary ) +{ + const int TILE_SIZE = 32; + int type, depth, cn, coi = 0; + CvMat stub, *mat = (CvMat*)array; + CvMomentsInTileFunc func = 0; + CvContour contourHeader; + CvSeq* contour = 0; + CvSeqBlock block; + double buf[TILE_SIZE*TILE_SIZE]; + uchar nzbuf[TILE_SIZE*TILE_SIZE]; + + if( CV_IS_SEQ( array )) + { + contour = (CvSeq*)array; + if( !CV_IS_SEQ_POINT_SET( contour )) + CV_Error( CV_StsBadArg, "The passed sequence is not a valid contour" ); + } + + if( !moments ) + CV_Error( CV_StsNullPtr, "" ); + + memset( moments, 0, sizeof(*moments)); + + if( !contour ) + { + mat = cvGetMat( mat, &stub, &coi ); + type = CV_MAT_TYPE( mat->type ); + + if( type == CV_32SC2 || type == CV_32FC2 ) + { + contour = cvPointSeqFromMat( + CV_SEQ_KIND_CURVE | CV_SEQ_FLAG_CLOSED, + mat, &contourHeader, &block ); + } + } + + if( contour ) + { + icvContourMoments( contour, moments ); + return; + } + + type = CV_MAT_TYPE( mat->type ); + depth = CV_MAT_DEPTH( type ); + cn = CV_MAT_CN( type ); + + cv::Size size = cvGetMatSize( mat ); + + if( cn > 1 && coi == 0 ) + CV_Error( CV_StsBadArg, "Invalid image type" ); + + if( size.width <= 0 || size.height <= 0 ) + return; + + if( binary || depth == CV_8U ) + func = momentsInTile; + else if( depth == CV_16U ) + func = momentsInTile; + else if( depth == CV_16S ) + func = momentsInTile; + else if( depth == 
CV_32F ) + func = momentsInTile; + else if( depth == CV_64F ) + func = momentsInTile; + else + CV_Error( CV_StsUnsupportedFormat, "" ); + + cv::Mat src0(mat); + + for( int y = 0; y < size.height; y += TILE_SIZE ) + { + cv::Size tileSize; + tileSize.height = std::min(TILE_SIZE, size.height - y); + + for( int x = 0; x < size.width; x += TILE_SIZE ) + { + tileSize.width = std::min(TILE_SIZE, size.width - x); + cv::Mat src(src0, cv::Rect(x, y, tileSize.width, tileSize.height)); + + if( coi > 0 ) + { + cv::Mat tmp(tileSize, depth, buf); + int pairs[] = {coi-1, 0}; + cv::mixChannels(&src, 1, &tmp, 1, pairs, 1); + src = tmp; + } + if( binary ) + { + cv::Mat tmp(tileSize, CV_8U, nzbuf); + cv::compare( src, 0, tmp, CV_CMP_NE ); + src = tmp; + } + + double mom[10]; + func( src, mom ); + + if(binary) + { + double s = 1./255; + for( int k = 0; k < 10; k++ ) + mom[k] *= s; + } + + double xm = x * mom[0], ym = y * mom[0]; + + // accumulate moments computed in each tile + + // + m00 ( = m00' ) + moments->m00 += mom[0]; + + // + m10 ( = m10' + x*m00' ) + moments->m10 += mom[1] + xm; + + // + m01 ( = m01' + y*m00' ) + moments->m01 += mom[2] + ym; + + // + m20 ( = m20' + 2*x*m10' + x*x*m00' ) + moments->m20 += mom[3] + x * (mom[1] * 2 + xm); + + // + m11 ( = m11' + x*m01' + y*m10' + x*y*m00' ) + moments->m11 += mom[4] + x * (mom[2] + ym) + y * mom[1]; + + // + m02 ( = m02' + 2*y*m01' + y*y*m00' ) + moments->m02 += mom[5] + y * (mom[2] * 2 + ym); + + // + m30 ( = m30' + 3*x*m20' + 3*x*x*m10' + x*x*x*m00' ) + moments->m30 += mom[6] + x * (3. * mom[3] + x * (3. 
* mom[1] + xm)); + + // + m21 ( = m21' + x*(2*m11' + 2*y*m10' + x*m01' + x*y*m00') + y*m20') + moments->m21 += mom[7] + x * (2 * (mom[4] + y * mom[1]) + x * (mom[2] + ym)) + y * mom[3]; + + // + m12 ( = m12' + y*(2*m11' + 2*x*m01' + y*m10' + x*y*m00') + x*m02') + moments->m12 += mom[8] + y * (2 * (mom[4] + x * mom[2]) + y * (mom[1] + xm)) + x * mom[5]; + + // + m03 ( = m03' + 3*y*m02' + 3*y*y*m01' + y*y*y*m00' ) + moments->m03 += mom[9] + y * (3. * mom[5] + y * (3. * mom[2] + ym)); + } + } + + icvCompleteMomentState( moments ); +} + + +CV_IMPL void cvGetHuMoments( CvMoments * mState, CvHuMoments * HuState ) +{ + if( !mState || !HuState ) + CV_Error( CV_StsNullPtr, "" ); + + double m00s = mState->inv_sqrt_m00, m00 = m00s * m00s, s2 = m00 * m00, s3 = s2 * m00s; + + double nu20 = mState->mu20 * s2, + nu11 = mState->mu11 * s2, + nu02 = mState->mu02 * s2, + nu30 = mState->mu30 * s3, + nu21 = mState->mu21 * s3, nu12 = mState->mu12 * s3, nu03 = mState->mu03 * s3; + + double t0 = nu30 + nu12; + double t1 = nu21 + nu03; + + double q0 = t0 * t0, q1 = t1 * t1; + + double n4 = 4 * nu11; + double s = nu20 + nu02; + double d = nu20 - nu02; + + HuState->hu1 = s; + HuState->hu2 = d * d + n4 * nu11; + HuState->hu4 = q0 + q1; + HuState->hu6 = d * (q0 - q1) + n4 * t0 * t1; + + t0 *= q0 - 3 * q1; + t1 *= 3 * q0 - q1; + + q0 = nu30 - 3 * nu12; + q1 = 3 * nu21 - nu03; + + HuState->hu3 = q0 * q0 + q1 * q1; + HuState->hu5 = q0 * t0 + q1 * t1; + HuState->hu7 = q1 * t0 - q0 * t1; +} + + +CV_IMPL double cvGetSpatialMoment( CvMoments * moments, int x_order, int y_order ) +{ + int order = x_order + y_order; + + if( !moments ) + CV_Error( CV_StsNullPtr, "" ); + if( (x_order | y_order) < 0 || order > 3 ) + CV_Error( CV_StsOutOfRange, "" ); + + return (&(moments->m00))[order + (order >> 1) + (order > 2) * 2 + y_order]; +} + + +CV_IMPL double cvGetCentralMoment( CvMoments * moments, int x_order, int y_order ) +{ + int order = x_order + y_order; + + if( !moments ) + CV_Error( CV_StsNullPtr, "" ); + 
if( (x_order | y_order) < 0 || order > 3 ) + CV_Error( CV_StsOutOfRange, "" ); + + return order >= 2 ? (&(moments->m00))[4 + order * 3 + y_order] : + order == 0 ? moments->m00 : 0; +} + + +CV_IMPL double cvGetNormalizedCentralMoment( CvMoments * moments, int x_order, int y_order ) +{ + int order = x_order + y_order; + + double mu = cvGetCentralMoment( moments, x_order, y_order ); + double m00s = moments->inv_sqrt_m00; + + while( --order >= 0 ) + mu *= m00s; + return mu * m00s * m00s; +} + + +namespace cv +{ + +Moments::Moments() +{ + m00 = m10 = m01 = m20 = m11 = m02 = m30 = m21 = m12 = m03 = + mu20 = mu11 = mu02 = mu30 = mu21 = mu12 = mu03 = + nu20 = nu11 = nu02 = nu30 = nu21 = nu12 = nu03 = 0.; +} + +Moments::Moments( double _m00, double _m10, double _m01, double _m20, double _m11, + double _m02, double _m30, double _m21, double _m12, double _m03 ) +{ + m00 = _m00; m10 = _m10; m01 = _m01; + m20 = _m20; m11 = _m11; m02 = _m02; + m30 = _m30; m21 = _m21; m12 = _m12; m03 = _m03; + + double cx = 0, cy = 0, inv_m00 = 0; + if( std::abs(m00) > DBL_EPSILON ) + { + inv_m00 = 1./m00; + cx = m10*inv_m00; cy = m01*inv_m00; + } + + mu20 = m20 - m10*cx; + mu11 = m11 - m10*cy; + mu02 = m02 - m01*cy; + + mu30 = m30 - cx*(3*mu20 + cx*m10); + mu21 = m21 - cx*(2*mu11 + cx*m01) - cy*mu20; + mu12 = m12 - cy*(2*mu11 + cy*m10) - cx*mu02; + mu03 = m03 - cy*(3*mu02 + cy*m01); + + double inv_sqrt_m00 = std::sqrt(std::abs(inv_m00)); + double s2 = inv_m00*inv_m00, s3 = s2*inv_sqrt_m00; + + nu20 = mu20*s2; nu11 = mu11*s2; nu02 = mu02*s2; + nu30 = mu30*s3; nu21 = mu21*s3; nu12 = mu12*s3; nu03 = mu03*s3; +} + +Moments::Moments( const CvMoments& m ) +{ + *this = Moments(m.m00, m.m10, m.m01, m.m20, m.m11, m.m02, m.m30, m.m21, m.m12, m.m03); +} + +Moments::operator CvMoments() const +{ + CvMoments m; + m.m00 = m00; m.m10 = m10; m.m01 = m01; + m.m20 = m20; m.m11 = m11; m.m02 = m02; + m.m30 = m30; m.m21 = m21; m.m12 = m12; m.m03 = m03; + m.mu20 = mu20; m.mu11 = mu11; m.mu02 = mu02; + m.mu30 = mu30; 
m.mu21 = mu21; m.mu12 = mu12; m.mu03 = mu03; + double am00 = std::abs(m00); + m.inv_sqrt_m00 = am00 > DBL_EPSILON ? 1./std::sqrt(am00) : 0; + + return m; +} + +} + +cv::Moments cv::moments( InputArray _array, bool binaryImage ) +{ + CvMoments om; + Mat arr = _array.getMat(); + CvMat c_array = arr; + cvMoments(&c_array, &om, binaryImage); + return om; +} + +void cv::HuMoments( const Moments& m, double hu[7] ) +{ + double t0 = m.nu30 + m.nu12; + double t1 = m.nu21 + m.nu03; + + double q0 = t0 * t0, q1 = t1 * t1; + + double n4 = 4 * m.nu11; + double s = m.nu20 + m.nu02; + double d = m.nu20 - m.nu02; + + hu[0] = s; + hu[1] = d * d + n4 * m.nu11; + hu[3] = q0 + q1; + hu[5] = d * (q0 - q1) + n4 * t0 * t1; + + t0 *= q0 - 3 * q1; + t1 *= 3 * q0 - q1; + + q0 = m.nu30 - 3 * m.nu12; + q1 = 3 * m.nu21 - m.nu03; + + hu[2] = q0 * q0 + q1 * q1; + hu[4] = q0 * t0 + q1 * t1; + hu[6] = q1 * t0 - q0 * t1; +} + +void cv::HuMoments( const Moments& m, OutputArray _hu ) +{ + _hu.create(7, 1, CV_64F); + Mat hu = _hu.getMat(); + CV_Assert( hu.isContinuous() ); + HuMoments(m, (double*)hu.data); +} + +/* End of file. */ diff --git a/opencv/imgproc/morph.cpp b/opencv/imgproc/morph.cpp new file mode 100644 index 0000000..389a6a2 --- /dev/null +++ b/opencv/imgproc/morph.cpp @@ -0,0 +1,1261 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#include "precomp.hpp" +#include +#include + +/****************************************************************************************\ + Basic Morphological Operations: Erosion & Dilation +\****************************************************************************************/ + +namespace cv +{ + +template struct MinOp +{ + typedef T type1; + typedef T type2; + typedef T rtype; + T operator ()(T a, T b) const { return std::min(a, b); } +}; + +template struct MaxOp +{ + typedef T type1; + typedef T type2; + typedef T rtype; + T operator ()(T a, T b) const { return std::max(a, b); } +}; + +#undef CV_MIN_8U +#undef CV_MAX_8U +#define CV_MIN_8U(a,b) ((a) - CV_FAST_CAST_8U((a) - (b))) +#define CV_MAX_8U(a,b) ((a) + CV_FAST_CAST_8U((b) - (a))) + +template<> inline uchar MinOp::operator ()(uchar a, uchar b) const { return CV_MIN_8U(a, b); } +template<> inline uchar MaxOp::operator ()(uchar a, uchar b) const { return CV_MAX_8U(a, b); } + +#if CV_SSE2 + +template struct MorphRowIVec +{ + enum { ESZ = VecUpdate::ESZ }; + + MorphRowIVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {} + int operator()(const uchar* src, uchar* dst, int width, int cn) const + { + if( !checkHardwareSupport(CV_CPU_SSE2) ) + return 0; + + cn *= ESZ; + int i, k, _ksize = ksize*cn; + width = (width & -4)*cn; + VecUpdate updateOp; + + for( i = 0; i <= width - 16; i += 16 ) + { + __m128i s = _mm_loadu_si128((const __m128i*)(src + i)); + for( k = cn; k < _ksize; k += cn ) + { + __m128i x = _mm_loadu_si128((const __m128i*)(src + i + k)); + s = updateOp(s, x); + } + _mm_storeu_si128((__m128i*)(dst + i), s); + } + + for( ; i < width; i += 4 ) + { + __m128i s = _mm_cvtsi32_si128(*(const int*)(src + i)); + for( k = cn; k < _ksize; k += cn ) + { + __m128i x = _mm_cvtsi32_si128(*(const int*)(src + i + k)); + s = updateOp(s, x); + } + *(int*)(dst + i) = _mm_cvtsi128_si32(s); + } + + return i/ESZ; + } + + int ksize, anchor; +}; + + +template struct MorphRowFVec +{ + 
MorphRowFVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {} + int operator()(const uchar* src, uchar* dst, int width, int cn) const + { + if( !checkHardwareSupport(CV_CPU_SSE) ) + return 0; + + int i, k, _ksize = ksize*cn; + width = (width & -4)*cn; + VecUpdate updateOp; + + for( i = 0; i < width; i += 4 ) + { + __m128 s = _mm_loadu_ps((const float*)src + i); + for( k = cn; k < _ksize; k += cn ) + { + __m128 x = _mm_loadu_ps((const float*)src + i + k); + s = updateOp(s, x); + } + _mm_storeu_ps((float*)dst + i, s); + } + + return i; + } + + int ksize, anchor; +}; + + +template struct MorphColumnIVec +{ + enum { ESZ = VecUpdate::ESZ }; + + MorphColumnIVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {} + int operator()(const uchar** src, uchar* dst, int dststep, int count, int width) const + { + if( !checkHardwareSupport(CV_CPU_SSE2) ) + return 0; + + int i = 0, k, _ksize = ksize; + width *= ESZ; + VecUpdate updateOp; + + for( i = 0; i < count + ksize - 1; i++ ) + CV_Assert( ((size_t)src[i] & 15) == 0 ); + + for( ; _ksize > 1 && count > 1; count -= 2, dst += dststep*2, src += 2 ) + { + for( i = 0; i <= width - 32; i += 32 ) + { + const uchar* sptr = src[1] + i; + __m128i s0 = _mm_load_si128((const __m128i*)sptr); + __m128i s1 = _mm_load_si128((const __m128i*)(sptr + 16)); + __m128i x0, x1; + + for( k = 2; k < _ksize; k++ ) + { + sptr = src[k] + i; + x0 = _mm_load_si128((const __m128i*)sptr); + x1 = _mm_load_si128((const __m128i*)(sptr + 16)); + s0 = updateOp(s0, x0); + s1 = updateOp(s1, x1); + } + + sptr = src[0] + i; + x0 = _mm_load_si128((const __m128i*)sptr); + x1 = _mm_load_si128((const __m128i*)(sptr + 16)); + _mm_storeu_si128((__m128i*)(dst + i), updateOp(s0, x0)); + _mm_storeu_si128((__m128i*)(dst + i + 16), updateOp(s1, x1)); + + sptr = src[k] + i; + x0 = _mm_load_si128((const __m128i*)sptr); + x1 = _mm_load_si128((const __m128i*)(sptr + 16)); + _mm_storeu_si128((__m128i*)(dst + dststep + i), updateOp(s0, x0)); + 
_mm_storeu_si128((__m128i*)(dst + dststep + i + 16), updateOp(s1, x1)); + } + + for( ; i <= width - 8; i += 8 ) + { + __m128i s0 = _mm_loadl_epi64((const __m128i*)(src[1] + i)), x0; + + for( k = 2; k < _ksize; k++ ) + { + x0 = _mm_loadl_epi64((const __m128i*)(src[k] + i)); + s0 = updateOp(s0, x0); + } + + x0 = _mm_loadl_epi64((const __m128i*)(src[0] + i)); + _mm_storel_epi64((__m128i*)(dst + i), updateOp(s0, x0)); + x0 = _mm_loadl_epi64((const __m128i*)(src[k] + i)); + _mm_storel_epi64((__m128i*)(dst + dststep + i), updateOp(s0, x0)); + } + } + + for( ; count > 0; count--, dst += dststep, src++ ) + { + for( i = 0; i <= width - 32; i += 32 ) + { + const uchar* sptr = src[0] + i; + __m128i s0 = _mm_load_si128((const __m128i*)sptr); + __m128i s1 = _mm_load_si128((const __m128i*)(sptr + 16)); + __m128i x0, x1; + + for( k = 1; k < _ksize; k++ ) + { + sptr = src[k] + i; + x0 = _mm_load_si128((const __m128i*)sptr); + x1 = _mm_load_si128((const __m128i*)(sptr + 16)); + s0 = updateOp(s0, x0); + s1 = updateOp(s1, x1); + } + _mm_storeu_si128((__m128i*)(dst + i), s0); + _mm_storeu_si128((__m128i*)(dst + i + 16), s1); + } + + for( ; i <= width - 8; i += 8 ) + { + __m128i s0 = _mm_loadl_epi64((const __m128i*)(src[0] + i)), x0; + + for( k = 1; k < _ksize; k++ ) + { + x0 = _mm_loadl_epi64((const __m128i*)(src[k] + i)); + s0 = updateOp(s0, x0); + } + _mm_storel_epi64((__m128i*)(dst + i), s0); + } + } + + return i/ESZ; + } + + int ksize, anchor; +}; + + +template struct MorphColumnFVec +{ + MorphColumnFVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {} + int operator()(const uchar** _src, uchar* _dst, int dststep, int count, int width) const + { + if( !checkHardwareSupport(CV_CPU_SSE) ) + return 0; + + int i = 0, k, _ksize = ksize; + VecUpdate updateOp; + + for( i = 0; i < count + ksize - 1; i++ ) + CV_Assert( ((size_t)_src[i] & 15) == 0 ); + + const float** src = (const float**)_src; + float* dst = (float*)_dst; + dststep /= sizeof(dst[0]); + + for( ; _ksize > 1 && 
count > 1; count -= 2, dst += dststep*2, src += 2 ) + { + for( i = 0; i <= width - 16; i += 16 ) + { + const float* sptr = src[1] + i; + __m128 s0 = _mm_load_ps(sptr); + __m128 s1 = _mm_load_ps(sptr + 4); + __m128 s2 = _mm_load_ps(sptr + 8); + __m128 s3 = _mm_load_ps(sptr + 12); + __m128 x0, x1, x2, x3; + + for( k = 2; k < _ksize; k++ ) + { + sptr = src[k] + i; + x0 = _mm_load_ps(sptr); + x1 = _mm_load_ps(sptr + 4); + s0 = updateOp(s0, x0); + s1 = updateOp(s1, x1); + x2 = _mm_load_ps(sptr + 8); + x3 = _mm_load_ps(sptr + 12); + s2 = updateOp(s2, x2); + s3 = updateOp(s3, x3); + } + + sptr = src[0] + i; + x0 = _mm_load_ps(sptr); + x1 = _mm_load_ps(sptr + 4); + x2 = _mm_load_ps(sptr + 8); + x3 = _mm_load_ps(sptr + 12); + _mm_storeu_ps(dst + i, updateOp(s0, x0)); + _mm_storeu_ps(dst + i + 4, updateOp(s1, x1)); + _mm_storeu_ps(dst + i + 8, updateOp(s2, x2)); + _mm_storeu_ps(dst + i + 12, updateOp(s3, x3)); + + sptr = src[k] + i; + x0 = _mm_load_ps(sptr); + x1 = _mm_load_ps(sptr + 4); + x2 = _mm_load_ps(sptr + 8); + x3 = _mm_load_ps(sptr + 12); + _mm_storeu_ps(dst + dststep + i, updateOp(s0, x0)); + _mm_storeu_ps(dst + dststep + i + 4, updateOp(s1, x1)); + _mm_storeu_ps(dst + dststep + i + 8, updateOp(s2, x2)); + _mm_storeu_ps(dst + dststep + i + 12, updateOp(s3, x3)); + } + + for( ; i <= width - 4; i += 4 ) + { + __m128 s0 = _mm_load_ps(src[1] + i), x0; + + for( k = 2; k < _ksize; k++ ) + { + x0 = _mm_load_ps(src[k] + i); + s0 = updateOp(s0, x0); + } + + x0 = _mm_load_ps(src[0] + i); + _mm_storeu_ps(dst + i, updateOp(s0, x0)); + x0 = _mm_load_ps(src[k] + i); + _mm_storeu_ps(dst + dststep + i, updateOp(s0, x0)); + } + } + + for( ; count > 0; count--, dst += dststep, src++ ) + { + for( i = 0; i <= width - 16; i += 16 ) + { + const float* sptr = src[0] + i; + __m128 s0 = _mm_load_ps(sptr); + __m128 s1 = _mm_load_ps(sptr + 4); + __m128 s2 = _mm_load_ps(sptr + 8); + __m128 s3 = _mm_load_ps(sptr + 12); + __m128 x0, x1, x2, x3; + + for( k = 1; k < _ksize; k++ ) + { + sptr = 
src[k] + i; + x0 = _mm_load_ps(sptr); + x1 = _mm_load_ps(sptr + 4); + s0 = updateOp(s0, x0); + s1 = updateOp(s1, x1); + x2 = _mm_load_ps(sptr + 8); + x3 = _mm_load_ps(sptr + 12); + s2 = updateOp(s2, x2); + s3 = updateOp(s3, x3); + } + _mm_storeu_ps(dst + i, s0); + _mm_storeu_ps(dst + i + 4, s1); + _mm_storeu_ps(dst + i + 8, s2); + _mm_storeu_ps(dst + i + 12, s3); + } + + for( i = 0; i <= width - 4; i += 4 ) + { + __m128 s0 = _mm_load_ps(src[0] + i), x0; + for( k = 1; k < _ksize; k++ ) + { + x0 = _mm_load_ps(src[k] + i); + s0 = updateOp(s0, x0); + } + _mm_storeu_ps(dst + i, s0); + } + } + + return i; + } + + int ksize, anchor; +}; + + +template struct MorphIVec +{ + enum { ESZ = VecUpdate::ESZ }; + + int operator()(uchar** src, int nz, uchar* dst, int width) const + { + if( !checkHardwareSupport(CV_CPU_SSE2) ) + return 0; + + int i, k; + width *= ESZ; + VecUpdate updateOp; + + for( i = 0; i <= width - 32; i += 32 ) + { + const uchar* sptr = src[0] + i; + __m128i s0 = _mm_loadu_si128((const __m128i*)sptr); + __m128i s1 = _mm_loadu_si128((const __m128i*)(sptr + 16)); + __m128i x0, x1; + + for( k = 1; k < nz; k++ ) + { + sptr = src[k] + i; + x0 = _mm_loadu_si128((const __m128i*)sptr); + x1 = _mm_loadu_si128((const __m128i*)(sptr + 16)); + s0 = updateOp(s0, x0); + s1 = updateOp(s1, x1); + } + _mm_storeu_si128((__m128i*)(dst + i), s0); + _mm_storeu_si128((__m128i*)(dst + i + 16), s1); + } + + for( ; i <= width - 8; i += 8 ) + { + __m128i s0 = _mm_loadl_epi64((const __m128i*)(src[0] + i)), x0; + + for( k = 1; k < nz; k++ ) + { + x0 = _mm_loadl_epi64((const __m128i*)(src[k] + i)); + s0 = updateOp(s0, x0); + } + _mm_storel_epi64((__m128i*)(dst + i), s0); + } + + return i/ESZ; + } +}; + + +template struct MorphFVec +{ + int operator()(uchar** _src, int nz, uchar* _dst, int width) const + { + if( !checkHardwareSupport(CV_CPU_SSE) ) + return 0; + + const float** src = (const float**)_src; + float* dst = (float*)_dst; + int i, k; + VecUpdate updateOp; + + for( i = 0; i <= width 
- 16; i += 16 ) + { + const float* sptr = src[0] + i; + __m128 s0 = _mm_loadu_ps(sptr); + __m128 s1 = _mm_loadu_ps(sptr + 4); + __m128 s2 = _mm_loadu_ps(sptr + 8); + __m128 s3 = _mm_loadu_ps(sptr + 12); + __m128 x0, x1, x2, x3; + + for( k = 1; k < nz; k++ ) + { + sptr = src[k] + i; + x0 = _mm_loadu_ps(sptr); + x1 = _mm_loadu_ps(sptr + 4); + x2 = _mm_loadu_ps(sptr + 8); + x3 = _mm_loadu_ps(sptr + 12); + s0 = updateOp(s0, x0); + s1 = updateOp(s1, x1); + s2 = updateOp(s2, x2); + s3 = updateOp(s3, x3); + } + _mm_storeu_ps(dst + i, s0); + _mm_storeu_ps(dst + i + 4, s1); + _mm_storeu_ps(dst + i + 8, s2); + _mm_storeu_ps(dst + i + 12, s3); + } + + for( ; i <= width - 4; i += 4 ) + { + __m128 s0 = _mm_loadu_ps(src[0] + i), x0; + + for( k = 1; k < nz; k++ ) + { + x0 = _mm_loadu_ps(src[k] + i); + s0 = updateOp(s0, x0); + } + _mm_storeu_ps(dst + i, s0); + } + + for( ; i < width; i++ ) + { + __m128 s0 = _mm_load_ss(src[0] + i), x0; + + for( k = 1; k < nz; k++ ) + { + x0 = _mm_load_ss(src[k] + i); + s0 = updateOp(s0, x0); + } + _mm_store_ss(dst + i, s0); + } + + return i; + } +}; + +struct VMin8u +{ + enum { ESZ = 1 }; + __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_min_epu8(a,b); } +}; +struct VMax8u +{ + enum { ESZ = 1 }; + __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_max_epu8(a,b); } +}; +struct VMin16u +{ + enum { ESZ = 2 }; + __m128i operator()(const __m128i& a, const __m128i& b) const + { return _mm_subs_epu16(a,_mm_subs_epu16(a,b)); } +}; +struct VMax16u +{ + enum { ESZ = 2 }; + __m128i operator()(const __m128i& a, const __m128i& b) const + { return _mm_adds_epu16(_mm_subs_epu16(a,b), b); } +}; +struct VMin16s +{ + enum { ESZ = 2 }; + __m128i operator()(const __m128i& a, const __m128i& b) const + { return _mm_min_epi16(a, b); } +}; +struct VMax16s +{ + enum { ESZ = 2 }; + __m128i operator()(const __m128i& a, const __m128i& b) const + { return _mm_max_epi16(a, b); } +}; +struct VMin32f { __m128 operator()(const 
__m128& a, const __m128& b) const { return _mm_min_ps(a,b); }}; +struct VMax32f { __m128 operator()(const __m128& a, const __m128& b) const { return _mm_max_ps(a,b); }}; + +typedef MorphRowIVec ErodeRowVec8u; +typedef MorphRowIVec DilateRowVec8u; +typedef MorphRowIVec ErodeRowVec16u; +typedef MorphRowIVec DilateRowVec16u; +typedef MorphRowIVec ErodeRowVec16s; +typedef MorphRowIVec DilateRowVec16s; +typedef MorphRowFVec ErodeRowVec32f; +typedef MorphRowFVec DilateRowVec32f; + +typedef MorphColumnIVec ErodeColumnVec8u; +typedef MorphColumnIVec DilateColumnVec8u; +typedef MorphColumnIVec ErodeColumnVec16u; +typedef MorphColumnIVec DilateColumnVec16u; +typedef MorphColumnIVec ErodeColumnVec16s; +typedef MorphColumnIVec DilateColumnVec16s; +typedef MorphColumnFVec ErodeColumnVec32f; +typedef MorphColumnFVec DilateColumnVec32f; + +typedef MorphIVec ErodeVec8u; +typedef MorphIVec DilateVec8u; +typedef MorphIVec ErodeVec16u; +typedef MorphIVec DilateVec16u; +typedef MorphIVec ErodeVec16s; +typedef MorphIVec DilateVec16s; +typedef MorphFVec ErodeVec32f; +typedef MorphFVec DilateVec32f; + +#else + +struct MorphRowNoVec +{ + MorphRowNoVec(int, int) {} + int operator()(const uchar*, uchar*, int, int) const { return 0; } +}; + +struct MorphColumnNoVec +{ + MorphColumnNoVec(int, int) {} + int operator()(const uchar**, uchar*, int, int, int) const { return 0; } +}; + +struct MorphNoVec +{ + int operator()(uchar**, int, uchar*, int) const { return 0; } +}; + +typedef MorphRowNoVec ErodeRowVec8u; +typedef MorphRowNoVec DilateRowVec8u; +typedef MorphRowNoVec ErodeRowVec16u; +typedef MorphRowNoVec DilateRowVec16u; +typedef MorphRowNoVec ErodeRowVec16s; +typedef MorphRowNoVec DilateRowVec16s; +typedef MorphRowNoVec ErodeRowVec32f; +typedef MorphRowNoVec DilateRowVec32f; + +typedef MorphColumnNoVec ErodeColumnVec8u; +typedef MorphColumnNoVec DilateColumnVec8u; +typedef MorphColumnNoVec ErodeColumnVec16u; +typedef MorphColumnNoVec DilateColumnVec16u; +typedef MorphColumnNoVec 
ErodeColumnVec16s; +typedef MorphColumnNoVec DilateColumnVec16s; +typedef MorphColumnNoVec ErodeColumnVec32f; +typedef MorphColumnNoVec DilateColumnVec32f; + +typedef MorphNoVec ErodeVec8u; +typedef MorphNoVec DilateVec8u; +typedef MorphNoVec ErodeVec16u; +typedef MorphNoVec DilateVec16u; +typedef MorphNoVec ErodeVec16s; +typedef MorphNoVec DilateVec16s; +typedef MorphNoVec ErodeVec32f; +typedef MorphNoVec DilateVec32f; + +#endif + +template struct MorphRowFilter : public BaseRowFilter +{ + typedef typename Op::rtype T; + + MorphRowFilter( int _ksize, int _anchor ) : vecOp(_ksize, _anchor) + { + ksize = _ksize; + anchor = _anchor; + } + + void operator()(const uchar* src, uchar* dst, int width, int cn) + { + int i, j, k, _ksize = ksize*cn; + const T* S = (const T*)src; + Op op; + T* D = (T*)dst; + + if( _ksize == cn ) + { + for( i = 0; i < width*cn; i++ ) + D[i] = S[i]; + return; + } + + int i0 = vecOp(src, dst, width, cn); + width *= cn; + + for( k = 0; k < cn; k++, S++, D++ ) + { + for( i = i0; i <= width - cn*2; i += cn*2 ) + { + const T* s = S + i; + T m = s[cn]; + for( j = cn*2; j < _ksize; j += cn ) + m = op(m, s[j]); + D[i] = op(m, s[0]); + D[i+cn] = op(m, s[j]); + } + + for( ; i < width; i += cn ) + { + const T* s = S + i; + T m = s[0]; + for( j = cn; j < _ksize; j += cn ) + m = op(m, s[j]); + D[i] = m; + } + } + } + + VecOp vecOp; +}; + + +template struct MorphColumnFilter : public BaseColumnFilter +{ + typedef typename Op::rtype T; + + MorphColumnFilter( int _ksize, int _anchor ) : vecOp(_ksize, _anchor) + { + ksize = _ksize; + anchor = _anchor; + } + + void operator()(const uchar** _src, uchar* dst, int dststep, int count, int width) + { + int i, k, _ksize = ksize; + const T** src = (const T**)_src; + T* D = (T*)dst; + Op op; + + int i0 = vecOp(_src, dst, dststep, count, width); + dststep /= sizeof(D[0]); + + for( ; _ksize > 1 && count > 1; count -= 2, D += dststep*2, src += 2 ) + { + for( i = i0; i <= width - 4; i += 4 ) + { + const T* sptr = src[1] + 
i; + T s0 = sptr[0], s1 = sptr[1], s2 = sptr[2], s3 = sptr[3]; + + for( k = 2; k < _ksize; k++ ) + { + sptr = src[k] + i; + s0 = op(s0, sptr[0]); s1 = op(s1, sptr[1]); + s2 = op(s2, sptr[2]); s3 = op(s3, sptr[3]); + } + + sptr = src[0] + i; + D[i] = op(s0, sptr[0]); + D[i+1] = op(s1, sptr[1]); + D[i+2] = op(s2, sptr[2]); + D[i+3] = op(s3, sptr[3]); + + sptr = src[k] + i; + D[i+dststep] = op(s0, sptr[0]); + D[i+dststep+1] = op(s1, sptr[1]); + D[i+dststep+2] = op(s2, sptr[2]); + D[i+dststep+3] = op(s3, sptr[3]); + } + + for( ; i < width; i++ ) + { + T s0 = src[1][i]; + + for( k = 2; k < _ksize; k++ ) + s0 = op(s0, src[k][i]); + + D[i] = op(s0, src[0][i]); + D[i+dststep] = op(s0, src[k][i]); + } + } + + for( ; count > 0; count--, D += dststep, src++ ) + { + for( i = i0; i <= width - 4; i += 4 ) + { + const T* sptr = src[0] + i; + T s0 = sptr[0], s1 = sptr[1], s2 = sptr[2], s3 = sptr[3]; + + for( k = 1; k < _ksize; k++ ) + { + sptr = src[k] + i; + s0 = op(s0, sptr[0]); s1 = op(s1, sptr[1]); + s2 = op(s2, sptr[2]); s3 = op(s3, sptr[3]); + } + + D[i] = s0; D[i+1] = s1; + D[i+2] = s2; D[i+3] = s3; + } + + for( ; i < width; i++ ) + { + T s0 = src[0][i]; + for( k = 1; k < _ksize; k++ ) + s0 = op(s0, src[k][i]); + D[i] = s0; + } + } + } + + VecOp vecOp; +}; + + +template struct MorphFilter : BaseFilter +{ + typedef typename Op::rtype T; + + MorphFilter( const Mat& _kernel, Point _anchor ) + { + anchor = _anchor; + ksize = _kernel.size(); + CV_Assert( _kernel.type() == CV_8U ); + + vector coeffs; // we do not really the values of non-zero + // kernel elements, just their locations + preprocess2DKernel( _kernel, coords, coeffs ); + ptrs.resize( coords.size() ); + } + + void operator()(const uchar** src, uchar* dst, int dststep, int count, int width, int cn) + { + const Point* pt = &coords[0]; + const T** kp = (const T**)&ptrs[0]; + int i, k, nz = (int)coords.size(); + Op op; + + width *= cn; + for( ; count > 0; count--, dst += dststep, src++ ) + { + T* D = (T*)dst; + + for( k 
= 0; k < nz; k++ ) + kp[k] = (const T*)src[pt[k].y] + pt[k].x*cn; + + i = vecOp(&ptrs[0], nz, dst, width); + + for( ; i <= width - 4; i += 4 ) + { + const T* sptr = kp[0] + i; + T s0 = sptr[0], s1 = sptr[1], s2 = sptr[2], s3 = sptr[3]; + + for( k = 1; k < nz; k++ ) + { + sptr = kp[k] + i; + s0 = op(s0, sptr[0]); s1 = op(s1, sptr[1]); + s2 = op(s2, sptr[2]); s3 = op(s3, sptr[3]); + } + + D[i] = s0; D[i+1] = s1; + D[i+2] = s2; D[i+3] = s3; + } + + for( ; i < width; i++ ) + { + T s0 = kp[0][i]; + for( k = 1; k < nz; k++ ) + s0 = op(s0, kp[k][i]); + D[i] = s0; + } + } + } + + vector coords; + vector ptrs; + VecOp vecOp; +}; + +} + +/////////////////////////////////// External Interface ///////////////////////////////////// + +cv::Ptr cv::getMorphologyRowFilter(int op, int type, int ksize, int anchor) +{ + int depth = CV_MAT_DEPTH(type); + if( anchor < 0 ) + anchor = ksize/2; + CV_Assert( op == MORPH_ERODE || op == MORPH_DILATE ); + if( op == MORPH_ERODE ) + { + if( depth == CV_8U ) + return Ptr(new MorphRowFilter, + ErodeRowVec8u>(ksize, anchor)); + if( depth == CV_16U ) + return Ptr(new MorphRowFilter, + ErodeRowVec16u>(ksize, anchor)); + if( depth == CV_16S ) + return Ptr(new MorphRowFilter, + ErodeRowVec16s>(ksize, anchor)); + if( depth == CV_32F ) + return Ptr(new MorphRowFilter, + ErodeRowVec32f>(ksize, anchor)); + } + else + { + if( depth == CV_8U ) + return Ptr(new MorphRowFilter, + DilateRowVec8u>(ksize, anchor)); + if( depth == CV_16U ) + return Ptr(new MorphRowFilter, + DilateRowVec16u>(ksize, anchor)); + if( depth == CV_16S ) + return Ptr(new MorphRowFilter, + DilateRowVec16s>(ksize, anchor)); + if( depth == CV_32F ) + return Ptr(new MorphRowFilter, + DilateRowVec32f>(ksize, anchor)); + } + + CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type)); + return Ptr(0); +} + +cv::Ptr cv::getMorphologyColumnFilter(int op, int type, int ksize, int anchor) +{ + int depth = CV_MAT_DEPTH(type); + if( anchor < 0 ) + anchor = ksize/2; + CV_Assert( op == 
MORPH_ERODE || op == MORPH_DILATE ); + if( op == MORPH_ERODE ) + { + if( depth == CV_8U ) + return Ptr(new MorphColumnFilter, + ErodeColumnVec8u>(ksize, anchor)); + if( depth == CV_16U ) + return Ptr(new MorphColumnFilter, + ErodeColumnVec16u>(ksize, anchor)); + if( depth == CV_16S ) + return Ptr(new MorphColumnFilter, + ErodeColumnVec16s>(ksize, anchor)); + if( depth == CV_32F ) + return Ptr(new MorphColumnFilter, + ErodeColumnVec32f>(ksize, anchor)); + } + else + { + if( depth == CV_8U ) + return Ptr(new MorphColumnFilter, + DilateColumnVec8u>(ksize, anchor)); + if( depth == CV_16U ) + return Ptr(new MorphColumnFilter, + DilateColumnVec16u>(ksize, anchor)); + if( depth == CV_16S ) + return Ptr(new MorphColumnFilter, + DilateColumnVec16s>(ksize, anchor)); + if( depth == CV_32F ) + return Ptr(new MorphColumnFilter, + DilateColumnVec32f>(ksize, anchor)); + } + + CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type)); + return Ptr(0); +} + + +cv::Ptr cv::getMorphologyFilter(int op, int type, InputArray _kernel, Point anchor) +{ + Mat kernel = _kernel.getMat(); + int depth = CV_MAT_DEPTH(type); + anchor = normalizeAnchor(anchor, kernel.size()); + CV_Assert( op == MORPH_ERODE || op == MORPH_DILATE ); + if( op == MORPH_ERODE ) + { + if( depth == CV_8U ) + return Ptr(new MorphFilter, ErodeVec8u>(kernel, anchor)); + if( depth == CV_16U ) + return Ptr(new MorphFilter, ErodeVec16u>(kernel, anchor)); + if( depth == CV_16S ) + return Ptr(new MorphFilter, ErodeVec16s>(kernel, anchor)); + if( depth == CV_32F ) + return Ptr(new MorphFilter, ErodeVec32f>(kernel, anchor)); + } + else + { + if( depth == CV_8U ) + return Ptr(new MorphFilter, DilateVec8u>(kernel, anchor)); + if( depth == CV_16U ) + return Ptr(new MorphFilter, DilateVec16u>(kernel, anchor)); + if( depth == CV_16S ) + return Ptr(new MorphFilter, DilateVec16s>(kernel, anchor)); + if( depth == CV_32F ) + return Ptr(new MorphFilter, DilateVec32f>(kernel, anchor)); + } + + CV_Error_( CV_StsNotImplemented, 
("Unsupported data type (=%d)", type)); + return Ptr(0); +} + + +cv::Ptr cv::createMorphologyFilter( int op, int type, InputArray _kernel, + Point anchor, int _rowBorderType, int _columnBorderType, + const Scalar& _borderValue ) +{ + Mat kernel = _kernel.getMat(); + anchor = normalizeAnchor(anchor, kernel.size()); + + Ptr rowFilter; + Ptr columnFilter; + Ptr filter2D; + + if( countNonZero(kernel) == kernel.rows*kernel.cols ) + { + // rectangular structuring element + rowFilter = getMorphologyRowFilter(op, type, kernel.cols, anchor.x); + columnFilter = getMorphologyColumnFilter(op, type, kernel.rows, anchor.y); + } + else + filter2D = getMorphologyFilter(op, type, kernel, anchor); + + Scalar borderValue = _borderValue; + if( (_rowBorderType == BORDER_CONSTANT || _columnBorderType == BORDER_CONSTANT) && + borderValue == morphologyDefaultBorderValue() ) + { + int depth = CV_MAT_DEPTH(type); + CV_Assert( depth == CV_8U || depth == CV_16U || depth == CV_32F ); + if( op == MORPH_ERODE ) + borderValue = Scalar::all( depth == CV_8U ? (double)UCHAR_MAX : + depth == CV_16U ? (double)USHRT_MAX : (double)FLT_MAX ); + else + borderValue = Scalar::all( depth == CV_8U || depth == CV_16U ? + 0. : (double)-FLT_MAX ); + } + + return Ptr(new FilterEngine(filter2D, rowFilter, columnFilter, + type, type, type, _rowBorderType, _columnBorderType, borderValue )); +} + + +cv::Mat cv::getStructuringElement(int shape, Size ksize, Point anchor) +{ + int i, j; + int r = 0, c = 0; + double inv_r2 = 0; + + CV_Assert( shape == MORPH_RECT || shape == MORPH_CROSS || shape == MORPH_ELLIPSE ); + + anchor = normalizeAnchor(anchor, ksize); + + if( ksize == Size(1,1) ) + shape = MORPH_RECT; + + if( shape == MORPH_ELLIPSE ) + { + r = ksize.height/2; + c = ksize.width/2; + inv_r2 = r ? 
1./((double)r*r) : 0; + } + + Mat elem(ksize, CV_8U); + + for( i = 0; i < ksize.height; i++ ) + { + uchar* ptr = elem.data + i*elem.step; + int j1 = 0, j2 = 0; + + if( shape == MORPH_RECT || (shape == MORPH_CROSS && i == anchor.y) ) + j2 = ksize.width; + else if( shape == MORPH_CROSS ) + j1 = anchor.x, j2 = j1 + 1; + else + { + int dy = i - r; + if( std::abs(dy) <= r ) + { + int dx = saturate_cast(c*std::sqrt((r*r - dy*dy)*inv_r2)); + j1 = std::max( c - dx, 0 ); + j2 = std::min( c + dx + 1, ksize.width ); + } + } + + for( j = 0; j < j1; j++ ) + ptr[j] = 0; + for( ; j < j2; j++ ) + ptr[j] = 1; + for( ; j < ksize.width; j++ ) + ptr[j] = 0; + } + + return elem; +} + +namespace cv +{ + +static void morphOp( int op, InputArray _src, OutputArray _dst, + InputArray _kernel, + Point anchor, int iterations, + int borderType, const Scalar& borderValue ) +{ + Mat src = _src.getMat(), kernel = _kernel.getMat(); + Size ksize = kernel.data ? kernel.size() : Size(3,3); + anchor = normalizeAnchor(anchor, ksize); + + CV_Assert( anchor.inside(Rect(0, 0, ksize.width, ksize.height)) ); + + _dst.create( src.size(), src.type() ); + Mat dst = _dst.getMat(); + + if( iterations == 0 || kernel.rows*kernel.cols == 1 ) + { + src.copyTo(dst); + return; + } + + if( !kernel.data ) + { + kernel = getStructuringElement(MORPH_RECT, Size(1+iterations*2,1+iterations*2)); + anchor = Point(iterations, iterations); + iterations = 1; + } + else if( iterations > 1 && countNonZero(kernel) == kernel.rows*kernel.cols ) + { + anchor = Point(anchor.x*iterations, anchor.y*iterations); + kernel = getStructuringElement(MORPH_RECT, + Size(ksize.width + iterations*(ksize.width-1), + ksize.height + iterations*(ksize.height-1)), + anchor); + iterations = 1; + } + + Ptr f = createMorphologyFilter(op, src.type(), + kernel, anchor, borderType, borderType, borderValue ); + + f->apply( src, dst ); + for( int i = 1; i < iterations; i++ ) + f->apply( dst, dst ); +} + +template<> void Ptr::delete_obj() +{ 
cvReleaseStructuringElement(&obj); } + +} + +void cv::erode( InputArray src, OutputArray dst, InputArray kernel, + Point anchor, int iterations, + int borderType, const Scalar& borderValue ) +{ + morphOp( MORPH_ERODE, src, dst, kernel, anchor, iterations, borderType, borderValue ); +} + + +void cv::dilate( InputArray src, OutputArray dst, InputArray kernel, + Point anchor, int iterations, + int borderType, const Scalar& borderValue ) +{ + morphOp( MORPH_DILATE, src, dst, kernel, anchor, iterations, borderType, borderValue ); +} + + +void cv::morphologyEx( InputArray _src, OutputArray _dst, int op, + InputArray kernel, Point anchor, int iterations, + int borderType, const Scalar& borderValue ) +{ + Mat src = _src.getMat(), temp; + _dst.create(src.size(), src.type()); + Mat dst = _dst.getMat(); + + switch( op ) + { + case MORPH_ERODE: + erode( src, dst, kernel, anchor, iterations, borderType, borderValue ); + break; + case MORPH_DILATE: + dilate( src, dst, kernel, anchor, iterations, borderType, borderValue ); + break; + case MORPH_OPEN: + erode( src, dst, kernel, anchor, iterations, borderType, borderValue ); + dilate( dst, dst, kernel, anchor, iterations, borderType, borderValue ); + break; + case CV_MOP_CLOSE: + dilate( src, dst, kernel, anchor, iterations, borderType, borderValue ); + erode( dst, dst, kernel, anchor, iterations, borderType, borderValue ); + break; + case CV_MOP_GRADIENT: + erode( src, temp, kernel, anchor, iterations, borderType, borderValue ); + dilate( src, dst, kernel, anchor, iterations, borderType, borderValue ); + dst -= temp; + break; + case CV_MOP_TOPHAT: + if( src.data != dst.data ) + temp = dst; + erode( src, temp, kernel, anchor, iterations, borderType, borderValue ); + dilate( temp, temp, kernel, anchor, iterations, borderType, borderValue ); + dst = src - temp; + break; + case CV_MOP_BLACKHAT: + if( src.data != dst.data ) + temp = dst; + dilate( src, temp, kernel, anchor, iterations, borderType, borderValue ); + erode( temp, temp, 
kernel, anchor, iterations, borderType, borderValue ); + dst = temp - src; + break; + default: + CV_Error( CV_StsBadArg, "unknown morphological operation" ); + } +} + +CV_IMPL IplConvKernel * +cvCreateStructuringElementEx( int cols, int rows, + int anchorX, int anchorY, + int shape, int *values ) +{ + cv::Size ksize = cv::Size(cols, rows); + cv::Point anchor = cv::Point(anchorX, anchorY); + CV_Assert( cols > 0 && rows > 0 && anchor.inside(cv::Rect(0,0,cols,rows)) && + (shape != CV_SHAPE_CUSTOM || values != 0)); + + int i, size = rows * cols; + int element_size = sizeof(IplConvKernel) + size*sizeof(int); + IplConvKernel *element = (IplConvKernel*)cvAlloc(element_size + 32); + + element->nCols = cols; + element->nRows = rows; + element->anchorX = anchorX; + element->anchorY = anchorY; + element->nShiftR = shape < CV_SHAPE_ELLIPSE ? shape : CV_SHAPE_CUSTOM; + element->values = (int*)(element + 1); + + if( shape == CV_SHAPE_CUSTOM ) + { + for( i = 0; i < size; i++ ) + element->values[i] = values[i]; + } + else + { + cv::Mat elem = cv::getStructuringElement(shape, ksize, anchor); + for( i = 0; i < size; i++ ) + element->values[i] = elem.data[i]; + } + + return element; +} + + +CV_IMPL void +cvReleaseStructuringElement( IplConvKernel ** element ) +{ + if( !element ) + CV_Error( CV_StsNullPtr, "" ); + cvFree( element ); +} + + +static void convertConvKernel( const IplConvKernel* src, cv::Mat& dst, cv::Point& anchor ) +{ + if(!src) + { + anchor = cv::Point(1,1); + dst.release(); + return; + } + anchor = cv::Point(src->anchorX, src->anchorY); + dst.create(src->nRows, src->nCols, CV_8U); + + int i, size = src->nRows*src->nCols; + for( i = 0; i < size; i++ ) + dst.data[i] = (uchar)src->values[i]; +} + + +CV_IMPL void +cvErode( const CvArr* srcarr, CvArr* dstarr, IplConvKernel* element, int iterations ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), kernel; + CV_Assert( src.size() == dst.size() && src.type() == dst.type() ); + cv::Point anchor; + 
convertConvKernel( element, kernel, anchor ); + cv::erode( src, dst, kernel, anchor, iterations, cv::BORDER_REPLICATE ); +} + + +CV_IMPL void +cvDilate( const CvArr* srcarr, CvArr* dstarr, IplConvKernel* element, int iterations ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), kernel; + CV_Assert( src.size() == dst.size() && src.type() == dst.type() ); + cv::Point anchor; + convertConvKernel( element, kernel, anchor ); + cv::dilate( src, dst, kernel, anchor, iterations, cv::BORDER_REPLICATE ); +} + + +CV_IMPL void +cvMorphologyEx( const void* srcarr, void* dstarr, void*, + IplConvKernel* element, int op, int iterations ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), kernel; + CV_Assert( src.size() == dst.size() && src.type() == dst.type() ); + cv::Point anchor; + IplConvKernel* temp_element = NULL; + if (!element) + { + temp_element = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT); + } else { + temp_element = element; + } + convertConvKernel( temp_element, kernel, anchor ); + if (!element) + { + cvReleaseStructuringElement(&temp_element); + } + cv::morphologyEx( src, dst, op, kernel, anchor, iterations, cv::BORDER_REPLICATE ); +} + +/* End of file. */ diff --git a/opencv/imgproc/opencv2/imgproc/imgproc.hpp b/opencv/imgproc/opencv2/imgproc/imgproc.hpp new file mode 100644 index 0000000..613b878 --- /dev/null +++ b/opencv/imgproc/opencv2/imgproc/imgproc.hpp @@ -0,0 +1,1139 @@ +/*! \file imgproc.hpp + \brief The Image Processing + */ + +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_IMGPROC_HPP__ +#define __OPENCV_IMGPROC_HPP__ + +#include "core.hpp" +#include "types_c.h" + +#ifdef __cplusplus + +/*! 
\namespace cv + Namespace where all the C++ OpenCV functionality resides + */ +namespace cv +{ + +//! various border interpolation methods +enum { BORDER_REPLICATE=IPL_BORDER_REPLICATE, BORDER_CONSTANT=IPL_BORDER_CONSTANT, + BORDER_REFLECT=IPL_BORDER_REFLECT, BORDER_WRAP=IPL_BORDER_WRAP, + BORDER_REFLECT_101=IPL_BORDER_REFLECT_101, BORDER_REFLECT101=BORDER_REFLECT_101, + BORDER_TRANSPARENT=IPL_BORDER_TRANSPARENT, + BORDER_DEFAULT=BORDER_REFLECT_101, BORDER_ISOLATED=16 }; + +//! 1D interpolation function: returns coordinate of the "donor" pixel for the specified location p. +CV_EXPORTS_W int borderInterpolate( int p, int len, int borderType ); + +/*! + The Base Class for 1D or Row-wise Filters + + This is the base class for linear or non-linear filters that process 1D data. + In particular, such filters are used for the "horizontal" filtering parts in separable filters. + + Several functions in OpenCV return Ptr for the specific types of filters, + and those pointers can be used directly or within cv::FilterEngine. +*/ +class CV_EXPORTS BaseRowFilter +{ +public: + //! the default constructor + BaseRowFilter(); + //! the destructor + virtual ~BaseRowFilter(); + //! the filtering operator. Must be overrided in the derived classes. The horizontal border interpolation is done outside of the class. + virtual void operator()(const uchar* src, uchar* dst, + int width, int cn) = 0; + int ksize, anchor; +}; + + +/*! + The Base Class for Column-wise Filters + + This is the base class for linear or non-linear filters that process columns of 2D arrays. + Such filters are used for the "vertical" filtering parts in separable filters. + + Several functions in OpenCV return Ptr for the specific types of filters, + and those pointers can be used directly or within cv::FilterEngine. + + Unlike cv::BaseRowFilter, cv::BaseColumnFilter may have some context information, + i.e. box filter keeps the sliding sum of elements. 
To reset the state BaseColumnFilter::reset() + must be called (e.g. the method is called by cv::FilterEngine) + */ +class CV_EXPORTS BaseColumnFilter +{ +public: + //! the default constructor + BaseColumnFilter(); + //! the destructor + virtual ~BaseColumnFilter(); + //! the filtering operator. Must be overrided in the derived classes. The vertical border interpolation is done outside of the class. + virtual void operator()(const uchar** src, uchar* dst, int dststep, + int dstcount, int width) = 0; + //! resets the internal buffers, if any + virtual void reset(); + int ksize, anchor; +}; + +/*! + The Base Class for Non-Separable 2D Filters. + + This is the base class for linear or non-linear 2D filters. + + Several functions in OpenCV return Ptr for the specific types of filters, + and those pointers can be used directly or within cv::FilterEngine. + + Similar to cv::BaseColumnFilter, the class may have some context information, + that should be reset using BaseFilter::reset() method before processing the new array. +*/ +class CV_EXPORTS BaseFilter +{ +public: + //! the default constructor + BaseFilter(); + //! the destructor + virtual ~BaseFilter(); + //! the filtering operator. The horizontal and the vertical border interpolation is done outside of the class. + virtual void operator()(const uchar** src, uchar* dst, int dststep, + int dstcount, int width, int cn) = 0; + //! resets the internal buffers, if any + virtual void reset(); + Size ksize; + Point anchor; +}; + +/*! + The Main Class for Image Filtering. + + The class can be used to apply an arbitrary filtering operation to an image. + It contains all the necessary intermediate buffers, it computes extrapolated values + of the "virtual" pixels outside of the image etc. 
+ Pointers to the initialized cv::FilterEngine instances + are returned by various OpenCV functions, such as cv::createSeparableLinearFilter(), + cv::createLinearFilter(), cv::createGaussianFilter(), cv::createDerivFilter(), + cv::createBoxFilter() and cv::createMorphologyFilter(). + + Using the class you can process large images by parts and build complex pipelines + that include filtering as some of the stages. If all you need is to apply some pre-defined + filtering operation, you may use cv::filter2D(), cv::erode(), cv::dilate() etc. + functions that create FilterEngine internally. + + Here is the example on how to use the class to implement Laplacian operator, which is the sum of + second-order derivatives. More complex variant for different types is implemented in cv::Laplacian(). + + \code + void laplace_f(const Mat& src, Mat& dst) + { + CV_Assert( src.type() == CV_32F ); + // make sure the destination array has the proper size and type + dst.create(src.size(), src.type()); + + // get the derivative and smooth kernels for d2I/dx2. + // for d2I/dy2 we could use the same kernels, just swapped + Mat kd, ks; + getSobelKernels( kd, ks, 2, 0, ksize, false, ktype ); + + // let's process 10 source rows at once + int DELTA = std::min(10, src.rows); + Ptr Fxx = createSeparableLinearFilter(src.type(), + dst.type(), kd, ks, Point(-1,-1), 0, borderType, borderType, Scalar() ); + Ptr Fyy = createSeparableLinearFilter(src.type(), + dst.type(), ks, kd, Point(-1,-1), 0, borderType, borderType, Scalar() ); + + int y = Fxx->start(src), dsty = 0, dy = 0; + Fyy->start(src); + const uchar* sptr = src.data + y*src.step; + + // allocate the buffers for the spatial image derivatives; + // the buffers need to have more than DELTA rows, because at the + // last iteration the output may take max(kd.rows-1,ks.rows-1) + // rows more than the input. 
+ Mat Ixx( DELTA + kd.rows - 1, src.cols, dst.type() ); + Mat Iyy( DELTA + kd.rows - 1, src.cols, dst.type() ); + + // inside the loop we always pass DELTA rows to the filter + // (note that the "proceed" method takes care of possibe overflow, since + // it was given the actual image height in the "start" method) + // on output we can get: + // * < DELTA rows (the initial buffer accumulation stage) + // * = DELTA rows (settled state in the middle) + // * > DELTA rows (then the input image is over, but we generate + // "virtual" rows using the border mode and filter them) + // this variable number of output rows is dy. + // dsty is the current output row. + // sptr is the pointer to the first input row in the portion to process + for( ; dsty < dst.rows; sptr += DELTA*src.step, dsty += dy ) + { + Fxx->proceed( sptr, (int)src.step, DELTA, Ixx.data, (int)Ixx.step ); + dy = Fyy->proceed( sptr, (int)src.step, DELTA, d2y.data, (int)Iyy.step ); + if( dy > 0 ) + { + Mat dstripe = dst.rowRange(dsty, dsty + dy); + add(Ixx.rowRange(0, dy), Iyy.rowRange(0, dy), dstripe); + } + } + } + \endcode +*/ +class CV_EXPORTS FilterEngine +{ +public: + //! the default constructor + FilterEngine(); + //! the full constructor. Either _filter2D or both _rowFilter and _columnFilter must be non-empty. + FilterEngine(const Ptr& _filter2D, + const Ptr& _rowFilter, + const Ptr& _columnFilter, + int srcType, int dstType, int bufType, + int _rowBorderType=BORDER_REPLICATE, + int _columnBorderType=-1, + const Scalar& _borderValue=Scalar()); + //! the destructor + virtual ~FilterEngine(); + //! reinitializes the engine. The previously assigned filters are released. + void init(const Ptr& _filter2D, + const Ptr& _rowFilter, + const Ptr& _columnFilter, + int srcType, int dstType, int bufType, + int _rowBorderType=BORDER_REPLICATE, int _columnBorderType=-1, + const Scalar& _borderValue=Scalar()); + //! starts filtering of the specified ROI of an image of size wholeSize. 
+ virtual int start(Size wholeSize, Rect roi, int maxBufRows=-1); + //! starts filtering of the specified ROI of the specified image. + virtual int start(const Mat& src, const Rect& srcRoi=Rect(0,0,-1,-1), + bool isolated=false, int maxBufRows=-1); + //! processes the next srcCount rows of the image. + virtual int proceed(const uchar* src, int srcStep, int srcCount, + uchar* dst, int dstStep); + //! applies filter to the specified ROI of the image. if srcRoi=(0,0,-1,-1), the whole image is filtered. + virtual void apply( const Mat& src, Mat& dst, + const Rect& srcRoi=Rect(0,0,-1,-1), + Point dstOfs=Point(0,0), + bool isolated=false); + //! returns true if the filter is separable + bool isSeparable() const { return (const BaseFilter*)filter2D == 0; } + //! returns the number + int remainingInputRows() const; + int remainingOutputRows() const; + + int srcType, dstType, bufType; + Size ksize; + Point anchor; + int maxWidth; + Size wholeSize; + Rect roi; + int dx1, dx2; + int rowBorderType, columnBorderType; + vector borderTab; + int borderElemSize; + vector ringBuf; + vector srcRow; + vector constBorderValue; + vector constBorderRow; + int bufStep, startY, startY0, endY, rowCount, dstY; + vector rows; + + Ptr filter2D; + Ptr rowFilter; + Ptr columnFilter; +}; + +//! type of the kernel +enum { KERNEL_GENERAL=0, KERNEL_SYMMETRICAL=1, KERNEL_ASYMMETRICAL=2, + KERNEL_SMOOTH=4, KERNEL_INTEGER=8 }; + +//! returns type (one of KERNEL_*) of 1D or 2D kernel specified by its coefficients. +CV_EXPORTS int getKernelType(InputArray kernel, Point anchor); + +//! returns the primitive row filter with the specified kernel +CV_EXPORTS Ptr getLinearRowFilter(int srcType, int bufType, + InputArray kernel, int anchor, + int symmetryType); + +//! returns the primitive column filter with the specified kernel +CV_EXPORTS Ptr getLinearColumnFilter(int bufType, int dstType, + InputArray kernel, int anchor, + int symmetryType, double delta=0, + int bits=0); + +//! 
returns 2D filter with the specified kernel +CV_EXPORTS Ptr getLinearFilter(int srcType, int dstType, + InputArray kernel, + Point anchor=Point(-1,-1), + double delta=0, int bits=0); + +//! returns the separable linear filter engine +CV_EXPORTS Ptr createSeparableLinearFilter(int srcType, int dstType, + InputArray rowKernel, InputArray columnKernel, + Point _anchor=Point(-1,-1), double delta=0, + int _rowBorderType=BORDER_DEFAULT, + int _columnBorderType=-1, + const Scalar& _borderValue=Scalar()); + +//! returns the non-separable linear filter engine +CV_EXPORTS Ptr createLinearFilter(int srcType, int dstType, + InputArray kernel, Point _anchor=Point(-1,-1), + double delta=0, int _rowBorderType=BORDER_DEFAULT, + int _columnBorderType=-1, const Scalar& _borderValue=Scalar()); + +//! returns the Gaussian kernel with the specified parameters +CV_EXPORTS_W Mat getGaussianKernel( int ksize, double sigma, int ktype=CV_64F ); + +//! returns the Gaussian filter engine +CV_EXPORTS Ptr createGaussianFilter( int type, Size ksize, + double sigma1, double sigma2=0, + int borderType=BORDER_DEFAULT); +//! initializes kernels of the generalized Sobel operator +CV_EXPORTS_W void getDerivKernels( OutputArray kx, OutputArray ky, + int dx, int dy, int ksize, + bool normalize=false, int ktype=CV_32F ); +//! returns filter engine for the generalized Sobel operator +CV_EXPORTS Ptr createDerivFilter( int srcType, int dstType, + int dx, int dy, int ksize, + int borderType=BORDER_DEFAULT ); +//! returns horizontal 1D box filter +CV_EXPORTS Ptr getRowSumFilter(int srcType, int sumType, + int ksize, int anchor=-1); +//! returns vertical 1D box filter +CV_EXPORTS Ptr getColumnSumFilter( int sumType, int dstType, + int ksize, int anchor=-1, + double scale=1); +//! returns box filter engine +CV_EXPORTS Ptr createBoxFilter( int srcType, int dstType, Size ksize, + Point anchor=Point(-1,-1), + bool normalize=true, + int borderType=BORDER_DEFAULT); +//! 
type of morphological operation +enum { MORPH_ERODE=CV_MOP_ERODE, MORPH_DILATE=CV_MOP_DILATE, + MORPH_OPEN=CV_MOP_OPEN, MORPH_CLOSE=CV_MOP_CLOSE, + MORPH_GRADIENT=CV_MOP_GRADIENT, MORPH_TOPHAT=CV_MOP_TOPHAT, + MORPH_BLACKHAT=CV_MOP_BLACKHAT }; + +//! returns horizontal 1D morphological filter +CV_EXPORTS Ptr getMorphologyRowFilter(int op, int type, int ksize, int anchor=-1); +//! returns vertical 1D morphological filter +CV_EXPORTS Ptr getMorphologyColumnFilter(int op, int type, int ksize, int anchor=-1); +//! returns 2D morphological filter +CV_EXPORTS Ptr getMorphologyFilter(int op, int type, InputArray kernel, + Point anchor=Point(-1,-1)); + +//! returns "magic" border value for erosion and dilation. It is automatically transformed to Scalar::all(-DBL_MAX) for dilation. +static inline Scalar morphologyDefaultBorderValue() { return Scalar::all(DBL_MAX); } + +//! returns morphological filter engine. Only MORPH_ERODE and MORPH_DILATE are supported. +CV_EXPORTS Ptr createMorphologyFilter(int op, int type, InputArray kernel, + Point anchor=Point(-1,-1), int _rowBorderType=BORDER_CONSTANT, + int _columnBorderType=-1, + const Scalar& _borderValue=morphologyDefaultBorderValue()); + +//! shape of the structuring element +enum { MORPH_RECT=0, MORPH_CROSS=1, MORPH_ELLIPSE=2 }; +//! returns structuring element of the specified shape and size +CV_EXPORTS_W Mat getStructuringElement(int shape, Size ksize, Point anchor=Point(-1,-1)); + +template<> CV_EXPORTS void Ptr::delete_obj(); + +//! copies 2D array to a larger destination array with extrapolation of the outer part of src using the specified border mode +CV_EXPORTS_W void copyMakeBorder( InputArray src, OutputArray dst, + int top, int bottom, int left, int right, + int borderType, const Scalar& value=Scalar() ); + +//! smooths the image using median filter. +CV_EXPORTS_W void medianBlur( InputArray src, OutputArray dst, int ksize ); +//! smooths the image using Gaussian filter. 
+CV_EXPORTS_W void GaussianBlur( InputArray src, + OutputArray dst, Size ksize, + double sigma1, double sigma2=0, + int borderType=BORDER_DEFAULT ); +//! smooths the image using bilateral filter +CV_EXPORTS_W void bilateralFilter( InputArray src, OutputArray dst, int d, + double sigmaColor, double sigmaSpace, + int borderType=BORDER_DEFAULT ); +//! smooths the image using the box filter. Each pixel is processed in O(1) time +CV_EXPORTS_W void boxFilter( InputArray src, OutputArray dst, int ddepth, + Size ksize, Point anchor=Point(-1,-1), + bool normalize=true, + int borderType=BORDER_DEFAULT ); +//! a synonym for normalized box filter +CV_EXPORTS_W void blur( InputArray src, OutputArray dst, + Size ksize, Point anchor=Point(-1,-1), + int borderType=BORDER_DEFAULT ); + +//! applies non-separable 2D linear filter to the image +CV_EXPORTS_W void filter2D( InputArray src, OutputArray dst, int ddepth, + InputArray kernel, Point anchor=Point(-1,-1), + double delta=0, int borderType=BORDER_DEFAULT ); + +//! applies separable 2D linear filter to the image +CV_EXPORTS_W void sepFilter2D( InputArray src, OutputArray dst, int ddepth, + InputArray kernelX, InputArray kernelY, + Point anchor=Point(-1,-1), + double delta=0, int borderType=BORDER_DEFAULT ); + +//! applies generalized Sobel operator to the image +CV_EXPORTS_W void Sobel( InputArray src, OutputArray dst, int ddepth, + int dx, int dy, int ksize=3, + double scale=1, double delta=0, + int borderType=BORDER_DEFAULT ); + +//! applies the vertical or horizontal Scharr operator to the image +CV_EXPORTS_W void Scharr( InputArray src, OutputArray dst, int ddepth, + int dx, int dy, double scale=1, double delta=0, + int borderType=BORDER_DEFAULT ); + +//! applies Laplacian operator to the image +CV_EXPORTS_W void Laplacian( InputArray src, OutputArray dst, int ddepth, + int ksize=1, double scale=1, double delta=0, + int borderType=BORDER_DEFAULT ); + +//! applies Canny edge detector and produces the edge map. 
+CV_EXPORTS_W void Canny( InputArray image, OutputArray edges, + double threshold1, double threshold2, + int apertureSize=3, bool L2gradient=false ); + +//! computes minimum eigen value of 2x2 derivative covariation matrix at each pixel - the cornerness criteria +CV_EXPORTS_W void cornerMinEigenVal( InputArray src, OutputArray dst, + int blockSize, int ksize=3, + int borderType=BORDER_DEFAULT ); + +//! computes Harris cornerness criteria at each image pixel +CV_EXPORTS_W void cornerHarris( InputArray src, OutputArray dst, int blockSize, + int ksize, double k, + int borderType=BORDER_DEFAULT ); + +//! computes both eigenvalues and the eigenvectors of 2x2 derivative covariation matrix at each pixel. The output is stored as 6-channel matrix. +CV_EXPORTS_W void cornerEigenValsAndVecs( InputArray src, OutputArray dst, + int blockSize, int ksize, + int borderType=BORDER_DEFAULT ); + +//! computes another complex cornerness criteria at each pixel +CV_EXPORTS_W void preCornerDetect( InputArray src, OutputArray dst, int ksize, + int borderType=BORDER_DEFAULT ); + +//! adjusts the corner locations with sub-pixel accuracy to maximize the certain cornerness criteria +CV_EXPORTS_W void cornerSubPix( InputArray image, InputOutputArray corners, + Size winSize, Size zeroZone, + TermCriteria criteria ); + +//! finds the strong enough corners where the cornerMinEigenVal() or cornerHarris() report the local maxima +CV_EXPORTS_W void goodFeaturesToTrack( InputArray image, OutputArray corners, + int maxCorners, double qualityLevel, double minDistance, + InputArray mask=noArray(), int blockSize=3, + bool useHarrisDetector=false, double k=0.04 ); + +//! finds lines in the black-n-white image using the standard or pyramid Hough transform +CV_EXPORTS_W void HoughLines( InputArray image, OutputArray lines, + double rho, double theta, int threshold, + double srn=0, double stn=0 ); + +//! 
finds line segments in the black-n-white image using probabalistic Hough transform +CV_EXPORTS_W void HoughLinesP( InputArray image, OutputArray lines, + double rho, double theta, int threshold, + double minLineLength=0, double maxLineGap=0 ); + +//! finds circles in the grayscale image using 2+1 gradient Hough transform +CV_EXPORTS_W void HoughCircles( InputArray image, OutputArray circles, + int method, double dp, double minDist, + double param1=100, double param2=100, + int minRadius=0, int maxRadius=0 ); + +//! erodes the image (applies the local minimum operator) +CV_EXPORTS_W void erode( InputArray src, OutputArray dst, InputArray kernel, + Point anchor=Point(-1,-1), int iterations=1, + int borderType=BORDER_CONSTANT, + const Scalar& borderValue=morphologyDefaultBorderValue() ); + +//! dilates the image (applies the local maximum operator) +CV_EXPORTS_W void dilate( InputArray src, OutputArray dst, InputArray kernel, + Point anchor=Point(-1,-1), int iterations=1, + int borderType=BORDER_CONSTANT, + const Scalar& borderValue=morphologyDefaultBorderValue() ); + +//! applies an advanced morphological operation to the image +CV_EXPORTS_W void morphologyEx( InputArray src, OutputArray dst, + int op, InputArray kernel, + Point anchor=Point(-1,-1), int iterations=1, + int borderType=BORDER_CONSTANT, + const Scalar& borderValue=morphologyDefaultBorderValue() ); + +//! interpolation algorithm +enum +{ + INTER_NEAREST=CV_INTER_NN, //!< nearest neighbor interpolation + INTER_LINEAR=CV_INTER_LINEAR, //!< bilinear interpolation + INTER_CUBIC=CV_INTER_CUBIC, //!< bicubic interpolation + INTER_AREA=CV_INTER_AREA, //!< area-based (or super) interpolation + INTER_LANCZOS4=CV_INTER_LANCZOS4, //!< Lanczos interpolation over 8x8 neighborhood + INTER_MAX=7, + WARP_INVERSE_MAP=CV_WARP_INVERSE_MAP +}; + +//! resizes the image +CV_EXPORTS_W void resize( InputArray src, OutputArray dst, + Size dsize, double fx=0, double fy=0, + int interpolation=INTER_LINEAR ); + +//! 
warps the image using affine transformation +CV_EXPORTS_W void warpAffine( InputArray src, OutputArray dst, + InputArray M, Size dsize, + int flags=INTER_LINEAR, + int borderMode=BORDER_CONSTANT, + const Scalar& borderValue=Scalar()); + +//! warps the image using perspective transformation +CV_EXPORTS_W void warpPerspective( InputArray src, OutputArray dst, + InputArray M, Size dsize, + int flags=INTER_LINEAR, + int borderMode=BORDER_CONSTANT, + const Scalar& borderValue=Scalar()); + +enum +{ + INTER_BITS=5, INTER_BITS2=INTER_BITS*2, + INTER_TAB_SIZE=(1< CV_EXPORTS void Ptr::delete_obj(); + +//! computes the joint dense histogram for a set of images. +CV_EXPORTS void calcHist( const Mat* images, int nimages, + const int* channels, InputArray mask, + OutputArray hist, int dims, const int* histSize, + const float** ranges, bool uniform=true, bool accumulate=false ); + +//! computes the joint sparse histogram for a set of images. +CV_EXPORTS void calcHist( const Mat* images, int nimages, + const int* channels, InputArray mask, + SparseMat& hist, int dims, + const int* histSize, const float** ranges, + bool uniform=true, bool accumulate=false ); + +CV_EXPORTS_W void calcHist( InputArrayOfArrays images, + const vector& channels, + InputArray mask, OutputArray hist, + const vector& histSize, + const vector& ranges, + bool accumulate=false ); + +//! computes back projection for the set of images +CV_EXPORTS void calcBackProject( const Mat* images, int nimages, + const int* channels, InputArray hist, + OutputArray backProject, const float** ranges, + double scale=1, bool uniform=true ); + +//! 
computes back projection for the set of images +CV_EXPORTS void calcBackProject( const Mat* images, int nimages, + const int* channels, const SparseMat& hist, + OutputArray backProject, const float** ranges, + double scale=1, bool uniform=true ); + +CV_EXPORTS_W void calcBackProject( InputArrayOfArrays images, const vector& channels, + InputArray hist, OutputArray dst, + const vector& ranges, + double scale ); + +//! compares two histograms stored in dense arrays +CV_EXPORTS_W double compareHist( InputArray H1, InputArray H2, int method ); + +//! compares two histograms stored in sparse arrays +CV_EXPORTS double compareHist( const SparseMat& H1, const SparseMat& H2, int method ); + +//! normalizes the grayscale image brightness and contrast by normalizing its histogram +CV_EXPORTS_W void equalizeHist( InputArray src, OutputArray dst ); + +CV_EXPORTS float EMD( InputArray signature1, InputArray signature2, + int distType, InputArray cost=noArray(), + float* lowerBound=0, OutputArray flow=noArray() ); + +//! segments the image using watershed algorithm +CV_EXPORTS_W void watershed( InputArray image, InputOutputArray markers ); + +//! filters image using meanshift algorithm +CV_EXPORTS_W void pyrMeanShiftFiltering( InputArray src, OutputArray dst, + double sp, double sr, int maxLevel=1, + TermCriteria termcrit=TermCriteria( + TermCriteria::MAX_ITER+TermCriteria::EPS,5,1) ); + +//! class of the pixel in GrabCut algorithm +enum +{ + GC_BGD = 0, //!< background + GC_FGD = 1, //!< foreground + GC_PR_BGD = 2, //!< most probably background + GC_PR_FGD = 3 //!< most probably foreground +}; + +//! GrabCut algorithm flags +enum +{ + GC_INIT_WITH_RECT = 0, + GC_INIT_WITH_MASK = 1, + GC_EVAL = 2 +}; + +//! segments the image using GrabCut algorithm +CV_EXPORTS_W void grabCut( InputArray img, InputOutputArray mask, Rect rect, + InputOutputArray bgdModel, InputOutputArray fgdModel, + int iterCount, int mode = GC_EVAL ); + +//! 
the inpainting algorithm +enum +{ + INPAINT_NS=CV_INPAINT_NS, // Navier-Stokes algorithm + INPAINT_TELEA=CV_INPAINT_TELEA // A. Telea algorithm +}; + +//! restores the damaged image areas using one of the available intpainting algorithms +CV_EXPORTS_W void inpaint( InputArray src, InputArray inpaintMask, + OutputArray dst, double inpaintRange, int flags ); + +//! builds the discrete Voronoi diagram +CV_EXPORTS_W void distanceTransform( InputArray src, OutputArray dst, + OutputArray labels, int distanceType, int maskSize ); + +//! computes the distance transform map +CV_EXPORTS void distanceTransform( InputArray src, OutputArray dst, + int distanceType, int maskSize ); + +enum { FLOODFILL_FIXED_RANGE = 1 << 16, FLOODFILL_MASK_ONLY = 1 << 17 }; + +//! fills the semi-uniform image region starting from the specified seed point +CV_EXPORTS int floodFill( InputOutputArray image, + Point seedPoint, Scalar newVal, CV_OUT Rect* rect=0, + Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), + int flags=4 ); + +//! 
fills the semi-uniform image region and/or the mask starting from the specified seed point +CV_EXPORTS_W int floodFill( InputOutputArray image, InputOutputArray mask, + Point seedPoint, Scalar newVal, CV_OUT Rect* rect=0, + Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), + int flags=4 ); + + +enum +{ + COLOR_BGR2BGRA =0, + COLOR_RGB2RGBA =COLOR_BGR2BGRA, + + COLOR_BGRA2BGR =1, + COLOR_RGBA2RGB =COLOR_BGRA2BGR, + + COLOR_BGR2RGBA =2, + COLOR_RGB2BGRA =COLOR_BGR2RGBA, + + COLOR_RGBA2BGR =3, + COLOR_BGRA2RGB =COLOR_RGBA2BGR, + + COLOR_BGR2RGB =4, + COLOR_RGB2BGR =COLOR_BGR2RGB, + + COLOR_BGRA2RGBA =5, + COLOR_RGBA2BGRA =COLOR_BGRA2RGBA, + + COLOR_BGR2GRAY =6, + COLOR_RGB2GRAY =7, + COLOR_GRAY2BGR =8, + COLOR_GRAY2RGB =COLOR_GRAY2BGR, + COLOR_GRAY2BGRA =9, + COLOR_GRAY2RGBA =COLOR_GRAY2BGRA, + COLOR_BGRA2GRAY =10, + COLOR_RGBA2GRAY =11, + + COLOR_BGR2BGR565 =12, + COLOR_RGB2BGR565 =13, + COLOR_BGR5652BGR =14, + COLOR_BGR5652RGB =15, + COLOR_BGRA2BGR565 =16, + COLOR_RGBA2BGR565 =17, + COLOR_BGR5652BGRA =18, + COLOR_BGR5652RGBA =19, + + COLOR_GRAY2BGR565 =20, + COLOR_BGR5652GRAY =21, + + COLOR_BGR2BGR555 =22, + COLOR_RGB2BGR555 =23, + COLOR_BGR5552BGR =24, + COLOR_BGR5552RGB =25, + COLOR_BGRA2BGR555 =26, + COLOR_RGBA2BGR555 =27, + COLOR_BGR5552BGRA =28, + COLOR_BGR5552RGBA =29, + + COLOR_GRAY2BGR555 =30, + COLOR_BGR5552GRAY =31, + + COLOR_BGR2XYZ =32, + COLOR_RGB2XYZ =33, + COLOR_XYZ2BGR =34, + COLOR_XYZ2RGB =35, + + COLOR_BGR2YCrCb =36, + COLOR_RGB2YCrCb =37, + COLOR_YCrCb2BGR =38, + COLOR_YCrCb2RGB =39, + + COLOR_BGR2HSV =40, + COLOR_RGB2HSV =41, + + COLOR_BGR2Lab =44, + COLOR_RGB2Lab =45, + + COLOR_BayerBG2BGR =46, + COLOR_BayerGB2BGR =47, + COLOR_BayerRG2BGR =48, + COLOR_BayerGR2BGR =49, + + COLOR_BayerBG2RGB =COLOR_BayerRG2BGR, + COLOR_BayerGB2RGB =COLOR_BayerGR2BGR, + COLOR_BayerRG2RGB =COLOR_BayerBG2BGR, + COLOR_BayerGR2RGB =COLOR_BayerGB2BGR, + + COLOR_BGR2Luv =50, + COLOR_RGB2Luv =51, + COLOR_BGR2HLS =52, + COLOR_RGB2HLS =53, + + COLOR_HSV2BGR =54, + 
COLOR_HSV2RGB =55, + + COLOR_Lab2BGR =56, + COLOR_Lab2RGB =57, + COLOR_Luv2BGR =58, + COLOR_Luv2RGB =59, + COLOR_HLS2BGR =60, + COLOR_HLS2RGB =61, + + COLOR_BayerBG2BGR_VNG =62, + COLOR_BayerGB2BGR_VNG =63, + COLOR_BayerRG2BGR_VNG =64, + COLOR_BayerGR2BGR_VNG =65, + + COLOR_BayerBG2RGB_VNG =COLOR_BayerRG2BGR_VNG, + COLOR_BayerGB2RGB_VNG =COLOR_BayerGR2BGR_VNG, + COLOR_BayerRG2RGB_VNG =COLOR_BayerBG2BGR_VNG, + COLOR_BayerGR2RGB_VNG =COLOR_BayerGB2BGR_VNG, + + COLOR_BGR2HSV_FULL = 66, + COLOR_RGB2HSV_FULL = 67, + COLOR_BGR2HLS_FULL = 68, + COLOR_RGB2HLS_FULL = 69, + + COLOR_HSV2BGR_FULL = 70, + COLOR_HSV2RGB_FULL = 71, + COLOR_HLS2BGR_FULL = 72, + COLOR_HLS2RGB_FULL = 73, + + COLOR_LBGR2Lab = 74, + COLOR_LRGB2Lab = 75, + COLOR_LBGR2Luv = 76, + COLOR_LRGB2Luv = 77, + + COLOR_Lab2LBGR = 78, + COLOR_Lab2LRGB = 79, + COLOR_Luv2LBGR = 80, + COLOR_Luv2LRGB = 81, + + COLOR_BGR2YUV = 82, + COLOR_RGB2YUV = 83, + COLOR_YUV2BGR = 84, + COLOR_YUV2RGB = 85, + + COLOR_BayerBG2GRAY = 86, + COLOR_BayerGB2GRAY = 87, + COLOR_BayerRG2GRAY = 88, + COLOR_BayerGR2GRAY = 89, + + COLOR_YUV420i2RGB = 90, + COLOR_YUV420i2BGR = 91, + COLOR_YUV420sp2RGB = 92, + COLOR_YUV420sp2BGR = 93, + + COLOR_COLORCVT_MAX =100 +}; + + +//! converts image from one color space to another +CV_EXPORTS_W void cvtColor( InputArray src, OutputArray dst, int code, int dstCn=0 ); + +//! raster image moments +class CV_EXPORTS_W_MAP Moments +{ +public: + //! the default constructor + Moments(); + //! the full constructor + Moments(double m00, double m10, double m01, double m20, double m11, + double m02, double m30, double m21, double m12, double m03 ); + //! the conversion from CvMoments + Moments( const CvMoments& moments ); + //! the conversion to CvMoments + operator CvMoments() const; + + //! spatial moments + CV_PROP_RW double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03; + //! central moments + CV_PROP_RW double mu20, mu11, mu02, mu30, mu21, mu12, mu03; + //! 
central normalized moments + CV_PROP_RW double nu20, nu11, nu02, nu30, nu21, nu12, nu03; +}; + +//! computes moments of the rasterized shape or a vector of points +CV_EXPORTS_W Moments moments( InputArray array, bool binaryImage=false ); + +//! computes 7 Hu invariants from the moments +CV_EXPORTS void HuMoments( const Moments& moments, double hu[7] ); +CV_EXPORTS_W void HuMoments( const Moments& m, CV_OUT OutputArray hu ); + +//! type of the template matching operation +enum { TM_SQDIFF=0, TM_SQDIFF_NORMED=1, TM_CCORR=2, TM_CCORR_NORMED=3, TM_CCOEFF=4, TM_CCOEFF_NORMED=5 }; + +//! computes the proximity map for the raster template and the image where the template is searched for +CV_EXPORTS_W void matchTemplate( InputArray image, InputArray templ, + OutputArray result, int method ); + +//! mode of the contour retrieval algorithm +enum +{ + RETR_EXTERNAL=CV_RETR_EXTERNAL, //!< retrieve only the most external (top-level) contours + RETR_LIST=CV_RETR_LIST, //!< retrieve all the contours without any hierarchical information + RETR_CCOMP=CV_RETR_CCOMP, //!< retrieve the connected components (that can possibly be nested) + RETR_TREE=CV_RETR_TREE //!< retrieve all the contours and the whole hierarchy +}; + +//! the contour approximation algorithm +enum +{ + CHAIN_APPROX_NONE=CV_CHAIN_APPROX_NONE, + CHAIN_APPROX_SIMPLE=CV_CHAIN_APPROX_SIMPLE, + CHAIN_APPROX_TC89_L1=CV_CHAIN_APPROX_TC89_L1, + CHAIN_APPROX_TC89_KCOS=CV_CHAIN_APPROX_TC89_KCOS +}; + +//! retrieves contours and the hierarchical information from black-n-white image. +CV_EXPORTS_W void findContours( InputOutputArray image, OutputArrayOfArrays contours, + OutputArray hierarchy, int mode, + int method, Point offset=Point()); + +//! retrieves contours from black-n-white image. +CV_EXPORTS void findContours( InputOutputArray image, OutputArrayOfArrays contours, + int mode, int method, Point offset=Point()); + +//! 
draws contours in the image +CV_EXPORTS_W void drawContours( InputOutputArray image, InputArrayOfArrays contours, + int contourIdx, const Scalar& color, + int thickness=1, int lineType=8, + InputArray hierarchy=noArray(), + int maxLevel=INT_MAX, Point offset=Point() ); + +//! approximates contour or a curve using Douglas-Peucker algorithm +CV_EXPORTS_W void approxPolyDP( InputArray curve, + OutputArray approxCurve, + double epsilon, bool closed ); + +//! computes the contour perimeter (closed=true) or a curve length +CV_EXPORTS_W double arcLength( InputArray curve, bool closed ); +//! computes the bounding rectangle for a contour +CV_EXPORTS_W Rect boundingRect( InputArray points ); +//! computes the contour area +CV_EXPORTS_W double contourArea( InputArray contour, bool oriented=false ); +//! computes the minimal rotated rectangle for a set of points +CV_EXPORTS_W RotatedRect minAreaRect( InputArray points ); +//! computes the minimal enclosing circle for a set of points +CV_EXPORTS_W void minEnclosingCircle( InputArray points, + CV_OUT Point2f& center, CV_OUT float& radius ); +//! matches two contours using one of the available algorithms +CV_EXPORTS_W double matchShapes( InputArray contour1, InputArray contour2, + int method, double parameter ); +//! computes convex hull for a set of 2D points. +CV_EXPORTS_W void convexHull( InputArray points, OutputArray hull, + bool clockwise=false, bool returnPoints=true ); + +//! returns true iff the contour is convex. Does not support contours with self-intersection +CV_EXPORTS_W bool isContourConvex( InputArray contour ); + +//! fits ellipse to the set of 2D points +CV_EXPORTS_W RotatedRect fitEllipse( InputArray points ); + +//! fits line to the set of 2D points using M-estimator algorithm +CV_EXPORTS_W void fitLine( InputArray points, OutputArray line, int distType, + double param, double reps, double aeps ); +//! checks if the point is inside the contour. 
Optionally computes the signed distance from the point to the contour boundary +CV_EXPORTS_W double pointPolygonTest( InputArray contour, Point2f pt, bool measureDist ); + + +class CV_EXPORTS_W Subdiv2D +{ +public: + enum + { + PTLOC_ERROR = -2, + PTLOC_OUTSIDE_RECT = -1, + PTLOC_INSIDE = 0, + PTLOC_VERTEX = 1, + PTLOC_ON_EDGE = 2 + }; + + enum + { + NEXT_AROUND_ORG = 0x00, + NEXT_AROUND_DST = 0x22, + PREV_AROUND_ORG = 0x11, + PREV_AROUND_DST = 0x33, + NEXT_AROUND_LEFT = 0x13, + NEXT_AROUND_RIGHT = 0x31, + PREV_AROUND_LEFT = 0x20, + PREV_AROUND_RIGHT = 0x02 + }; + + CV_WRAP Subdiv2D(); + CV_WRAP Subdiv2D(Rect rect); + CV_WRAP void initDelaunay(Rect rect); + + CV_WRAP int insert(Point2f pt); + CV_WRAP void insert(const vector& ptvec); + CV_WRAP int locate(Point2f pt, CV_OUT int& edge, CV_OUT int& vertex); + + CV_WRAP int findNearest(Point2f pt, CV_OUT Point2f* nearestPt=0); + CV_WRAP void getEdgeList(CV_OUT vector& edgeList) const; + CV_WRAP void getTriangleList(CV_OUT vector& triangleList) const; + CV_WRAP void getVoronoiFacetList(const vector& idx, CV_OUT vector >& facetList, + CV_OUT vector& facetCenters); + + CV_WRAP Point2f getVertex(int vertex, CV_OUT int* firstEdge=0) const; + + CV_WRAP int getEdge( int edge, int nextEdgeType ) const; + CV_WRAP int nextEdge(int edge) const; + CV_WRAP int rotateEdge(int edge, int rotate) const; + CV_WRAP int symEdge(int edge) const; + CV_WRAP int edgeOrg(int edge, CV_OUT Point2f* orgpt=0) const; + CV_WRAP int edgeDst(int edge, CV_OUT Point2f* dstpt=0) const; + +protected: + int newEdge(); + void deleteEdge(int edge); + int newPoint(Point2f pt, bool isvirtual, int firstEdge=0); + void deletePoint(int vtx); + void setEdgePoints( int edge, int orgPt, int dstPt ); + void splice( int edgeA, int edgeB ); + int connectEdges( int edgeA, int edgeB ); + void swapEdges( int edge ); + int isRightOf(Point2f pt, int edge) const; + void calcVoronoi(); + void clearVoronoi(); + void check() const; + + struct CV_EXPORTS Vertex + { + Vertex(); + 
Vertex(Point2f pt, bool _isvirtual, int _firstEdge=0); + bool isvirtual() const; + bool isfree() const; + int firstEdge; + int type; + Point2f pt; + }; + struct CV_EXPORTS QuadEdge + { + QuadEdge(); + QuadEdge(int edgeidx); + bool isfree() const; + int next[4]; + int pt[4]; + }; + + vector vtx; + vector qedges; + int freeQEdge; + int freePoint; + bool validGeometry; + + int recentEdge; + Point2f topLeft; + Point2f bottomRight; +}; + +} + +// 2009-01-12, Xavier Delacour + +struct lsh_hash { + int h1, h2; +}; + +struct CvLSHOperations +{ + virtual ~CvLSHOperations() {} + + virtual int vector_add(const void* data) = 0; + virtual void vector_remove(int i) = 0; + virtual const void* vector_lookup(int i) = 0; + virtual void vector_reserve(int n) = 0; + virtual unsigned int vector_count() = 0; + + virtual void hash_insert(lsh_hash h, int l, int i) = 0; + virtual void hash_remove(lsh_hash h, int l, int i) = 0; + virtual int hash_lookup(lsh_hash h, int l, int* ret_i, int ret_i_max) = 0; +}; + +#endif /* __cplusplus */ + +#endif + +/* End of file. */ diff --git a/opencv/imgproc/opencv2/imgproc/imgproc_c.h b/opencv/imgproc/opencv2/imgproc/imgproc_c.h new file mode 100644 index 0000000..b845e1c --- /dev/null +++ b/opencv/imgproc/opencv2/imgproc/imgproc_c.h @@ -0,0 +1,783 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_IMGPROC_IMGPROC_C_H__ +#define __OPENCV_IMGPROC_IMGPROC_C_H__ + +#include "opencv2/core/core_c.h" +#include "opencv2/imgproc/types_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/*********************** Background statistics accumulation *****************************/ + +/* Adds image to accumulator */ +CVAPI(void) cvAcc( const CvArr* image, CvArr* sum, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Adds squared image to accumulator */ +CVAPI(void) cvSquareAcc( const CvArr* image, CvArr* sqsum, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Adds a product of two images to accumulator */ +CVAPI(void) cvMultiplyAcc( const CvArr* image1, const CvArr* image2, CvArr* acc, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Adds image to accumulator with weights: acc = acc*(1-alpha) + image*alpha */ +CVAPI(void) cvRunningAvg( const CvArr* image, CvArr* acc, double alpha, + const CvArr* mask CV_DEFAULT(NULL) ); + +/****************************************************************************************\ +* Image Processing * +\****************************************************************************************/ + +/* Copies source 2D array inside of the larger destination array and + makes a border of the specified type (IPL_BORDER_*) around the copied area. 
*/ +CVAPI(void) cvCopyMakeBorder( const CvArr* src, CvArr* dst, CvPoint offset, + int bordertype, CvScalar value CV_DEFAULT(cvScalarAll(0))); + +/* Smoothes array (removes noise) */ +CVAPI(void) cvSmooth( const CvArr* src, CvArr* dst, + int smoothtype CV_DEFAULT(CV_GAUSSIAN), + int size1 CV_DEFAULT(3), + int size2 CV_DEFAULT(0), + double sigma1 CV_DEFAULT(0), + double sigma2 CV_DEFAULT(0)); + +/* Convolves the image with the kernel */ +CVAPI(void) cvFilter2D( const CvArr* src, CvArr* dst, const CvMat* kernel, + CvPoint anchor CV_DEFAULT(cvPoint(-1,-1))); + +/* Finds integral image: SUM(X,Y) = sum(xnext[(edge + (int)type) & 3]; + return (edge & ~3) + ((edge + ((int)type >> 4)) & 3); +} + + +CV_INLINE CvSubdiv2DPoint* cvSubdiv2DEdgeOrg( CvSubdiv2DEdge edge ) +{ + CvQuadEdge2D* e = (CvQuadEdge2D*)(edge & ~3); + return (CvSubdiv2DPoint*)e->pt[edge & 3]; +} + + +CV_INLINE CvSubdiv2DPoint* cvSubdiv2DEdgeDst( CvSubdiv2DEdge edge ) +{ + CvQuadEdge2D* e = (CvQuadEdge2D*)(edge & ~3); + return (CvSubdiv2DPoint*)e->pt[(edge + 2) & 3]; +} + + +CV_INLINE double cvTriangleArea( CvPoint2D32f a, CvPoint2D32f b, CvPoint2D32f c ) +{ + return ((double)b.x - a.x) * ((double)c.y - a.y) - ((double)b.y - a.y) * ((double)c.x - a.x); +} + + +/****************************************************************************************\ +* Contour Processing and Shape Analysis * +\****************************************************************************************/ + +/* Approximates a single polygonal curve (contour) or + a tree of polygonal curves (contours) */ +CVAPI(CvSeq*) cvApproxPoly( const void* src_seq, + int header_size, CvMemStorage* storage, + int method, double parameter, + int parameter2 CV_DEFAULT(0)); + +/* Calculates perimeter of a contour or length of a part of contour */ +CVAPI(double) cvArcLength( const void* curve, + CvSlice slice CV_DEFAULT(CV_WHOLE_SEQ), + int is_closed CV_DEFAULT(-1)); + +CV_INLINE double cvContourPerimeter( const void* contour ) +{ + return 
cvArcLength( contour, CV_WHOLE_SEQ, 1 ); +} + + +/* Calculates contour boundning rectangle (update=1) or + just retrieves pre-calculated rectangle (update=0) */ +CVAPI(CvRect) cvBoundingRect( CvArr* points, int update CV_DEFAULT(0) ); + +/* Calculates area of a contour or contour segment */ +CVAPI(double) cvContourArea( const CvArr* contour, + CvSlice slice CV_DEFAULT(CV_WHOLE_SEQ), + int oriented CV_DEFAULT(0)); + +/* Finds minimum area rotated rectangle bounding a set of points */ +CVAPI(CvBox2D) cvMinAreaRect2( const CvArr* points, + CvMemStorage* storage CV_DEFAULT(NULL)); + +/* Finds minimum enclosing circle for a set of points */ +CVAPI(int) cvMinEnclosingCircle( const CvArr* points, + CvPoint2D32f* center, float* radius ); + +/* Compares two contours by matching their moments */ +CVAPI(double) cvMatchShapes( const void* object1, const void* object2, + int method, double parameter CV_DEFAULT(0)); + +/* Calculates exact convex hull of 2d point set */ +CVAPI(CvSeq*) cvConvexHull2( const CvArr* input, + void* hull_storage CV_DEFAULT(NULL), + int orientation CV_DEFAULT(CV_CLOCKWISE), + int return_points CV_DEFAULT(0)); + +/* Checks whether the contour is convex or not (returns 1 if convex, 0 if not) */ +CVAPI(int) cvCheckContourConvexity( const CvArr* contour ); + + +/* Finds convexity defects for the contour */ +CVAPI(CvSeq*) cvConvexityDefects( const CvArr* contour, const CvArr* convexhull, + CvMemStorage* storage CV_DEFAULT(NULL)); + +/* Fits ellipse into a set of 2d points */ +CVAPI(CvBox2D) cvFitEllipse2( const CvArr* points ); + +/* Finds minimum rectangle containing two given rectangles */ +CVAPI(CvRect) cvMaxRect( const CvRect* rect1, const CvRect* rect2 ); + +/* Finds coordinates of the box vertices */ +CVAPI(void) cvBoxPoints( CvBox2D box, CvPoint2D32f pt[4] ); + +/* Initializes sequence header for a matrix (column or row vector) of points - + a wrapper for cvMakeSeqHeaderForArray (it does not initialize bounding rectangle!!!) 
*/ +CVAPI(CvSeq*) cvPointSeqFromMat( int seq_kind, const CvArr* mat, + CvContour* contour_header, + CvSeqBlock* block ); + +/* Checks whether the point is inside polygon, outside, on an edge (at a vertex). + Returns positive, negative or zero value, correspondingly. + Optionally, measures a signed distance between + the point and the nearest polygon edge (measure_dist=1) */ +CVAPI(double) cvPointPolygonTest( const CvArr* contour, + CvPoint2D32f pt, int measure_dist ); + +/****************************************************************************************\ +* Histogram functions * +\****************************************************************************************/ + +/* Creates new histogram */ +CVAPI(CvHistogram*) cvCreateHist( int dims, int* sizes, int type, + float** ranges CV_DEFAULT(NULL), + int uniform CV_DEFAULT(1)); + +/* Assignes histogram bin ranges */ +CVAPI(void) cvSetHistBinRanges( CvHistogram* hist, float** ranges, + int uniform CV_DEFAULT(1)); + +/* Creates histogram header for array */ +CVAPI(CvHistogram*) cvMakeHistHeaderForArray( + int dims, int* sizes, CvHistogram* hist, + float* data, float** ranges CV_DEFAULT(NULL), + int uniform CV_DEFAULT(1)); + +/* Releases histogram */ +CVAPI(void) cvReleaseHist( CvHistogram** hist ); + +/* Clears all the histogram bins */ +CVAPI(void) cvClearHist( CvHistogram* hist ); + +/* Finds indices and values of minimum and maximum histogram bins */ +CVAPI(void) cvGetMinMaxHistValue( const CvHistogram* hist, + float* min_value, float* max_value, + int* min_idx CV_DEFAULT(NULL), + int* max_idx CV_DEFAULT(NULL)); + + +/* Normalizes histogram by dividing all bins by sum of the bins, multiplied by . 
+ After that sum of histogram bins is equal to */ +CVAPI(void) cvNormalizeHist( CvHistogram* hist, double factor ); + + +/* Clear all histogram bins that are below the threshold */ +CVAPI(void) cvThreshHist( CvHistogram* hist, double threshold ); + + +/* Compares two histogram */ +CVAPI(double) cvCompareHist( const CvHistogram* hist1, + const CvHistogram* hist2, + int method); + +/* Copies one histogram to another. Destination histogram is created if + the destination pointer is NULL */ +CVAPI(void) cvCopyHist( const CvHistogram* src, CvHistogram** dst ); + + +/* Calculates bayesian probabilistic histograms + (each or src and dst is an array of histograms */ +CVAPI(void) cvCalcBayesianProb( CvHistogram** src, int number, + CvHistogram** dst); + +/* Calculates array histogram */ +CVAPI(void) cvCalcArrHist( CvArr** arr, CvHistogram* hist, + int accumulate CV_DEFAULT(0), + const CvArr* mask CV_DEFAULT(NULL) ); + +CV_INLINE void cvCalcHist( IplImage** image, CvHistogram* hist, + int accumulate CV_DEFAULT(0), + const CvArr* mask CV_DEFAULT(NULL) ) +{ + cvCalcArrHist( (CvArr**)image, hist, accumulate, mask ); +} + +/* Calculates back project */ +CVAPI(void) cvCalcArrBackProject( CvArr** image, CvArr* dst, + const CvHistogram* hist ); +#define cvCalcBackProject(image, dst, hist) cvCalcArrBackProject((CvArr**)image, dst, hist) + + +/* Does some sort of template matching but compares histograms of + template and each window location */ +CVAPI(void) cvCalcArrBackProjectPatch( CvArr** image, CvArr* dst, CvSize range, + CvHistogram* hist, int method, + double factor ); +#define cvCalcBackProjectPatch( image, dst, range, hist, method, factor ) \ + cvCalcArrBackProjectPatch( (CvArr**)image, dst, range, hist, method, factor ) + + +/* calculates probabilistic density (divides one histogram by another) */ +CVAPI(void) cvCalcProbDensity( const CvHistogram* hist1, const CvHistogram* hist2, + CvHistogram* dst_hist, double scale CV_DEFAULT(255) ); + +/* equalizes histogram of 8-bit 
single-channel image */ +CVAPI(void) cvEqualizeHist( const CvArr* src, CvArr* dst ); + + +/* Applies distance transform to binary image */ +CVAPI(void) cvDistTransform( const CvArr* src, CvArr* dst, + int distance_type CV_DEFAULT(CV_DIST_L2), + int mask_size CV_DEFAULT(3), + const float* mask CV_DEFAULT(NULL), + CvArr* labels CV_DEFAULT(NULL)); + + +/* Applies fixed-level threshold to grayscale image. + This is a basic operation applied before retrieving contours */ +CVAPI(double) cvThreshold( const CvArr* src, CvArr* dst, + double threshold, double max_value, + int threshold_type ); + +/* Applies adaptive threshold to grayscale image. + The two parameters for methods CV_ADAPTIVE_THRESH_MEAN_C and + CV_ADAPTIVE_THRESH_GAUSSIAN_C are: + neighborhood size (3, 5, 7 etc.), + and a constant subtracted from mean (...,-3,-2,-1,0,1,2,3,...) */ +CVAPI(void) cvAdaptiveThreshold( const CvArr* src, CvArr* dst, double max_value, + int adaptive_method CV_DEFAULT(CV_ADAPTIVE_THRESH_MEAN_C), + int threshold_type CV_DEFAULT(CV_THRESH_BINARY), + int block_size CV_DEFAULT(3), + double param1 CV_DEFAULT(5)); + +/* Fills the connected component until the color difference gets large enough */ +CVAPI(void) cvFloodFill( CvArr* image, CvPoint seed_point, + CvScalar new_val, CvScalar lo_diff CV_DEFAULT(cvScalarAll(0)), + CvScalar up_diff CV_DEFAULT(cvScalarAll(0)), + CvConnectedComp* comp CV_DEFAULT(NULL), + int flags CV_DEFAULT(4), + CvArr* mask CV_DEFAULT(NULL)); + +/****************************************************************************************\ +* Feature detection * +\****************************************************************************************/ + +/* Runs canny edge detector */ +CVAPI(void) cvCanny( const CvArr* image, CvArr* edges, double threshold1, + double threshold2, int aperture_size CV_DEFAULT(3) ); + +/* Calculates constraint image for corner detection + Dx^2 * Dyy + Dxx * Dy^2 - 2 * Dx * Dy * Dxy. 
+ Applying threshold to the result gives coordinates of corners */ +CVAPI(void) cvPreCornerDetect( const CvArr* image, CvArr* corners, + int aperture_size CV_DEFAULT(3) ); + +/* Calculates eigen values and vectors of 2x2 + gradient covariation matrix at every image pixel */ +CVAPI(void) cvCornerEigenValsAndVecs( const CvArr* image, CvArr* eigenvv, + int block_size, int aperture_size CV_DEFAULT(3) ); + +/* Calculates minimal eigenvalue for 2x2 gradient covariation matrix at + every image pixel */ +CVAPI(void) cvCornerMinEigenVal( const CvArr* image, CvArr* eigenval, + int block_size, int aperture_size CV_DEFAULT(3) ); + +/* Harris corner detector: + Calculates det(M) - k*(trace(M)^2), where M is 2x2 gradient covariation matrix for each pixel */ +CVAPI(void) cvCornerHarris( const CvArr* image, CvArr* harris_responce, + int block_size, int aperture_size CV_DEFAULT(3), + double k CV_DEFAULT(0.04) ); + +/* Adjust corner position using some sort of gradient search */ +CVAPI(void) cvFindCornerSubPix( const CvArr* image, CvPoint2D32f* corners, + int count, CvSize win, CvSize zero_zone, + CvTermCriteria criteria ); + +/* Finds a sparse set of points within the selected region + that seem to be easy to track */ +CVAPI(void) cvGoodFeaturesToTrack( const CvArr* image, CvArr* eig_image, + CvArr* temp_image, CvPoint2D32f* corners, + int* corner_count, double quality_level, + double min_distance, + const CvArr* mask CV_DEFAULT(NULL), + int block_size CV_DEFAULT(3), + int use_harris CV_DEFAULT(0), + double k CV_DEFAULT(0.04) ); + +/* Finds lines on binary image using one of several methods. + line_storage is either memory storage or 1 x CvMat, its + number of columns is changed by the function. 
+ method is one of CV_HOUGH_*; + rho, theta and threshold are used for each of those methods; + param1 ~ line length, param2 ~ line gap - for probabilistic, + param1 ~ srn, param2 ~ stn - for multi-scale */ +CVAPI(CvSeq*) cvHoughLines2( CvArr* image, void* line_storage, int method, + double rho, double theta, int threshold, + double param1 CV_DEFAULT(0), double param2 CV_DEFAULT(0)); + +/* Finds circles in the image */ +CVAPI(CvSeq*) cvHoughCircles( CvArr* image, void* circle_storage, + int method, double dp, double min_dist, + double param1 CV_DEFAULT(100), + double param2 CV_DEFAULT(100), + int min_radius CV_DEFAULT(0), + int max_radius CV_DEFAULT(0)); + +/* Fits a line into set of 2d or 3d points in a robust way (M-estimator technique) */ +CVAPI(void) cvFitLine( const CvArr* points, int dist_type, double param, + double reps, double aeps, float* line ); + + +/* Constructs kd-tree from set of feature descriptors */ +CVAPI(struct CvFeatureTree*) cvCreateKDTree(CvMat* desc); + +/* Constructs spill-tree from set of feature descriptors */ +CVAPI(struct CvFeatureTree*) cvCreateSpillTree( const CvMat* raw_data, + const int naive CV_DEFAULT(50), + const double rho CV_DEFAULT(.7), + const double tau CV_DEFAULT(.1) ); + +/* Release feature tree */ +CVAPI(void) cvReleaseFeatureTree(struct CvFeatureTree* tr); + +/* Searches feature tree for k nearest neighbors of given reference points, + searching (in case of kd-tree/bbf) at most emax leaves. */ +CVAPI(void) cvFindFeatures(struct CvFeatureTree* tr, const CvMat* query_points, + CvMat* indices, CvMat* dist, int k, int emax CV_DEFAULT(20)); + +/* Search feature tree for all points that are inlier to given rect region. + Only implemented for kd trees */ +CVAPI(int) cvFindFeaturesBoxed(struct CvFeatureTree* tr, + CvMat* bounds_min, CvMat* bounds_max, + CvMat* out_indices); + + +/* Construct a Locality Sensitive Hash (LSH) table, for indexing d-dimensional vectors of + given type. 
Vectors will be hashed L times with k-dimensional p-stable (p=2) functions. */ +CVAPI(struct CvLSH*) cvCreateLSH(struct CvLSHOperations* ops, int d, + int L CV_DEFAULT(10), int k CV_DEFAULT(10), + int type CV_DEFAULT(CV_64FC1), double r CV_DEFAULT(4), + int64 seed CV_DEFAULT(-1)); + +/* Construct in-memory LSH table, with n bins. */ +CVAPI(struct CvLSH*) cvCreateMemoryLSH(int d, int n, int L CV_DEFAULT(10), int k CV_DEFAULT(10), + int type CV_DEFAULT(CV_64FC1), double r CV_DEFAULT(4), + int64 seed CV_DEFAULT(-1)); + +/* Free the given LSH structure. */ +CVAPI(void) cvReleaseLSH(struct CvLSH** lsh); + +/* Return the number of vectors in the LSH. */ +CVAPI(unsigned int) LSHSize(struct CvLSH* lsh); + +/* Add vectors to the LSH structure, optionally returning indices. */ +CVAPI(void) cvLSHAdd(struct CvLSH* lsh, const CvMat* data, CvMat* indices CV_DEFAULT(0)); + +/* Remove vectors from LSH, as addressed by given indices. */ +CVAPI(void) cvLSHRemove(struct CvLSH* lsh, const CvMat* indices); + +/* Query the LSH n times for at most k nearest points; data is n x d, + indices and dist are n x k. At most emax stored points will be accessed. */ +CVAPI(void) cvLSHQuery(struct CvLSH* lsh, const CvMat* query_points, + CvMat* indices, CvMat* dist, int k, int emax); + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/opencv/imgproc/opencv2/imgproc/types_c.h b/opencv/imgproc/opencv2/imgproc/types_c.h new file mode 100644 index 0000000..5a984fd --- /dev/null +++ b/opencv/imgproc/opencv2/imgproc/types_c.h @@ -0,0 +1,538 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_IMGPROC_TYPES_C_H__ +#define __OPENCV_IMGPROC_TYPES_C_H__ + +#include "opencv2/core/core_c.h" +#include "opencv2/imgproc/types_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Connected component structure */ +typedef struct CvConnectedComp +{ + double area; /* area of the connected component */ + CvScalar value; /* average color of the connected component */ + CvRect rect; /* ROI of the component */ + CvSeq* contour; /* optional component boundary + (the contour might have child contours corresponding to the holes)*/ +} +CvConnectedComp; + +/* Image smooth methods */ +enum +{ + CV_BLUR_NO_SCALE =0, + CV_BLUR =1, + CV_GAUSSIAN =2, + CV_MEDIAN =3, + CV_BILATERAL =4 +}; + +/* Filters used in pyramid decomposition */ +enum +{ + CV_GAUSSIAN_5x5 = 7 +}; + +/* Inpainting algorithms */ +enum +{ + CV_INPAINT_NS =0, + CV_INPAINT_TELEA =1 +}; + +/* Special filters */ +enum +{ + CV_SCHARR =-1, + CV_MAX_SOBEL_KSIZE =7 +}; + +/* Constants for color conversion */ +enum +{ + CV_BGR2BGRA =0, + CV_RGB2RGBA =CV_BGR2BGRA, + + CV_BGRA2BGR =1, + CV_RGBA2RGB =CV_BGRA2BGR, + + CV_BGR2RGBA =2, + CV_RGB2BGRA =CV_BGR2RGBA, + + CV_RGBA2BGR =3, + CV_BGRA2RGB =CV_RGBA2BGR, + + CV_BGR2RGB =4, + CV_RGB2BGR =CV_BGR2RGB, + + CV_BGRA2RGBA =5, + CV_RGBA2BGRA =CV_BGRA2RGBA, + + CV_BGR2GRAY =6, + CV_RGB2GRAY =7, + CV_GRAY2BGR =8, + CV_GRAY2RGB =CV_GRAY2BGR, + CV_GRAY2BGRA =9, + CV_GRAY2RGBA =CV_GRAY2BGRA, + CV_BGRA2GRAY =10, + CV_RGBA2GRAY =11, + + CV_BGR2BGR565 =12, + CV_RGB2BGR565 =13, + CV_BGR5652BGR =14, + CV_BGR5652RGB =15, + CV_BGRA2BGR565 =16, + CV_RGBA2BGR565 =17, + CV_BGR5652BGRA =18, + CV_BGR5652RGBA =19, + + CV_GRAY2BGR565 =20, + CV_BGR5652GRAY =21, + + CV_BGR2BGR555 =22, + CV_RGB2BGR555 =23, + CV_BGR5552BGR =24, + CV_BGR5552RGB =25, + CV_BGRA2BGR555 =26, + CV_RGBA2BGR555 =27, + CV_BGR5552BGRA =28, + CV_BGR5552RGBA =29, + + CV_GRAY2BGR555 =30, + CV_BGR5552GRAY =31, + + CV_BGR2XYZ =32, + CV_RGB2XYZ =33, + CV_XYZ2BGR =34, + CV_XYZ2RGB =35, + + CV_BGR2YCrCb 
=36, + CV_RGB2YCrCb =37, + CV_YCrCb2BGR =38, + CV_YCrCb2RGB =39, + + CV_BGR2HSV =40, + CV_RGB2HSV =41, + + CV_BGR2Lab =44, + CV_RGB2Lab =45, + + CV_BayerBG2BGR =46, + CV_BayerGB2BGR =47, + CV_BayerRG2BGR =48, + CV_BayerGR2BGR =49, + + CV_BayerBG2RGB =CV_BayerRG2BGR, + CV_BayerGB2RGB =CV_BayerGR2BGR, + CV_BayerRG2RGB =CV_BayerBG2BGR, + CV_BayerGR2RGB =CV_BayerGB2BGR, + + CV_BGR2Luv =50, + CV_RGB2Luv =51, + CV_BGR2HLS =52, + CV_RGB2HLS =53, + + CV_HSV2BGR =54, + CV_HSV2RGB =55, + + CV_Lab2BGR =56, + CV_Lab2RGB =57, + CV_Luv2BGR =58, + CV_Luv2RGB =59, + CV_HLS2BGR =60, + CV_HLS2RGB =61, + + CV_BayerBG2BGR_VNG =62, + CV_BayerGB2BGR_VNG =63, + CV_BayerRG2BGR_VNG =64, + CV_BayerGR2BGR_VNG =65, + + CV_BayerBG2RGB_VNG =CV_BayerRG2BGR_VNG, + CV_BayerGB2RGB_VNG =CV_BayerGR2BGR_VNG, + CV_BayerRG2RGB_VNG =CV_BayerBG2BGR_VNG, + CV_BayerGR2RGB_VNG =CV_BayerGB2BGR_VNG, + + CV_BGR2HSV_FULL = 66, + CV_RGB2HSV_FULL = 67, + CV_BGR2HLS_FULL = 68, + CV_RGB2HLS_FULL = 69, + + CV_HSV2BGR_FULL = 70, + CV_HSV2RGB_FULL = 71, + CV_HLS2BGR_FULL = 72, + CV_HLS2RGB_FULL = 73, + + CV_LBGR2Lab = 74, + CV_LRGB2Lab = 75, + CV_LBGR2Luv = 76, + CV_LRGB2Luv = 77, + + CV_Lab2LBGR = 78, + CV_Lab2LRGB = 79, + CV_Luv2LBGR = 80, + CV_Luv2LRGB = 81, + + CV_BGR2YUV = 82, + CV_RGB2YUV = 83, + CV_YUV2BGR = 84, + CV_YUV2RGB = 85, + + CV_BayerBG2GRAY = 86, + CV_BayerGB2GRAY = 87, + CV_BayerRG2GRAY = 88, + CV_BayerGR2GRAY = 89, + + CV_YUV420i2RGB = 90, + CV_YUV420i2BGR = 91, + CV_YUV420sp2RGB = 92, + CV_YUV420sp2BGR = 93, + + CV_COLORCVT_MAX =100 +}; + + +/* Sub-pixel interpolation methods */ +enum +{ + CV_INTER_NN =0, + CV_INTER_LINEAR =1, + CV_INTER_CUBIC =2, + CV_INTER_AREA =3, + CV_INTER_LANCZOS4 =4 +}; + +/* ... 
and other image warping flags */ +enum +{ + CV_WARP_FILL_OUTLIERS =8, + CV_WARP_INVERSE_MAP =16 +}; + +/* Shapes of a structuring element for morphological operations */ +enum +{ + CV_SHAPE_RECT =0, + CV_SHAPE_CROSS =1, + CV_SHAPE_ELLIPSE =2, + CV_SHAPE_CUSTOM =100 +}; + +/* Morphological operations */ +enum +{ + CV_MOP_ERODE =0, + CV_MOP_DILATE =1, + CV_MOP_OPEN =2, + CV_MOP_CLOSE =3, + CV_MOP_GRADIENT =4, + CV_MOP_TOPHAT =5, + CV_MOP_BLACKHAT =6 +}; + +/* Spatial and central moments */ +typedef struct CvMoments +{ + double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03; /* spatial moments */ + double mu20, mu11, mu02, mu30, mu21, mu12, mu03; /* central moments */ + double inv_sqrt_m00; /* m00 != 0 ? 1/sqrt(m00) : 0 */ +} +CvMoments; + +/* Hu invariants */ +typedef struct CvHuMoments +{ + double hu1, hu2, hu3, hu4, hu5, hu6, hu7; /* Hu invariants */ +} +CvHuMoments; + +/* Template matching methods */ +enum +{ + CV_TM_SQDIFF =0, + CV_TM_SQDIFF_NORMED =1, + CV_TM_CCORR =2, + CV_TM_CCORR_NORMED =3, + CV_TM_CCOEFF =4, + CV_TM_CCOEFF_NORMED =5 +}; + +typedef float (CV_CDECL * CvDistanceFunction)( const float* a, const float* b, void* user_param ); + +/* Contour retrieval modes */ +enum +{ + CV_RETR_EXTERNAL=0, + CV_RETR_LIST=1, + CV_RETR_CCOMP=2, + CV_RETR_TREE=3 +}; + +/* Contour approximation methods */ +enum +{ + CV_CHAIN_CODE=0, + CV_CHAIN_APPROX_NONE=1, + CV_CHAIN_APPROX_SIMPLE=2, + CV_CHAIN_APPROX_TC89_L1=3, + CV_CHAIN_APPROX_TC89_KCOS=4, + CV_LINK_RUNS=5 +}; + +/* +Internal structure that is used for sequental retrieving contours from the image. +It supports both hierarchical and plane variants of Suzuki algorithm. 
+*/ +typedef struct _CvContourScanner* CvContourScanner; + +/* Freeman chain reader state */ +typedef struct CvChainPtReader +{ + CV_SEQ_READER_FIELDS() + char code; + CvPoint pt; + schar deltas[8][2]; +} +CvChainPtReader; + +/* initializes 8-element array for fast access to 3x3 neighborhood of a pixel */ +#define CV_INIT_3X3_DELTAS( deltas, step, nch ) \ + ((deltas)[0] = (nch), (deltas)[1] = -(step) + (nch), \ + (deltas)[2] = -(step), (deltas)[3] = -(step) - (nch), \ + (deltas)[4] = -(nch), (deltas)[5] = (step) - (nch), \ + (deltas)[6] = (step), (deltas)[7] = (step) + (nch)) + + +/****************************************************************************************\ +* Planar subdivisions * +\****************************************************************************************/ + +typedef size_t CvSubdiv2DEdge; + +#define CV_QUADEDGE2D_FIELDS() \ + int flags; \ + struct CvSubdiv2DPoint* pt[4]; \ + CvSubdiv2DEdge next[4]; + +#define CV_SUBDIV2D_POINT_FIELDS()\ + int flags; \ + CvSubdiv2DEdge first; \ + CvPoint2D32f pt; \ + int id; + +#define CV_SUBDIV2D_VIRTUAL_POINT_FLAG (1 << 30) + +typedef struct CvQuadEdge2D +{ + CV_QUADEDGE2D_FIELDS() +} +CvQuadEdge2D; + +typedef struct CvSubdiv2DPoint +{ + CV_SUBDIV2D_POINT_FIELDS() +} +CvSubdiv2DPoint; + +#define CV_SUBDIV2D_FIELDS() \ + CV_GRAPH_FIELDS() \ + int quad_edges; \ + int is_geometry_valid; \ + CvSubdiv2DEdge recent_edge; \ + CvPoint2D32f topleft; \ + CvPoint2D32f bottomright; + +typedef struct CvSubdiv2D +{ + CV_SUBDIV2D_FIELDS() +} +CvSubdiv2D; + + +typedef enum CvSubdiv2DPointLocation +{ + CV_PTLOC_ERROR = -2, + CV_PTLOC_OUTSIDE_RECT = -1, + CV_PTLOC_INSIDE = 0, + CV_PTLOC_VERTEX = 1, + CV_PTLOC_ON_EDGE = 2 +} +CvSubdiv2DPointLocation; + +typedef enum CvNextEdgeType +{ + CV_NEXT_AROUND_ORG = 0x00, + CV_NEXT_AROUND_DST = 0x22, + CV_PREV_AROUND_ORG = 0x11, + CV_PREV_AROUND_DST = 0x33, + CV_NEXT_AROUND_LEFT = 0x13, + CV_NEXT_AROUND_RIGHT = 0x31, + CV_PREV_AROUND_LEFT = 0x20, + CV_PREV_AROUND_RIGHT = 0x02 +} 
+CvNextEdgeType; + +/* get the next edge with the same origin point (counterwise) */ +#define CV_SUBDIV2D_NEXT_EDGE( edge ) (((CvQuadEdge2D*)((edge) & ~3))->next[(edge)&3]) + + +/* Contour approximation algorithms */ +enum +{ + CV_POLY_APPROX_DP = 0 +}; + +/* Shape matching methods */ +enum +{ + CV_CONTOURS_MATCH_I1 =1, + CV_CONTOURS_MATCH_I2 =2, + CV_CONTOURS_MATCH_I3 =3 +}; + +/* Shape orientation */ +enum +{ + CV_CLOCKWISE =1, + CV_COUNTER_CLOCKWISE =2 +}; + + +/* Convexity defect */ +typedef struct CvConvexityDefect +{ + CvPoint* start; /* point of the contour where the defect begins */ + CvPoint* end; /* point of the contour where the defect ends */ + CvPoint* depth_point; /* the farthest from the convex hull point within the defect */ + float depth; /* distance between the farthest point and the convex hull */ +} CvConvexityDefect; + + +/* Histogram comparison methods */ +enum +{ + CV_COMP_CORREL =0, + CV_COMP_CHISQR =1, + CV_COMP_INTERSECT =2, + CV_COMP_BHATTACHARYYA =3 +}; + +/* Mask size for distance transform */ +enum +{ + CV_DIST_MASK_3 =3, + CV_DIST_MASK_5 =5, + CV_DIST_MASK_PRECISE =0 +}; + +/* Distance types for Distance Transform and M-estimators */ +enum +{ + CV_DIST_USER =-1, /* User defined distance */ + CV_DIST_L1 =1, /* distance = |x1-x2| + |y1-y2| */ + CV_DIST_L2 =2, /* the simple euclidean distance */ + CV_DIST_C =3, /* distance = max(|x1-x2|,|y1-y2|) */ + CV_DIST_L12 =4, /* L1-L2 metric: distance = 2(sqrt(1+x*x/2) - 1)) */ + CV_DIST_FAIR =5, /* distance = c^2(|x|/c-log(1+|x|/c)), c = 1.3998 */ + CV_DIST_WELSCH =6, /* distance = c^2/2(1-exp(-(x/c)^2)), c = 2.9846 */ + CV_DIST_HUBER =7 /* distance = |x| threshold ? max_value : 0 */ + CV_THRESH_BINARY_INV =1, /* value = value > threshold ? 0 : max_value */ + CV_THRESH_TRUNC =2, /* value = value > threshold ? threshold : value */ + CV_THRESH_TOZERO =3, /* value = value > threshold ? value : 0 */ + CV_THRESH_TOZERO_INV =4, /* value = value > threshold ? 
0 : value */ + CV_THRESH_MASK =7, + CV_THRESH_OTSU =8 /* use Otsu algorithm to choose the optimal threshold value; + combine the flag with one of the above CV_THRESH_* values */ +}; + +/* Adaptive threshold methods */ +enum +{ + CV_ADAPTIVE_THRESH_MEAN_C =0, + CV_ADAPTIVE_THRESH_GAUSSIAN_C =1 +}; + +/* FloodFill flags */ +enum +{ + CV_FLOODFILL_FIXED_RANGE =(1 << 16), + CV_FLOODFILL_MASK_ONLY =(1 << 17) +}; + + +/* Canny edge detector flags */ +enum +{ + CV_CANNY_L2_GRADIENT =(1 << 31) +}; + +/* Variants of a Hough transform */ +enum +{ + CV_HOUGH_STANDARD =0, + CV_HOUGH_PROBABILISTIC =1, + CV_HOUGH_MULTI_SCALE =2, + CV_HOUGH_GRADIENT =3 +}; + + +/* Fast search data structures */ +struct CvFeatureTree; +struct CvLSH; +struct CvLSHOperations; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/opencv/imgproc/precomp.cpp b/opencv/imgproc/precomp.cpp new file mode 100644 index 0000000..3e0ec42 --- /dev/null +++ b/opencv/imgproc/precomp.cpp @@ -0,0 +1,44 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" + +/* End of file. */ diff --git a/opencv/imgproc/precomp.hpp b/opencv/imgproc/precomp.hpp new file mode 100644 index 0000000..731addd --- /dev/null +++ b/opencv/imgproc/precomp.hpp @@ -0,0 +1,159 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. 
+// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_PRECOMP_H__ +#define __OPENCV_PRECOMP_H__ + +#if defined _MSC_VER && _MSC_VER >= 1200 + // disable warnings related to inline functions + #pragma warning( disable: 4251 4711 4710 4514 ) +#endif + +#ifdef HAVE_CVCONFIG_H +#include "cvconfig.h" +#endif + +#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/imgproc/imgproc_c.h" +#include "opencv2/core/internal.hpp" +#include +#include +#include +#include +#include +#include +#include + +#ifdef HAVE_TEGRA_OPTIMIZATION +#include "opencv2/imgproc/imgproc_tegra.hpp" +#endif + +/* helper tables */ +extern const uchar icvSaturate8u_cv[]; +#define CV_FAST_CAST_8U(t) (assert(-256 <= (t) || (t) <= 512), icvSaturate8u_cv[(t)+256]) +#define CV_CALC_MIN_8U(a,b) (a) -= CV_FAST_CAST_8U((a) - (b)) +#define CV_CALC_MAX_8U(a,b) (a) += CV_FAST_CAST_8U((b) - (a)) + +// -256.f ... 511.f +extern const float icv8x32fTab_cv[]; +#define CV_8TO32F(x) icv8x32fTab_cv[(x)+256] + +// (-128.f)^2 ... (255.f)^2 +extern const float icv8x32fSqrTab[]; +#define CV_8TO32F_SQR(x) icv8x32fSqrTab[(x)+128] + +namespace cv +{ + +static inline Point normalizeAnchor( Point anchor, Size ksize ) +{ + if( anchor.x == -1 ) + anchor.x = ksize.width/2; + if( anchor.y == -1 ) + anchor.y = ksize.height/2; + CV_Assert( anchor.inside(Rect(0, 0, ksize.width, ksize.height)) ); + return anchor; +} + +void preprocess2DKernel( const Mat& kernel, vector& coords, vector& coeffs ); +void crossCorr( const Mat& src, const Mat& templ, Mat& dst, + Size corrsize, int ctype, + Point anchor=Point(0,0), double delta=0, + int borderType=BORDER_REFLECT_101 ); + +} + +typedef struct CvPyramid +{ + uchar **ptr; + CvSize *sz; + double *rate; + int *step; + uchar *state; + int level; +} +CvPyramid; + +#define CV_COPY( dst, src, len, idx ) \ + for( (idx) = 0; (idx) < (len); (idx)++) (dst)[idx] = (src)[idx] + +#define CV_SET( dst, val, len, idx ) \ + for( (idx) = 0; (idx) < (len); (idx)++) (dst)[idx] = (val) + +/* performs convolution of 2d floating-point 
array with 3x1, 1x3 or separable 3x3 mask */ +void icvSepConvSmall3_32f( float* src, int src_step, float* dst, int dst_step, + CvSize src_size, const float* kx, const float* ky, float* buffer ); + +#undef CV_CALC_MIN +#define CV_CALC_MIN(a, b) if((a) > (b)) (a) = (b) + +#undef CV_CALC_MAX +#define CV_CALC_MAX(a, b) if((a) < (b)) (a) = (b) + +CvStatus CV_STDCALL +icvCopyReplicateBorder_8u( const uchar* src, int srcstep, CvSize srcroi, + uchar* dst, int dststep, CvSize dstroi, + int left, int right, int cn, const uchar* value = 0 ); + +CvStatus CV_STDCALL icvGetRectSubPix_8u_C1R +( const uchar* src, int src_step, CvSize src_size, + uchar* dst, int dst_step, CvSize win_size, CvPoint2D32f center ); +CvStatus CV_STDCALL icvGetRectSubPix_8u32f_C1R +( const uchar* src, int src_step, CvSize src_size, + float* dst, int dst_step, CvSize win_size, CvPoint2D32f center ); +CvStatus CV_STDCALL icvGetRectSubPix_32f_C1R +( const float* src, int src_step, CvSize src_size, + float* dst, int dst_step, CvSize win_size, CvPoint2D32f center ); + +CvStatus CV_STDCALL icvGetQuadrangleSubPix_8u_C1R +( const uchar* src, int src_step, CvSize src_size, + uchar* dst, int dst_step, CvSize win_size, const float *matrix ); +CvStatus CV_STDCALL icvGetQuadrangleSubPix_8u32f_C1R +( const uchar* src, int src_step, CvSize src_size, + float* dst, int dst_step, CvSize win_size, const float *matrix ); +CvStatus CV_STDCALL icvGetQuadrangleSubPix_32f_C1R +( const float* src, int src_step, CvSize src_size, + float* dst, int dst_step, CvSize win_size, const float *matrix ); + +#include "_geom.h" + +#endif /*__OPENCV_CV_INTERNAL_H_*/ diff --git a/opencv/imgproc/pyramids.cpp b/opencv/imgproc/pyramids.cpp new file mode 100644 index 0000000..65123e6 --- /dev/null +++ b/opencv/imgproc/pyramids.cpp @@ -0,0 +1,569 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#include "precomp.hpp" + +namespace cv +{ + +template struct FixPtCast +{ + typedef int type1; + typedef T rtype; + rtype operator ()(type1 arg) const { return (T)((arg + (1 << (shift-1))) >> shift); } +}; + +template struct FltCast +{ + typedef T type1; + typedef T rtype; + rtype operator ()(type1 arg) const { return arg*(T)(1./(1 << shift)); } +}; + +template struct NoVec +{ + int operator()(T1**, T2*, int, int) const { return 0; } +}; + +#if CV_SSE2 + +struct PyrDownVec_32s8u +{ + int operator()(int** src, uchar* dst, int, int width) const + { + if( !checkHardwareSupport(CV_CPU_SSE2) ) + return 0; + + int x = 0; + const int *row0 = src[0], *row1 = src[1], *row2 = src[2], *row3 = src[3], *row4 = src[4]; + __m128i delta = _mm_set1_epi16(128); + + for( ; x <= width - 16; x += 16 ) + { + __m128i r0, r1, r2, r3, r4, t0, t1; + r0 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row0 + x)), + _mm_load_si128((const __m128i*)(row0 + x + 4))); + r1 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row1 + x)), + _mm_load_si128((const __m128i*)(row1 + x + 4))); + r2 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row2 + x)), + _mm_load_si128((const __m128i*)(row2 + x + 4))); + r3 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row3 + x)), + _mm_load_si128((const __m128i*)(row3 + x + 4))); + r4 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row4 + x)), + _mm_load_si128((const __m128i*)(row4 + x + 4))); + r0 = _mm_add_epi16(r0, r4); + r1 = _mm_add_epi16(_mm_add_epi16(r1, r3), r2); + r0 = _mm_add_epi16(r0, _mm_add_epi16(r2, r2)); + t0 = _mm_add_epi16(r0, _mm_slli_epi16(r1, 2)); + r0 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row0 + x + 8)), + _mm_load_si128((const __m128i*)(row0 + x + 12))); + r1 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row1 + x + 8)), + _mm_load_si128((const __m128i*)(row1 + x + 12))); + r2 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row2 + x + 8)), + _mm_load_si128((const __m128i*)(row2 + x + 12))); + r3 = 
_mm_packs_epi32(_mm_load_si128((const __m128i*)(row3 + x + 8)), + _mm_load_si128((const __m128i*)(row3 + x + 12))); + r4 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row4 + x + 8)), + _mm_load_si128((const __m128i*)(row4 + x + 12))); + r0 = _mm_add_epi16(r0, r4); + r1 = _mm_add_epi16(_mm_add_epi16(r1, r3), r2); + r0 = _mm_add_epi16(r0, _mm_add_epi16(r2, r2)); + t1 = _mm_add_epi16(r0, _mm_slli_epi16(r1, 2)); + t0 = _mm_srli_epi16(_mm_add_epi16(t0, delta), 8); + t1 = _mm_srli_epi16(_mm_add_epi16(t1, delta), 8); + _mm_storeu_si128((__m128i*)(dst + x), _mm_packus_epi16(t0, t1)); + } + + for( ; x <= width - 4; x += 4 ) + { + __m128i r0, r1, r2, r3, r4, z = _mm_setzero_si128(); + r0 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row0 + x)), z); + r1 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row1 + x)), z); + r2 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row2 + x)), z); + r3 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row3 + x)), z); + r4 = _mm_packs_epi32(_mm_load_si128((const __m128i*)(row4 + x)), z); + r0 = _mm_add_epi16(r0, r4); + r1 = _mm_add_epi16(_mm_add_epi16(r1, r3), r2); + r0 = _mm_add_epi16(r0, _mm_add_epi16(r2, r2)); + r0 = _mm_add_epi16(r0, _mm_slli_epi16(r1, 2)); + r0 = _mm_srli_epi16(_mm_add_epi16(r0, delta), 8); + *(int*)(dst + x) = _mm_cvtsi128_si32(_mm_packus_epi16(r0, r0)); + } + + return x; + } +}; + +struct PyrDownVec_32f +{ + int operator()(float** src, float* dst, int, int width) const + { + if( !checkHardwareSupport(CV_CPU_SSE) ) + return 0; + + int x = 0; + const float *row0 = src[0], *row1 = src[1], *row2 = src[2], *row3 = src[3], *row4 = src[4]; + __m128 _4 = _mm_set1_ps(4.f), _scale = _mm_set1_ps(1.f/256); + for( ; x <= width - 8; x += 8 ) + { + __m128 r0, r1, r2, r3, r4, t0, t1; + r0 = _mm_load_ps(row0 + x); + r1 = _mm_load_ps(row1 + x); + r2 = _mm_load_ps(row2 + x); + r3 = _mm_load_ps(row3 + x); + r4 = _mm_load_ps(row4 + x); + r0 = _mm_add_ps(r0, r4); + r1 = _mm_add_ps(_mm_add_ps(r1, r3), r2); + r0 = 
_mm_add_ps(r0, _mm_add_ps(r2, r2)); + t0 = _mm_add_ps(r0, _mm_mul_ps(r1, _4)); + + r0 = _mm_load_ps(row0 + x + 4); + r1 = _mm_load_ps(row1 + x + 4); + r2 = _mm_load_ps(row2 + x + 4); + r3 = _mm_load_ps(row3 + x + 4); + r4 = _mm_load_ps(row4 + x + 4); + r0 = _mm_add_ps(r0, r4); + r1 = _mm_add_ps(_mm_add_ps(r1, r3), r2); + r0 = _mm_add_ps(r0, _mm_add_ps(r2, r2)); + t1 = _mm_add_ps(r0, _mm_mul_ps(r1, _4)); + + t0 = _mm_mul_ps(t0, _scale); + t1 = _mm_mul_ps(t1, _scale); + + _mm_storeu_ps(dst + x, t0); + _mm_storeu_ps(dst + x + 4, t1); + } + + return x; + } +}; + +#else + +typedef NoVec PyrDownVec_32s8u; +typedef NoVec PyrDownVec_32f; + +#endif + +template void +pyrDown_( const Mat& _src, Mat& _dst ) +{ + const int PD_SZ = 5; + typedef typename CastOp::type1 WT; + typedef typename CastOp::rtype T; + + Size ssize = _src.size(), dsize = _dst.size(); + int cn = _src.channels(); + int bufstep = (int)alignSize(dsize.width*cn, 16); + AutoBuffer _buf(bufstep*PD_SZ + 16); + WT* buf = alignPtr((WT*)_buf, 16); + int tabL[CV_CN_MAX*(PD_SZ+2)], tabR[CV_CN_MAX*(PD_SZ+2)]; + AutoBuffer _tabM(dsize.width*cn); + int* tabM = _tabM; + WT* rows[PD_SZ]; + CastOp castOp; + VecOp vecOp; + + CV_Assert( std::abs((long int)(dsize.width*2 - ssize.width)) <= 2 && + std::abs((long int)(dsize.height*2 - ssize.height)) <= 2 ); + int k, x, sy0 = -PD_SZ/2, sy = sy0, width0 = std::min((ssize.width-PD_SZ/2-1)/2 + 1, dsize.width); + + for( x = 0; x <= PD_SZ+1; x++ ) + { + int sx0 = borderInterpolate(x - PD_SZ/2, ssize.width, BORDER_REFLECT_101)*cn; + int sx1 = borderInterpolate(x + width0*2 - PD_SZ/2, ssize.width, BORDER_REFLECT_101)*cn; + for( k = 0; k < cn; k++ ) + { + tabL[x*cn + k] = sx0 + k; + tabR[x*cn + k] = sx1 + k; + } + } + + ssize.width *= cn; + dsize.width *= cn; + width0 *= cn; + + for( x = 0; x < dsize.width; x++ ) + tabM[x] = (x/cn)*2*cn + x % cn; + + for( int y = 0; y < dsize.height; y++ ) + { + T* dst = (T*)(_dst.data + _dst.step*y); + WT *row0, *row1, *row2, *row3, *row4; + + // fill 
the ring buffer (horizontal convolution and decimation) + for( ; sy <= y*2 + 2; sy++ ) + { + WT* row = buf + ((sy - sy0) % PD_SZ)*bufstep; + int _sy = borderInterpolate(sy, ssize.height, BORDER_REFLECT_101); + const T* src = (const T*)(_src.data + _src.step*_sy); + int limit = cn; + const int* tab = tabL; + + for( x = 0;;) + { + for( ; x < limit; x++ ) + { + row[x] = src[tab[x+cn*2]]*6 + (src[tab[x+cn]] + src[tab[x+cn*3]])*4 + + src[tab[x]] + src[tab[x+cn*4]]; + } + + if( x == dsize.width ) + break; + + if( cn == 1 ) + { + for( ; x < width0; x++ ) + row[x] = src[x*2]*6 + (src[x*2 - 1] + src[x*2 + 1])*4 + + src[x*2 - 2] + src[x*2 + 2]; + } + else if( cn == 3 ) + { + for( ; x < width0; x += 3 ) + { + const T* s = src + x*2; + WT t0 = s[0]*6 + (s[-3] + s[3])*4 + s[-6] + s[6]; + WT t1 = s[1]*6 + (s[-2] + s[4])*4 + s[-5] + s[7]; + WT t2 = s[2]*6 + (s[-1] + s[5])*4 + s[-4] + s[8]; + row[x] = t0; row[x+1] = t1; row[x+2] = t2; + } + } + else if( cn == 4 ) + { + for( ; x < width0; x += 4 ) + { + const T* s = src + x*2; + WT t0 = s[0]*6 + (s[-4] + s[4])*4 + s[-8] + s[8]; + WT t1 = s[1]*6 + (s[-3] + s[5])*4 + s[-7] + s[9]; + row[x] = t0; row[x+1] = t1; + t0 = s[2]*6 + (s[-2] + s[6])*4 + s[-6] + s[10]; + t1 = s[3]*6 + (s[-1] + s[7])*4 + s[-5] + s[11]; + row[x+2] = t0; row[x+3] = t1; + } + } + else + { + for( ; x < width0; x++ ) + { + int sx = tabM[x]; + row[x] = src[sx]*6 + (src[sx - cn] + src[sx + cn])*4 + + src[sx - cn*2] + src[sx + cn*2]; + } + } + + limit = dsize.width; + tab = tabR - x; + } + } + + // do vertical convolution and decimation and write the result to the destination image + for( k = 0; k < PD_SZ; k++ ) + rows[k] = buf + ((y*2 - PD_SZ/2 + k - sy0) % PD_SZ)*bufstep; + row0 = rows[0]; row1 = rows[1]; row2 = rows[2]; row3 = rows[3]; row4 = rows[4]; + + x = vecOp(rows, dst, (int)_dst.step, dsize.width); + for( ; x < dsize.width; x++ ) + dst[x] = castOp(row2[x]*6 + (row1[x] + row3[x])*4 + row0[x] + row4[x]); + } +} + + +template void +pyrUp_( const Mat& _src, Mat& 
_dst ) +{ + const int PU_SZ = 3; + typedef typename CastOp::type1 WT; + typedef typename CastOp::rtype T; + + Size ssize = _src.size(), dsize = _dst.size(); + int cn = _src.channels(); + int bufstep = (int)alignSize((dsize.width+1)*cn, 16); + AutoBuffer _buf(bufstep*PU_SZ + 16); + WT* buf = alignPtr((WT*)_buf, 16); + AutoBuffer _dtab(ssize.width*cn); + int* dtab = _dtab; + WT* rows[PU_SZ]; + CastOp castOp; + VecOp vecOp; + + CV_Assert( std::abs((long int)(dsize.width - ssize.width*2)) == dsize.width % 2 && + std::abs((long int)(dsize.height - ssize.height*2)) == dsize.height % 2); + int k, x, sy0 = -PU_SZ/2, sy = sy0, width0 = ssize.width - 1; + + ssize.width *= cn; + dsize.width *= cn; + width0 *= cn; + + for( x = 0; x < ssize.width; x++ ) + dtab[x] = (x/cn)*2*cn + x % cn; + + for( int y = 0; y < ssize.height; y++ ) + { + T* dst0 = (T*)(_dst.data + _dst.step*y*2); + T* dst1 = (T*)(_dst.data + _dst.step*(y*2+1)); + WT *row0, *row1, *row2; + + if( y*2+1 >= dsize.height ) + dst1 = dst0; + + // fill the ring buffer (horizontal convolution and decimation) + for( ; sy <= y + 1; sy++ ) + { + WT* row = buf + ((sy - sy0) % PU_SZ)*bufstep; + int _sy = borderInterpolate(sy, ssize.height, BORDER_REFLECT_101); + const T* src = (const T*)(_src.data + _src.step*_sy); + + if( ssize.width == cn ) + { + for( x = 0; x < cn; x++ ) + row[x] = row[x + cn] = src[x]*8; + continue; + } + + for( x = 0; x < cn; x++ ) + { + int dx = dtab[x]; + WT t0 = src[x]*6 + src[x + cn]*2; + WT t1 = (src[x] + src[x + cn])*4; + row[dx] = t0; row[dx + cn] = t1; + dx = dtab[ssize.width - cn + x]; + int sx = ssize.width - cn + x; + t0 = src[sx - cn] + src[sx]*7; + t1 = src[sx]*8; + row[dx] = t0; row[dx + cn] = t1; + } + + for( x = cn; x < ssize.width - cn; x++ ) + { + int dx = dtab[x]; + WT t0 = src[x-cn] + src[x]*6 + src[x+cn]; + WT t1 = (src[x] + src[x+cn])*4; + row[dx] = t0; + row[dx+cn] = t1; + } + } + + // do vertical convolution and decimation and write the result to the destination image + for( k = 0; 
k < PU_SZ; k++ ) + rows[k] = buf + ((y - PU_SZ/2 + k - sy0) % PU_SZ)*bufstep; + row0 = rows[0]; row1 = rows[1]; row2 = rows[2]; + + x = vecOp(rows, dst0, (int)_dst.step, dsize.width); + for( ; x < dsize.width; x++ ) + { + T t1 = castOp((row1[x] + row2[x])*4); + T t0 = castOp(row0[x] + row1[x]*6 + row2[x]); + dst1[x] = t1; dst0[x] = t0; + } + } +} + +typedef void (*PyrFunc)(const Mat&, Mat&); + +} + +void cv::pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz ) +{ + Mat src = _src.getMat(); + Size dsz = _dsz == Size() ? Size((src.cols + 1)/2, (src.rows + 1)/2) : _dsz; + _dst.create( dsz, src.type() ); + Mat dst = _dst.getMat(); + int depth = src.depth(); + PyrFunc func = 0; + if( depth == CV_8U ) + func = pyrDown_, PyrDownVec_32s8u>; + else if( depth == CV_16S ) + func = pyrDown_, NoVec >; + else if( depth == CV_16U ) + func = pyrDown_, NoVec >; + else if( depth == CV_32F ) + func = pyrDown_, PyrDownVec_32f>; + else if( depth == CV_64F ) + func = pyrDown_, NoVec >; + else + CV_Error( CV_StsUnsupportedFormat, "" ); + + func( src, dst ); +} + +void cv::pyrUp( InputArray _src, OutputArray _dst, const Size& _dsz ) +{ + Mat src = _src.getMat(); + Size dsz = _dsz == Size() ? 
Size(src.cols*2, src.rows*2) : _dsz; + _dst.create( dsz, src.type() ); + Mat dst = _dst.getMat(); + int depth = src.depth(); + PyrFunc func = 0; + if( depth == CV_8U ) + func = pyrUp_, NoVec >; + else if( depth == CV_16S ) + func = pyrUp_, NoVec >; + else if( depth == CV_16U ) + func = pyrUp_, NoVec >; + else if( depth == CV_32F ) + func = pyrUp_, NoVec >; + else if( depth == CV_64F ) + func = pyrUp_, NoVec >; + else + CV_Error( CV_StsUnsupportedFormat, "" ); + + func( src, dst ); +} + +void cv::buildPyramid( InputArray _src, OutputArrayOfArrays _dst, int maxlevel ) +{ + Mat src = _src.getMat(); + _dst.create( maxlevel + 1, 1, 0 ); + _dst.getMatRef(0) = src; + for( int i = 1; i <= maxlevel; i++ ) + pyrDown( _dst.getMatRef(i-1), _dst.getMatRef(i) ); +} + +CV_IMPL void cvPyrDown( const void* srcarr, void* dstarr, int _filter ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + + CV_Assert( _filter == CV_GAUSSIAN_5x5 && src.type() == dst.type()); + cv::pyrDown( src, dst, dst.size() ); +} + +CV_IMPL void cvPyrUp( const void* srcarr, void* dstarr, int _filter ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + + CV_Assert( _filter == CV_GAUSSIAN_5x5 && src.type() == dst.type()); + cv::pyrUp( src, dst, dst.size() ); +} + + +CV_IMPL void +cvReleasePyramid( CvMat*** _pyramid, int extra_layers ) +{ + if( !_pyramid ) + CV_Error( CV_StsNullPtr, "" ); + + if( *_pyramid ) + for( int i = 0; i <= extra_layers; i++ ) + cvReleaseMat( &(*_pyramid)[i] ); + + cvFree( _pyramid ); +} + + +CV_IMPL CvMat** +cvCreatePyramid( const CvArr* srcarr, int extra_layers, double rate, + const CvSize* layer_sizes, CvArr* bufarr, + int calc, int filter ) +{ + const float eps = 0.1f; + uchar* ptr = 0; + + CvMat stub, *src = cvGetMat( srcarr, &stub ); + + if( extra_layers < 0 ) + CV_Error( CV_StsOutOfRange, "The number of extra layers must be non negative" ); + + int i, layer_step, elem_size = CV_ELEM_SIZE(src->type); + CvSize layer_size, size = 
cvGetMatSize(src); + + if( bufarr ) + { + CvMat bstub, *buf; + int bufsize = 0; + + buf = cvGetMat( bufarr, &bstub ); + bufsize = buf->rows*buf->cols*CV_ELEM_SIZE(buf->type); + layer_size = size; + for( i = 1; i <= extra_layers; i++ ) + { + if( !layer_sizes ) + { + layer_size.width = cvRound(layer_size.width*rate+eps); + layer_size.height = cvRound(layer_size.height*rate+eps); + } + else + layer_size = layer_sizes[i-1]; + layer_step = layer_size.width*elem_size; + bufsize -= layer_step*layer_size.height; + } + + if( bufsize < 0 ) + CV_Error( CV_StsOutOfRange, "The buffer is too small to fit the pyramid" ); + ptr = buf->data.ptr; + } + + CvMat** pyramid = (CvMat**)cvAlloc( (extra_layers+1)*sizeof(pyramid[0]) ); + memset( pyramid, 0, (extra_layers+1)*sizeof(pyramid[0]) ); + + pyramid[0] = cvCreateMatHeader( size.height, size.width, src->type ); + cvSetData( pyramid[0], src->data.ptr, src->step ); + layer_size = size; + + for( i = 1; i <= extra_layers; i++ ) + { + if( !layer_sizes ) + { + layer_size.width = cvRound(layer_size.width*rate + eps); + layer_size.height = cvRound(layer_size.height*rate + eps); + } + else + layer_size = layer_sizes[i]; + + if( bufarr ) + { + pyramid[i] = cvCreateMatHeader( layer_size.height, layer_size.width, src->type ); + layer_step = layer_size.width*elem_size; + cvSetData( pyramid[i], ptr, layer_step ); + ptr += layer_step*layer_size.height; + } + else + pyramid[i] = cvCreateMat( layer_size.height, layer_size.width, src->type ); + + if( calc ) + cvPyrDown( pyramid[i-1], pyramid[i], filter ); + //cvResize( pyramid[i-1], pyramid[i], CV_INTER_LINEAR ); + } + + return pyramid; +} + +/* End of file. 
*/ diff --git a/opencv/imgproc/pyrsegmentation.cpp b/opencv/imgproc/pyrsegmentation.cpp new file mode 100644 index 0000000..4441d56 --- /dev/null +++ b/opencv/imgproc/pyrsegmentation.cpp @@ -0,0 +1,1880 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"

/* 3-channel float pixel; channel layout as declared: blue, green, red */
typedef struct _CvRGBf
{   float blue;
    float green;
    float red;
}
_CvRGBf;

/* axis-aligned rectangle with 16-bit corner coordinates (x1,y1)-(x2,y2) */
typedef struct _CvRect16u
{
    ushort x1, y1, x2, y2;
}
_CvRect16u;

/* single-channel pyramid node:
   c    - (average) value of the node
   p    - link to the father node in the next (coarser) layer
   a    - accumulator used while averaging linked sons
          (NOTE(review): presumably the component area -- confirm in the
           linking/clustering code)
   rect - ROI of the connected component rooted at this node */
typedef struct _CvPyramid
{
    float c;
    struct _CvPyramid *p;
    int a;
    _CvRect16u rect;            /* ROI for the connected component */
} _CvPyramid;

/* element of base layer: value + father link only (no area/rect) */
typedef struct _CvPyramidBase
{
    float c;
    struct _CvPyramid *p;
}
_CvPyramidBase;

/* 3-channel counterpart of _CvPyramid */
typedef struct _CvPyramidC3
{
    _CvRGBf c;
    struct _CvPyramidC3 *p;
    int a;
    _CvRect16u rect;            /* ROI for the connected component */
} _CvPyramidC3;

/* element of base layer (3-channel) */
typedef struct _CvPyramidBaseC3
{
    _CvRGBf c;
    struct _CvPyramidC3 *p;
}
_CvPyramidBaseC3;

/* singly-linked list node used to collect component roots */
typedef struct _CvListNode
{
    struct _CvListNode* next;
    void* data;
}
_CvListNode;


/* merge single-channel components whose values differ less than threshold
   and emit CvConnectedComp records into res_seq */
static CvStatus icvSegmentClusterC1( CvSeq* cmp_seq, CvSeq* res_seq,
                                     double threshold,
                                     _CvPyramid* first_level_end,
                                     CvSize first_level_size );

/* 3-channel counterpart of icvSegmentClusterC1 */
static CvStatus icvSegmentClusterC3( CvSeq* cmp_seq, CvSeq* res_seq,
                                     double threshold,
                                     _CvPyramidC3* first_level_end,
                                     CvSize first_level_size );

typedef void (CV_CDECL * CvWriteNodeFunction)(void* seq,void* node);

/* establish son-father links between layer `layer` and its parent layer
   (single-channel) */
static CvStatus icvUpdatePyrLinks_8u_C1
    (int layer, void *layer_data, CvSize size, void *parent_layer,
     void *_writer, float threshold, int is_last_iter, void *_stub, CvWriteNodeFunction /*func*/);

/* 3-channel counterpart of icvUpdatePyrLinks_8u_C1 */
static CvStatus icvUpdatePyrLinks_8u_C3
    (int layer, void *layer_data, CvSize size, void *parent_layer,
     void *_writer, float threshold, int is_last_iter, void *_stub, CvWriteNodeFunction /*func*/);

/* grow max_rect to cover cur_rect / the single pixel (x,y) */
static void icvMaxRoi( _CvRect16u *max_rect, _CvRect16u* cur_rect );
static void icvMaxRoi1( _CvRect16u *max_rect, int x, int y );


/* run an icv* call and bail out to the local M_END label on failure */
#define _CV_CHECK( icvFun )                                             \
  {                                                                     \
    if( icvFun != CV_OK )                                               \
      goto M_END;                                                       \
  }


#define _CV_MAX3( a, b, c) ((a)>(b) ? ((a)>(c) ? (a) : (c)) : ((b)>(c) ? (b) : (c)))

/*#define _CV_RGB_DIST(a, b)  _CV_MAX3((float)fabs((a).red - (b).red),      \
                                       (float)fabs((a).green - (b).green),  \
                                       (float)fabs((a).blue - (b).blue))*/

/* advance a pointer by n base-layer elements (base elements are smaller
   than regular pyramid nodes, hence the byte arithmetic) */
#define _CV_NEXT_BASE_C1(p,n) (_CvPyramid*)((char*)(p) + (n)*sizeof(_CvPyramidBase))
#define _CV_NEXT_BASE_C3(p,n) (_CvPyramidC3*)((char*)(p) + (n)*sizeof(_CvPyramidBaseC3))


/* Chebyshev (max per-channel absolute difference) RGB distance */
CV_INLINE float icvRGBDist_Max( const _CvRGBf& a, const _CvRGBf& b )
{
    float tr = (float)fabs(a.red - b.red);
    float tg = (float)fabs(a.green - b.green);
    float tb = (float)fabs(a.blue - b.blue);

    return _CV_MAX3( tr, tg, tb );
}

/* L1 (sum of per-channel absolute differences) RGB distance */
CV_INLINE float icvRGBDist_Sum( const _CvRGBf& a, const _CvRGBf& b )
{
    float tr = (float)fabs(a.red - b.red);
    float tg = (float)fabs(a.green - b.green);
    float tb = (float)fabs(a.blue - b.blue);

    return (tr + tg + tb);
}

/* select the RGB metric; the threshold scale keeps the two metrics
   roughly comparable (L1 spans 3x the range of the max metric) */
#if 1
#define _CV_RGB_DIST  icvRGBDist_Max
#define _CV_RGB_THRESH_SCALE   1
#else
#define _CV_RGB_DIST  icvRGBDist_Sum
#define _CV_RGB_THRESH_SCALE   3
#endif

#define _CV_INV_TAB_SIZE   32

/* precomputed reciprocals 1/1 .. 1/32, used to replace divisions by small
   son counts with multiplications */
static const float icvInvTab[ /*_CV_INV_TAB_SIZE*/ ] =
{
    1.00000000f, 0.50000000f, 0.33333333f, 0.25000000f, 0.20000000f, 0.16666667f,
    0.14285714f, 0.12500000f, 0.11111111f, 0.10000000f, 0.09090909f, 0.08333333f,
    0.07692308f, 0.07142857f, 0.06666667f, 0.06250000f, 0.05882353f, 0.05555556f,
    0.05263158f, 0.05000000f, 0.04761905f, 0.04545455f, 0.04347826f, 0.04166667f,
    0.04000000f, 0.03846154f, 0.03703704f, 0.03571429f, 0.03448276f, 0.03333333f,
    0.03225806f, 0.03125000f
};

+static void +icvWritePyrNode( void *elem, void *writer ) +{ + CV_WRITE_SEQ_ELEM( *(_CvListNode *) elem, *(CvSeqWriter *) writer ); +} + + +static CvStatus +icvPyrSegmentation8uC1R( uchar * src_image, int src_step, + uchar * dst_image, int dst_step, + CvSize roi, int filter, + CvSeq ** dst_comp, CvMemStorage * storage, + int level, int threshold1, int threshold2 ) +{ + int i, j, l; + int step; + const int max_iter = 3; /* maximum number of iterations */ + int cur_iter = 0; /* current iteration */ + + _CvPyramid *pyram[16]; /* pointers to the pyramid down up to level */ + + float *pyramida = 0; + _CvPyramid stub; + + _CvPyramid *p_cur; + _CvPyramidBase *p_base; + _CvListNode cmp_node; + + CvSeq *cmp_seq = 0; + CvSeq *res_seq = 0; + CvMemStorage *temp_storage = 0; + CvSize size; + CvStatus status; + CvSeqWriter writer; + + int buffer_size; + char *buffer = 0; + + status = CV_OK; + + /* clear pointer to resultant sequence */ + if( dst_comp ) + *dst_comp = 0; + + /* check args */ + if( !src_image || !dst_image || !storage || !dst_comp ) + return CV_NULLPTR_ERR; + if( roi.width <= 0 || roi.height <= 0 || src_step < roi.width || dst_step < roi.width ) + return CV_BADSIZE_ERR; + if( filter != CV_GAUSSIAN_5x5 ) + return CV_BADRANGE_ERR; + if( threshold1 < 0 || threshold2 < 0 ) + return CV_BADRANGE_ERR; + if( level <= 0 ) + return CV_BADRANGE_ERR; + + if( ((roi.width | roi.height) & ((1 << level) - 1)) != 0 ) + return CV_BADCOEF_ERR; + + temp_storage = cvCreateChildMemStorage( storage ); + + /* sequence for temporary components */ + cmp_seq = cvCreateSeq( 0, sizeof( CvSeq ), sizeof( _CvListNode ), temp_storage ); + assert( cmp_seq != 0 ); + + res_seq = cvCreateSeq( CV_SEQ_CONNECTED_COMP, sizeof( CvSeq ), + sizeof( CvConnectedComp ), storage ); + assert( res_seq != 0 ); + + /* calculate buffer size */ + buffer_size = roi.width * roi.height * (sizeof( float ) + sizeof( _CvPyramidBase )); + + for( l = 1; l <= level; l++ ) + buffer_size += ((roi.width >> l) + 1) * ((roi.height 
>> l) + 1) * sizeof(_CvPyramid); + + /* allocate buffer */ + buffer = (char *) cvAlloc( buffer_size ); + if( !buffer ) + { + status = CV_OUTOFMEM_ERR; + goto M_END; + } + + pyramida = (float *) buffer; + + /* initialization pyramid-linking properties down up to level */ + step = roi.width * sizeof( float ); + + { + CvMat _src; + CvMat _pyramida; + cvInitMatHeader( &_src, roi.height, roi.width, CV_8UC1, src_image, src_step ); + cvInitMatHeader( &_pyramida, roi.height, roi.width, CV_32FC1, pyramida, step ); + cvConvert( &_src, &_pyramida ); + /*_CV_CHECK( icvCvtTo_32f_C1R( src_image, src_step, pyramida, step, roi, CV_8UC1 ));*/ + } + p_base = (_CvPyramidBase *) (buffer + step * roi.height); + pyram[0] = (_CvPyramid *) p_base; + + /* fill base level of pyramid */ + for( i = 0; i < roi.height; i++ ) + { + for( j = 0; j < roi.width; j++, p_base++ ) + { + p_base->c = pyramida[i * roi.width + j]; + p_base->p = &stub; + } + } + + p_cur = (_CvPyramid *) p_base; + size = roi; + + /* calculate initial pyramid */ + for( l = 1; l <= level; l++ ) + { + CvSize dst_size = { size.width/2+1, size.height/2+1 }; + CvMat prev_level = cvMat( size.height, size.width, CV_32FC1 ); + CvMat next_level = cvMat( dst_size.height, dst_size.width, CV_32FC1 ); + + cvSetData( &prev_level, pyramida, step ); + cvSetData( &next_level, pyramida, step ); + cvPyrDown( &prev_level, &next_level ); + + //_CV_CHECK( icvPyrDown_Gauss5x5_32f_C1R( pyramida, step, pyramida, step, size, buff )); + //_CV_CHECK( icvPyrDownBorder_32f_CnR( pyramida, step, size, pyramida, step, dst_size, 1 )); + pyram[l] = p_cur; + + size.width = dst_size.width - 1; + size.height = dst_size.height - 1; + + /* fill layer #l */ + for( i = 0; i <= size.height; i++ ) + { + for( j = 0; j <= size.width; j++, p_cur++ ) + { + p_cur->c = pyramida[i * roi.width + j]; + p_cur->p = &stub; + p_cur->a = 0; + p_cur->rect.x2 = 0; + } + } + } + + cvStartAppendToSeq( cmp_seq, &writer ); + + /* do several iterations to determine son-father links */ + 
for( cur_iter = 0; cur_iter < max_iter; cur_iter++ ) + { + int is_last_iter = cur_iter == max_iter - 1; + + size = roi; + + /* build son-father links down up to level */ + for( l = 0; l < level; l++ ) + { + icvUpdatePyrLinks_8u_C1( l, pyram[l], size, pyram[l + 1], &writer, + (float) threshold1, is_last_iter, &stub, + icvWritePyrNode ); + + /* clear last border row */ + if( l > 0 ) + { + p_cur = pyram[l] + (size.width + 1) * size.height; + for( j = 0; j <= size.width; j++ ) + p_cur[j].c = 0; + } + + size.width >>= 1; + size.height >>= 1; + } + +/* clear the old c value for the last level */ + p_cur = pyram[level]; + for( i = 0; i <= size.height; i++, p_cur += size.width + 1 ) + for( j = 0; j <= size.width; j++ ) + p_cur[j].c = 0; + + size = roi; + step = roi.width; + +/* calculate average c value for the 0 < l <=level */ + for( l = 0; l < level; l++, step = (step >> 1) + 1 ) + { + _CvPyramid *p_prev, *p_row_prev; + + stub.c = 0; + + /* calculate average c value for the next level */ + if( l == 0 ) + { + p_base = (_CvPyramidBase *) pyram[0]; + for( i = 0; i < roi.height; i++, p_base += size.width ) + { + for( j = 0; j < size.width; j += 2 ) + { + _CvPyramid *p1 = p_base[j].p; + _CvPyramid *p2 = p_base[j + 1].p; + + p1->c += p_base[j].c; + p2->c += p_base[j + 1].c; + } + } + } + else + { + p_cur = pyram[l]; + for( i = 0; i < size.height; i++, p_cur += size.width + 1 ) + { + for( j = 0; j < size.width; j += 2 ) + { + _CvPyramid *p1 = p_cur[j].p; + _CvPyramid *p2 = p_cur[j + 1].p; + + float t0 = (float) p_cur[j].a * p_cur[j].c; + float t1 = (float) p_cur[j + 1].a * p_cur[j + 1].c; + + p1->c += t0; + p2->c += t1; + + if( !is_last_iter ) + p_cur[j].a = p_cur[j + 1].a = 0; + } + if( !is_last_iter ) + p_cur[size.width].a = 0; + } + if( !is_last_iter ) + { + for( j = 0; j <= size.width; j++ ) + { + p_cur[j].a = 0; + } + } + } + + /* assign random values of the next level null c */ + p_cur = pyram[l + 1]; + p_row_prev = p_prev = pyram[l]; + + size.width >>= 1; + size.height 
>>= 1; + + for( i = 0; i <= size.height; i++, p_cur += size.width + 1 ) + { + if( i < size.height || !is_last_iter ) + { + for( j = 0; j < size.width; j++ ) + { + int a = p_cur[j].a; + + if( a != 0 ) + { + if( a <= _CV_INV_TAB_SIZE ) + { + p_cur[j].c *= icvInvTab[a - 1]; + } + else + { + p_cur[j].c /= a; + } + } + else + { + p_cur[j].c = p_prev->c; + } + + if( l == 0 ) + p_prev = _CV_NEXT_BASE_C1(p_prev,2); + else + p_prev += 2; + } + + if( p_cur[size.width].a == 0 ) + { + p_cur[size.width].c = p_prev[(l != 0) - 1].c; + } + else + { + p_cur[size.width].c /= p_cur[size.width].a; + if( is_last_iter ) + { + cmp_node.data = p_cur + size.width; + CV_WRITE_SEQ_ELEM( cmp_node, writer ); + } + } + } + else + { + for( j = 0; j <= size.width; j++ ) + { + int a = p_cur[j].a; + + if( a != 0 ) + { + if( a <= _CV_INV_TAB_SIZE ) + { + p_cur[j].c *= icvInvTab[a - 1]; + } + else + { + p_cur[j].c /= a; + } + + cmp_node.data = p_cur + j; + CV_WRITE_SEQ_ELEM( cmp_node, writer ); + } + else + { + p_cur[j].c = p_prev->c; + } + + if( l == 0 ) + { + p_prev = _CV_NEXT_BASE_C1(p_prev, (j * 2 < step - 2 ? 2 : 1)); + } + else + { + p_prev++; + } + } + } + + if( l + 1 == level && !is_last_iter ) + for( j = 0; j <= size.width; j++ ) + p_cur[j].a = 0; + + if( !(i & 1) ) + { + p_prev = p_row_prev; + } + else + { + p_prev = (_CvPyramid*)((char*)p_row_prev + step * + (l == 0 ? 
sizeof(_CvPyramidBase) : sizeof(_CvPyramid))); + } + } + } + } /* end of the iteration process */ + + /* construct a connected components */ + size.width = roi.width >> level; + size.height = roi.height >> level; + + p_cur = pyram[level]; + + for( i = 0; i < size.height; i++, p_cur += size.width + 1 ) + { + for( j = 0; j < size.width; j++ ) + { + if( p_cur[j].a != 0 ) + { + cmp_node.data = p_cur + j; + CV_WRITE_SEQ_ELEM( cmp_node, writer ); + } + } + } + + cvEndWriteSeq( &writer ); + +/* clusterization segmented components and construction + output connected components */ + icvSegmentClusterC1( cmp_seq, res_seq, threshold2, pyram[1], roi ); + +/* convert (inplace) resultant segment values to int (top level) */ + +/* propagate segment values top down */ + for( l = level - 1; l >= 0; l-- ) + { + p_cur = pyram[l]; + + size.width <<= 1; + size.height <<= 1; + + if( l == 0 ) + { + size.width--; + size.height--; + } + + for( i = 0; i <= size.height; i++ ) + { + for( j = 0; j <= size.width; j++ ) + { + _CvPyramid *p = p_cur->p; + + assert( p != 0 ); + if( p != &stub ) + p_cur->c = p->c; + + if( l == 0 ) + { + Cv32suf _c; + /* copy the segmented values to destination image */ + _c.f = p_cur->c; dst_image[j] = (uchar)_c.i; + p_cur = _CV_NEXT_BASE_C1(p_cur, 1); + } + else + { + p_cur++; + } + } + if( l == 0 ) + dst_image += dst_step; + } + } + M_END: + + cvFree( &buffer ); + cvReleaseMemStorage( &temp_storage ); + + if( status == CV_OK ) + *dst_comp = res_seq; + + return status; +} + + + +/****************************************************************************************\ + color!!! 
image segmentation by pyramid-linking +\****************************************************************************************/ +static CvStatus +icvPyrSegmentation8uC3R( uchar * src_image, int src_step, + uchar * dst_image, int dst_step, + CvSize roi, int filter, + CvSeq ** dst_comp, CvMemStorage * storage, + int level, int threshold1, int threshold2 ) +{ + int i, j, l; + + int step; + const int max_iter = 3; /* maximum number of iterations */ + int cur_iter = 0; /* current iteration */ + + _CvPyramidC3 *pyram[16]; /* pointers to the pyramid down up to level */ + + float *pyramida = 0; + _CvPyramidC3 stub; + + _CvPyramidC3 *p_cur; + _CvPyramidBaseC3 *p_base; + _CvListNode cmp_node; + + CvSeq *cmp_seq = 0; + CvSeq *res_seq = 0; + CvMemStorage *temp_storage = 0; + CvSize size; + CvStatus status; + CvSeqWriter writer; + + int buffer_size; + char *buffer = 0; + + status = CV_OK; + + threshold1 *= _CV_RGB_THRESH_SCALE; + threshold2 *= _CV_RGB_THRESH_SCALE; + + /* clear pointer to resultant sequence */ + if( dst_comp ) + *dst_comp = 0; + + /* check args */ + if( !src_image || !dst_image || !storage || !dst_comp ) + return CV_NULLPTR_ERR; + if( roi.width <= 0 || roi.height <= 0 || + src_step < roi.width * 3 || dst_step < roi.width * 3 ) return CV_BADSIZE_ERR; + if( filter != CV_GAUSSIAN_5x5 ) + return CV_BADRANGE_ERR; + if( threshold1 < 0 || threshold2 < 0 ) + return CV_BADRANGE_ERR; + if( level <= 0 ) + return CV_BADRANGE_ERR; + + if( ((roi.width | roi.height) & ((1 << level) - 1)) != 0 ) + return CV_BADCOEF_ERR; + + temp_storage = cvCreateChildMemStorage( storage ); + + /* sequence for temporary components */ + cmp_seq = cvCreateSeq( 0, sizeof( CvSeq ), sizeof( _CvListNode ), temp_storage ); + assert( cmp_seq != 0 ); + + res_seq = cvCreateSeq( CV_SEQ_CONNECTED_COMP, sizeof( CvSeq ), + sizeof( CvConnectedComp ), storage ); + assert( res_seq != 0 ); + + /* calculate buffer size */ + buffer_size = roi.width * roi.height * (sizeof( _CvRGBf ) + sizeof( _CvPyramidBaseC3 
)); + + for( l = 1; l <= level; l++ ) + buffer_size += ((roi.width >> l) + 1) * ((roi.height >> l) + 1) * sizeof(_CvPyramidC3); + + /* allocate buffer */ + buffer = (char *) cvAlloc( buffer_size ); + if( !buffer ) + { + status = CV_OUTOFMEM_ERR; + goto M_END; + } + + pyramida = (float *) buffer; + + /* initialization pyramid-linking properties down up to level */ + step = roi.width * sizeof( _CvRGBf ); + + { + CvMat _src; + CvMat _pyramida; + cvInitMatHeader( &_src, roi.height, roi.width, CV_8UC3, src_image, src_step ); + cvInitMatHeader( &_pyramida, roi.height, roi.width, CV_32FC3, pyramida, step ); + cvConvert( &_src, &_pyramida ); + /*_CV_CHECK( icvCvtTo_32f_C1R( src_image, src_step, pyramida, step, + cvSize( roi.width * 3, roi.height ), CV_8UC1 ));*/ + } + + p_base = (_CvPyramidBaseC3 *) (buffer + step * roi.height); + pyram[0] = (_CvPyramidC3 *) p_base; + + /* fill base level of pyramid */ + for( i = 0; i < roi.height; i++ ) + { + for( j = 0; j < roi.width; j++, p_base++ ) + { + p_base->c = ((_CvRGBf *) pyramida)[i * roi.width + j]; + p_base->p = &stub; + } + } + + p_cur = (_CvPyramidC3 *) p_base; + size = roi; + + /* calculate initial pyramid */ + for( l = 1; l <= level; l++ ) + { + CvSize dst_size = { size.width/2 + 1, size.height/2 + 1 }; + CvMat prev_level = cvMat( size.height, size.width, CV_32FC3 ); + CvMat next_level = cvMat( dst_size.height, dst_size.width, CV_32FC3 ); + + cvSetData( &prev_level, pyramida, step ); + cvSetData( &next_level, pyramida, step ); + cvPyrDown( &prev_level, &next_level ); + + //_CV_CHECK( icvPyrDown_Gauss5x5_32f_C3R( pyramida, step, pyramida, step, size, buff )); + //_CV_CHECK( icvPyrDownBorder_32f_CnR( pyramida, step, size, pyramida, step, dst_size, 3 )); + pyram[l] = p_cur; + + size.width = dst_size.width - 1; + size.height = dst_size.height - 1; + + /* fill layer #l */ + for( i = 0; i <= size.height; i++ ) + { + assert( (char*)p_cur - buffer < buffer_size ); + for( j = 0; j <= size.width; j++, p_cur++ ) + { + p_cur->c = 
((_CvRGBf *) pyramida)[i * roi.width + j]; + p_cur->p = &stub; + p_cur->a = 0; + p_cur->rect.x2 = 0; + } + } + } + + cvStartAppendToSeq( cmp_seq, &writer ); + + /* do several iterations to determine son-father links */ + for( cur_iter = 0; cur_iter < max_iter; cur_iter++ ) + { + int is_last_iter = cur_iter == max_iter - 1; + + size = roi; + + /* build son-father links down up to level */ + for( l = 0; l < level; l++ ) + { + icvUpdatePyrLinks_8u_C3( l, pyram[l], size, pyram[l + 1], &writer, + (float) threshold1, is_last_iter, &stub, + icvWritePyrNode ); + + /* clear last border row */ + if( l > 0 ) + { + p_cur = pyram[l] + (size.width + 1) * size.height; + for( j = 0; j <= size.width; j++ ) + p_cur[j].c.blue = p_cur[j].c.green = p_cur[j].c.red = 0; + } + + size.width >>= 1; + size.height >>= 1; + } + +/* clear the old c value for the last level */ + p_cur = pyram[level]; + for( i = 0; i <= size.height; i++, p_cur += size.width + 1 ) + for( j = 0; j <= size.width; j++ ) + p_cur[j].c.blue = p_cur[j].c.green = p_cur[j].c.red = 0; + + size = roi; + step = roi.width; + +/* calculate average c value for the 0 < l <=level */ + for( l = 0; l < level; l++, step = (step >> 1) + 1 ) + { + _CvPyramidC3 *p_prev, *p_row_prev; + + stub.c.blue = stub.c.green = stub.c.red = 0; + + /* calculate average c value for the next level */ + if( l == 0 ) + { + p_base = (_CvPyramidBaseC3 *) pyram[0]; + for( i = 0; i < roi.height; i++, p_base += size.width ) + { + for( j = 0; j < size.width; j++ ) + { + _CvPyramidC3 *p = p_base[j].p; + + p->c.blue += p_base[j].c.blue; + p->c.green += p_base[j].c.green; + p->c.red += p_base[j].c.red; + } + } + } + else + { + p_cur = pyram[l]; + for( i = 0; i < size.height; i++, p_cur += size.width + 1 ) + { + for( j = 0; j < size.width; j++ ) + { + _CvPyramidC3 *p = p_cur[j].p; + float a = (float) p_cur[j].a; + + p->c.blue += a * p_cur[j].c.blue; + p->c.green += a * p_cur[j].c.green; + p->c.red += a * p_cur[j].c.red; + + if( !is_last_iter ) + p_cur[j].a = 0; + 
} + if( !is_last_iter ) + p_cur[size.width].a = 0; + } + if( !is_last_iter ) + { + for( j = 0; j <= size.width; j++ ) + { + p_cur[j].a = 0; + } + } + } + + /* assign random values of the next level null c */ + p_cur = pyram[l + 1]; + p_row_prev = p_prev = pyram[l]; + + size.width >>= 1; + size.height >>= 1; + + for( i = 0; i <= size.height; i++, p_cur += size.width + 1 ) + { + if( i < size.height || !is_last_iter ) + { + for( j = 0; j < size.width; j++ ) + { + int a = p_cur[j].a; + + if( a != 0 ) + { + float inv_a; + + if( a <= _CV_INV_TAB_SIZE ) + { + inv_a = icvInvTab[a - 1]; + } + else + { + inv_a = 1.f / a; + } + p_cur[j].c.blue *= inv_a; + p_cur[j].c.green *= inv_a; + p_cur[j].c.red *= inv_a; + } + else + { + p_cur[j].c = p_prev->c; + } + + if( l == 0 ) + p_prev = _CV_NEXT_BASE_C3( p_prev, 2 ); + else + p_prev += 2; + } + + if( p_cur[size.width].a == 0 ) + { + p_cur[size.width].c = p_prev[(l != 0) - 1].c; + } + else + { + p_cur[size.width].c.blue /= p_cur[size.width].a; + p_cur[size.width].c.green /= p_cur[size.width].a; + p_cur[size.width].c.red /= p_cur[size.width].a; + if( is_last_iter ) + { + cmp_node.data = p_cur + size.width; + CV_WRITE_SEQ_ELEM( cmp_node, writer ); + } + } + } + else + { + for( j = 0; j <= size.width; j++ ) + { + int a = p_cur[j].a; + + if( a != 0 ) + { + float inv_a; + + if( a <= _CV_INV_TAB_SIZE ) + { + inv_a = icvInvTab[a - 1]; + } + else + { + inv_a = 1.f / a; + } + p_cur[j].c.blue *= inv_a; + p_cur[j].c.green *= inv_a; + p_cur[j].c.red *= inv_a; + + cmp_node.data = p_cur + j; + CV_WRITE_SEQ_ELEM( cmp_node, writer ); + } + else + { + p_cur[j].c = p_prev->c; + } + + if( l == 0 ) + { + p_prev = _CV_NEXT_BASE_C3( p_prev, (j * 2 < step - 2 ? 2 : 1)); + } + else + { + p_prev++; + } + } + } + + if( l + 1 == level && !is_last_iter ) + for( j = 0; j <= size.width; j++ ) + p_cur[j].a = 0; + + if( !(i & 1) ) + { + p_prev = p_row_prev; + } + else + { + p_prev = (_CvPyramidC3*)((char*)p_row_prev + step * + (l == 0 ? 
sizeof( _CvPyramidBaseC3 ) : sizeof( _CvPyramidC3 ))); + } + } + } + } /* end of the iteration process */ + + /* construct a connected components */ + size.width = roi.width >> level; + size.height = roi.height >> level; + + p_cur = pyram[level]; + + for( i = 0; i < size.height; i++, p_cur += size.width + 1 ) + { + for( j = 0; j < size.width; j++ ) + { + if( p_cur[j].a != 0 ) + { + cmp_node.data = p_cur + j; + CV_WRITE_SEQ_ELEM( cmp_node, writer ); + } + } + } + + cvEndWriteSeq( &writer ); + +/* clusterization segmented components and construction + output connected components */ + icvSegmentClusterC3( cmp_seq, res_seq, threshold2, pyram[1], roi ); + +/* convert (inplace) resultant segment values to int (top level) */ + +/* propagate segment values top down */ + for( l = level - 1; l >= 0; l-- ) + { + p_cur = pyram[l]; + + size.width <<= 1; + size.height <<= 1; + + if( l == 0 ) + { + size.width--; + size.height--; + } + + for( i = 0; i <= size.height; i++ ) + { + for( j = 0; j <= size.width; j++ ) + { + _CvPyramidC3 *p = p_cur->p; + + assert( p != 0 ); + if( p != &stub ) + { + p_cur->c = p->c; + } + + if( l == 0 ) + { + Cv32suf _c; + /* copy the segmented values to destination image */ + _c.f = p_cur->c.blue; dst_image[j*3] = (uchar)_c.i; + _c.f = p_cur->c.green; dst_image[j*3+1] = (uchar)_c.i; + _c.f = p_cur->c.red; dst_image[j*3+2] = (uchar)_c.i; + p_cur = _CV_NEXT_BASE_C3(p_cur,1); + } + else + { + p_cur++; + } + } + if( l == 0 ) + dst_image += dst_step; + } + } + + M_END: + + cvFree( &buffer ); + cvReleaseMemStorage( &temp_storage ); + + if( status == CV_OK ) + *dst_comp = res_seq; + + return status; +} + + +static CvStatus icvUpdatePyrLinks_8u_C1 + (int layer, void *layer_data, CvSize size, void *parent_layer, + void *_writer, float threshold, int is_last_iter, void *_stub, CvWriteNodeFunction /*func*/) +{ + int i, j; + _CvListNode cmp_node; + + _CvPyramid *stub = (_CvPyramid *) _stub; + _CvPyramid *p_cur = (_CvPyramid *) layer_data; + _CvPyramid *p_next1 = 
(_CvPyramid *) parent_layer; + _CvPyramid *p_next3 = p_next1 + (size.width >> 1) + 1; + + CvSeqWriter & writer = *(CvSeqWriter *) _writer; + + for( i = 0; i < size.height; i++ ) + { + for( j = 0; j < size.width; j += 2 ) + { + float c0, c1, c2, c3, c4; + _CvPyramid *p; + +/* son-father threshold linking for the current node establish */ + c0 = p_cur->c; + +/* find pointer for the first pixel */ + c1 = (float) fabs( c0 - p_next1[0].c ); + c2 = (float) fabs( c0 - p_next1[1].c ); + c3 = (float) fabs( c0 - p_next3[0].c ); + c4 = (float) fabs( c0 - p_next3[1].c ); + + p = p_next1; + + if( c1 > c2 ) + { + p = p_next1 + 1; + c1 = c2; + } + if( c1 > c3 ) + { + p = p_next3; + c1 = c3; + } + if( c1 > c4 ) + { + p = p_next3 + 1; + c1 = c4; + } + + if( c1 <= threshold ) + { + p_cur->p = p; + + if( layer == 0 ) + { + p->a++; + p_cur = (_CvPyramid*)((char*)p_cur + sizeof(_CvPyramidBase)); + if( is_last_iter ) + icvMaxRoi1( &(p->rect), j, i ); + } + else + { + int a = p_cur->a; + + p->a += a; + p_cur->c = 0; + p_cur++; + if( is_last_iter && a != 0 ) + icvMaxRoi( &(p->rect), &(p_cur[-1].rect) ); + } + } + else + { + p_cur->p = stub; + if( is_last_iter ) + { + cmp_node.data = p_cur; + CV_WRITE_SEQ_ELEM( cmp_node, writer ); + } + if( layer == 0 ) + { + p_cur = _CV_NEXT_BASE_C1(p_cur,1); + } + else + { + p_cur->c = 0; + p_cur++; + } + } + + /* find pointer for the second pixel */ + c0 = p_cur->c; + + c1 = (float) fabs( c0 - p_next1[0].c ); + c2 = (float) fabs( c0 - p_next1[1].c ); + c3 = (float) fabs( c0 - p_next3[0].c ); + c4 = (float) fabs( c0 - p_next3[1].c ); + + p = p_next1; + p_next1++; + + if( c1 > c2 ) + { + p = p_next1; + c1 = c2; + } + if( c1 > c3 ) + { + p = p_next3; + c1 = c3; + } + + p_next3++; + if( c1 > c4 ) + { + p = p_next3; + c1 = c4; + } + + if( c1 <= threshold ) + { + p_cur->p = p; + + if( layer == 0 ) + { + p->a++; + p_cur = _CV_NEXT_BASE_C1(p_cur,1); + if( is_last_iter ) + icvMaxRoi1( &(p->rect), j + 1, i ); + } + else + { + int a = p_cur->a; + + p->a += a; + 
p_cur->c = 0; + p_cur++; + if( is_last_iter && a != 0 ) + icvMaxRoi( &(p->rect), &(p_cur[-1].rect) ); + } + } + else + { + p_cur->p = stub; + if( is_last_iter ) + { + cmp_node.data = p_cur; + CV_WRITE_SEQ_ELEM( cmp_node, writer ); + } + if( layer == 0 ) + { + p_cur = _CV_NEXT_BASE_C1(p_cur,1); + } + else + { + p_cur->c = 0; + p_cur++; + } + } + } + + /* clear c's */ + if( layer > 0 ) + { + p_cur->c = 0; + p_cur++; + } + + if( !(i & 1) ) + { + p_next1 -= size.width >> 1; + p_next3 -= size.width >> 1; + } + else + { + p_next1++; + p_next3++; + } + } + + return CV_OK; +} + + +static CvStatus icvUpdatePyrLinks_8u_C3 + (int layer, void *layer_data, CvSize size, void *parent_layer, + void *_writer, float threshold, int is_last_iter, void *_stub, CvWriteNodeFunction /*func*/) +{ + int i, j; + _CvListNode cmp_node; + + _CvPyramidC3 *stub = (_CvPyramidC3 *) _stub; + _CvPyramidC3 *p_cur = (_CvPyramidC3 *) layer_data; + _CvPyramidC3 *p_next1 = (_CvPyramidC3 *) parent_layer; + _CvPyramidC3 *p_next3 = p_next1 + (size.width >> 1) + 1; + + CvSeqWriter & writer = *(CvSeqWriter *) _writer; + + for( i = 0; i < size.height; i++ ) + { + for( j = 0; j < size.width; j += 2 ) + { + float c1, c2, c3, c4; + _CvPyramidC3 *p; + +/* find pointer for the first pixel */ + c1 = _CV_RGB_DIST( p_cur->c, p_next1[0].c ); + c2 = _CV_RGB_DIST( p_cur->c, p_next1[1].c ); + c3 = _CV_RGB_DIST( p_cur->c, p_next3[0].c ); + c4 = _CV_RGB_DIST( p_cur->c, p_next3[1].c ); + + p = p_next1; + + if( c1 > c2 ) + { + p = p_next1 + 1; + c1 = c2; + } + if( c1 > c3 ) + { + p = p_next3; + c1 = c3; + } + if( c1 > c4 ) + { + p = p_next3 + 1; + c1 = c4; + } + + if( c1 < threshold ) + { + p_cur->p = p; + + if( layer == 0 ) + { + p->a++; + p_cur = _CV_NEXT_BASE_C3(p_cur,1); + if( is_last_iter ) + icvMaxRoi1( &(p->rect), j, i ); + } + else + { + int a = p_cur->a; + + p->a += a; + p_cur->c.blue = p_cur->c.green = p_cur->c.red = 0; + p_cur++; + if( is_last_iter && a != 0 ) + icvMaxRoi( &(p->rect), &(p_cur[-1].rect) ); + } + } + 
else + { + p_cur->p = stub; + if( is_last_iter /* && ( == 0 || p_cur->a != 0) */ ) + { + cmp_node.data = p_cur; + CV_WRITE_SEQ_ELEM( cmp_node, writer ); + } + + if( layer == 0 ) + { + p_cur = _CV_NEXT_BASE_C3(p_cur,1); + } + else + { + p_cur->c.blue = p_cur->c.green = p_cur->c.red = 0; + p_cur++; + } + } + + /* find pointer for the second pixel */ + c1 = _CV_RGB_DIST( p_cur->c, p_next1[0].c ); + c2 = _CV_RGB_DIST( p_cur->c, p_next1[1].c ); + c3 = _CV_RGB_DIST( p_cur->c, p_next3[0].c ); + c4 = _CV_RGB_DIST( p_cur->c, p_next3[1].c ); + + p = p_next1; + p_next1++; + + if( c1 > c2 ) + { + p = p_next1; + c1 = c2; + } + if( c1 > c3 ) + { + p = p_next3; + c1 = c3; + } + + p_next3++; + if( c1 > c4 ) + { + p = p_next3; + c1 = c4; + } + + if( c1 < threshold ) + { + p_cur->p = p; + + if( layer == 0 ) + { + p->a++; + p_cur = _CV_NEXT_BASE_C3(p_cur,1); + if( is_last_iter ) + icvMaxRoi1( &(p->rect), j + 1, i ); + } + else + { + int a = p_cur->a; + + p->a += a; + p_cur->c.blue = p_cur->c.green = p_cur->c.red = 0; + p_cur++; + if( is_last_iter && a != 0 ) + icvMaxRoi( &(p->rect), &(p_cur[-1].rect) ); + } + } + else + { + p_cur->p = stub; + if( is_last_iter /* && ( == 0 || p_cur->a != 0) */ ) + { + cmp_node.data = p_cur; + CV_WRITE_SEQ_ELEM( cmp_node, writer ); + } + if( layer == 0 ) + { + p_cur = _CV_NEXT_BASE_C3(p_cur,1); + } + else + { + p_cur->c.blue = p_cur->c.green = p_cur->c.red = 0; + p_cur++; + } + } + } + + /* clear c's */ + if( layer > 0 ) + { + p_cur->c.blue = p_cur->c.green = p_cur->c.red = 0; + p_cur++; + } + + if( !(i & 1) ) + { + p_next1 -= size.width >> 1; + p_next3 -= size.width >> 1; + } + else + { + p_next1++; + p_next3++; + } + } + + return CV_OK; +} + + + +/****************************************************************************************\ + + clusterization segmented components + +\****************************************************************************************/ +static void +icvExpandBaseLevelC1( _CvPyramid * base_p, _CvPyramid * p, 
_CvPyramidBase * start, int width ) +{ + int x = (int)((_CvPyramidBase *) base_p - start); + int y = x / width; + + x -= y * width; + p->a = 1; + p->rect.x1 = (ushort) x; + p->rect.y1 = (ushort) y; + p->rect.x2 = (ushort) (x + 1); + p->rect.y2 = (ushort) (y + 1); + p->c = base_p->c; +} + +CvStatus +icvSegmentClusterC1( CvSeq * cmp_seq, CvSeq * res_seq, + double threshold, _CvPyramid * first_level_end, CvSize first_level_size ) +{ + const double eps = 1.; + CvSeqWriter writer; + CvSeqReader reader; + _CvPyramid temp_cmp; + _CvPyramidBase *first_level_start = (_CvPyramidBase *) first_level_end - + first_level_size.width * first_level_size.height; + int c, i, count = cmp_seq->total; + + cvStartReadSeq( cmp_seq, &reader, 0 ); + cvStartAppendToSeq( res_seq, &writer ); + + if( threshold < eps ) + { + /* if threshold is too small then simply copy all + the components to the output sequence */ + for( i = 0; i < count; i++ ) + { + CvConnectedComp comp; + _CvPyramid *cmp = (_CvPyramid *) (((_CvListNode *) reader.ptr)->data); + Cv32suf _c; + + if( cmp < first_level_end ) + { + icvExpandBaseLevelC1( cmp, &temp_cmp, first_level_start, + first_level_size.width ); + cmp = &temp_cmp; + } + + _c.i = cvRound( cmp->c ); + cmp->c = _c.f; + comp.value = cvRealScalar(_c.i); + comp.area = cmp->a; + comp.rect.x = cmp->rect.x1; + comp.rect.y = cmp->rect.y1; + comp.rect.width = cmp->rect.x2 - cmp->rect.x1; + comp.rect.height = cmp->rect.y2 - cmp->rect.y1; + comp.contour = 0; + + CV_WRITE_SEQ_ELEM( comp, writer ); + CV_NEXT_SEQ_ELEM( sizeof( _CvListNode ), reader ); + } + } + else + { + _CvListNode stub_node; + _CvListNode *prev = &stub_node; + + stub_node.next = 0; + + for( i = 0; i < count; i++ ) + { + _CvListNode *node = (_CvListNode *) reader.ptr; + + prev->next = node; + prev = node; + CV_NEXT_SEQ_ELEM( sizeof( _CvListNode ), reader ); + } + prev->next = 0; + prev = stub_node.next; + + while( prev ) + { + _CvListNode *node = prev->next; + _CvListNode *acc = prev; + _CvPyramid *cmp = 
(_CvPyramid *) (acc->data); + CvConnectedComp comp; + float c0 = cmp->c; + + if( cmp < first_level_end ) + { + icvExpandBaseLevelC1( cmp, &temp_cmp, first_level_start, + first_level_size.width ); + } + else + { + temp_cmp = *cmp; + temp_cmp.c *= temp_cmp.a; + } + + acc->next = 0; + stub_node.next = 0; + prev = &stub_node; + + while( node ) + { + cmp = (_CvPyramid *) (node->data); + if( fabs( c0 - cmp->c ) < threshold ) + { + _CvPyramid temp; + + /* exclude from global list and add to list of joint component */ + prev->next = node->next; + node->next = acc; + acc = node; + + if( cmp < first_level_end ) + { + icvExpandBaseLevelC1( cmp, &temp, first_level_start, + first_level_size.width ); + cmp = &temp; + } + + temp_cmp.a += cmp->a; + temp_cmp.c += cmp->c * cmp->a; + icvMaxRoi( &(temp_cmp.rect), &(cmp->rect) ); + } + else + { + if( prev == &stub_node ) + { + stub_node.next = node; + } + prev = node; + } + node = prev->next; + } + + if( temp_cmp.a != 0 ) + { + c = cvRound( temp_cmp.c / temp_cmp.a ); + } + else + { + c = cvRound( c0 ); + } + node = acc; + + while( node ) + { + Cv32suf _c; + cmp = (_CvPyramid *) (node->data); + _c.i = c; cmp->c = _c.f; + node = node->next; + } + + comp.value = cvRealScalar(c); + comp.area = temp_cmp.a; + comp.rect.x = temp_cmp.rect.x1; + comp.rect.y = temp_cmp.rect.y1; + comp.rect.width = temp_cmp.rect.x2 - temp_cmp.rect.x1; + comp.rect.height = temp_cmp.rect.y2 - temp_cmp.rect.y1; + comp.contour = 0; + + CV_WRITE_SEQ_ELEM( comp, writer ); + prev = stub_node.next; + } + } + + cvEndWriteSeq( &writer ); + return CV_OK; +} + +/****************************************************************************************\ + + clusterization segmented components + +\****************************************************************************************/ +static void +icvExpandBaseLevelC3( _CvPyramidC3 * base_p, _CvPyramidC3 * p, + _CvPyramidBaseC3 * start, int width ) +{ + int x = (int)((_CvPyramidBaseC3 *) base_p - start); + int y = x / width; + 
+ x -= y * width; + p->a = 1; + p->rect.x1 = (ushort) x; + p->rect.y1 = (ushort) y; + p->rect.x2 = (ushort) (x + 1); + p->rect.y2 = (ushort) (y + 1); + p->c = base_p->c; +} + +CvStatus +icvSegmentClusterC3( CvSeq * cmp_seq, CvSeq * res_seq, + double threshold, + _CvPyramidC3 * first_level_end, CvSize first_level_size ) +{ + const double eps = 1.; + CvSeqWriter writer; + CvSeqReader reader; + _CvPyramidC3 temp_cmp; + _CvPyramidBaseC3 *first_level_start = (_CvPyramidBaseC3 *) first_level_end - + first_level_size.width * first_level_size.height; + int i, count = cmp_seq->total; + int c_blue, c_green, c_red; + + cvStartReadSeq( cmp_seq, &reader, 0 ); + cvStartAppendToSeq( res_seq, &writer ); + + if( threshold < eps ) + { + /* if threshold is too small then simply copy all + the components to the output sequence */ + for( i = 0; i < count; i++ ) + { + CvConnectedComp comp; + _CvPyramidC3 *cmp = (_CvPyramidC3 *) (((_CvListNode *) reader.ptr)->data); + Cv32suf _c; + + if( cmp < first_level_end ) + { + icvExpandBaseLevelC3( cmp, &temp_cmp, first_level_start, + first_level_size.width ); + cmp = &temp_cmp; + } + + c_blue = cvRound( cmp->c.blue ); + c_green = cvRound( cmp->c.green ); + c_red = cvRound( cmp->c.red ); + _c.i = c_blue; cmp->c.blue = _c.f; + _c.i = c_green; cmp->c.green = _c.f; + _c.i = c_red; cmp->c.red = _c.f; + comp.value = cvScalar( c_blue, c_green, c_red ); + comp.area = cmp->a; + comp.rect.x = cmp->rect.x1; + comp.rect.y = cmp->rect.y1; + comp.rect.width = cmp->rect.x2 - cmp->rect.x1; + comp.rect.height = cmp->rect.y2 - cmp->rect.y1; + comp.contour = 0; + + CV_WRITE_SEQ_ELEM( comp, writer ); + CV_NEXT_SEQ_ELEM( sizeof( _CvListNode ), reader ); + } + } + else + { + _CvListNode stub_node; + _CvListNode *prev = &stub_node; + + stub_node.next = 0; + + for( i = 0; i < count; i++ ) + { + _CvListNode *node = (_CvListNode *) reader.ptr; + + prev->next = node; + prev = node; + CV_NEXT_SEQ_ELEM( sizeof( _CvListNode ), reader ); + } + prev->next = 0; + prev = 
stub_node.next; + + while( prev ) + { + _CvListNode *node = prev->next; + _CvListNode *acc = prev; + _CvPyramidC3 *cmp = (_CvPyramidC3 *) (acc->data); + CvConnectedComp comp; + _CvRGBf c0 = cmp->c; + + if( cmp < first_level_end ) + { + icvExpandBaseLevelC3( cmp, &temp_cmp, first_level_start, + first_level_size.width ); + } + else + { + temp_cmp = *cmp; + temp_cmp.c.blue *= temp_cmp.a; + temp_cmp.c.green *= temp_cmp.a; + temp_cmp.c.red *= temp_cmp.a; + } + + acc->next = 0; + stub_node.next = 0; + prev = &stub_node; + + while( node ) + { + cmp = (_CvPyramidC3 *) (node->data); + if( _CV_RGB_DIST( c0, cmp->c ) < threshold ) + { + _CvPyramidC3 temp; + + /* exclude from global list and add to list of joint component */ + prev->next = node->next; + node->next = acc; + acc = node; + + if( cmp < first_level_end ) + { + icvExpandBaseLevelC3( cmp, &temp, first_level_start, + first_level_size.width ); + cmp = &temp; + } + + temp_cmp.a += cmp->a; + temp_cmp.c.blue += cmp->c.blue * cmp->a; + temp_cmp.c.green += cmp->c.green * cmp->a; + temp_cmp.c.red += cmp->c.red * cmp->a; + icvMaxRoi( &(temp_cmp.rect), &(cmp->rect) ); + } + else + { + if( prev == &stub_node ) + { + stub_node.next = node; + } + prev = node; + } + node = prev->next; + } + + if( temp_cmp.a != 0 ) + { + c_blue = cvRound( temp_cmp.c.blue / temp_cmp.a ); + c_green = cvRound( temp_cmp.c.green / temp_cmp.a ); + c_red = cvRound( temp_cmp.c.red / temp_cmp.a ); + } + else + { + c_blue = cvRound( c0.blue ); + c_green = cvRound( c0.green ); + c_red = cvRound( c0.red ); + } + node = acc; + + while( node ) + { + Cv32suf _c; + cmp = (_CvPyramidC3 *) (node->data); + _c.i = c_blue; cmp->c.blue = _c.f; + _c.i = c_green; cmp->c.green = _c.f; + _c.i = c_red; cmp->c.red = _c.f; + node = node->next; + } + + comp.value = cvScalar( c_blue, c_green, c_red ); + comp.area = temp_cmp.a; + comp.rect.x = temp_cmp.rect.x1; + comp.rect.y = temp_cmp.rect.y1; + comp.rect.width = temp_cmp.rect.x2 - temp_cmp.rect.x1; + comp.rect.height = 
temp_cmp.rect.y2 - temp_cmp.rect.y1; + comp.contour = 0; + + CV_WRITE_SEQ_ELEM( comp, writer ); + prev = stub_node.next; + } + } + + cvEndWriteSeq( &writer ); + return CV_OK; +} + +/****************************************************************************************\ + + definition of the maximum roi size + +\****************************************************************************************/ +void +icvMaxRoi( _CvRect16u * max_rect, _CvRect16u * cur_rect ) +{ + if( max_rect->x2 == 0 ) + *max_rect = *cur_rect; + else + { + if( max_rect->x1 > cur_rect->x1 ) + max_rect->x1 = cur_rect->x1; + if( max_rect->y1 > cur_rect->y1 ) + max_rect->y1 = cur_rect->y1; + + if( max_rect->x2 < cur_rect->x2 ) + max_rect->x2 = cur_rect->x2; + if( max_rect->y2 < cur_rect->y2 ) + max_rect->y2 = cur_rect->y2; + } +} + +void +icvMaxRoi1( _CvRect16u * max_rect, int x, int y ) +{ + if( max_rect->x2 == 0 ) + { + max_rect->x1 = (ushort) x; + max_rect->y1 = (ushort) y; + + ++x; + ++y; + + max_rect->x2 = (ushort) x; + max_rect->y2 = (ushort) y; + } + else + { + if( max_rect->x1 > x ) + max_rect->x1 = (ushort) x; + if( max_rect->y1 > y ) + max_rect->y1 = (ushort) y; + + ++x; + ++y; + + if( max_rect->x2 < x ) + max_rect->x2 = (ushort) x; + if( max_rect->y2 < y ) + max_rect->y2 = (ushort) y; + } +} + + +/*F/////////////////////////////////////////////////////////////////////////////////////// +// Name: cvPyrSegmentation +// Purpose: +// segments an image using pyramid-linking technique +// Context: +// Parameters: +// src - source image +// dst - destination image +// comp - pointer to returned connected component sequence +// storage - where the sequence is stored +// level - maximal pyramid level +// threshold1 - first threshold, affecting on detalization level when pyramid +// is built. +// threshold2 - second threshold - affects on final components merging. 
+// Returns: +// Notes: +// Source and destination image must be equal types and channels +//F*/ +CV_IMPL void +cvPyrSegmentation( IplImage * src, + IplImage * dst, + CvMemStorage * storage, + CvSeq ** comp, int level, double threshold1, double threshold2 ) +{ + CvSize src_size, dst_size; + uchar *src_data = 0; + uchar *dst_data = 0; + int src_step = 0, dst_step = 0; + int thresh1 = cvRound( threshold1 ); + int thresh2 = cvRound( threshold2 ); + + if( src->depth != IPL_DEPTH_8U ) + CV_Error( CV_BadDepth, cvUnsupportedFormat ); + + if( src->depth != dst->depth || src->nChannels != dst->nChannels ) + CV_Error( CV_StsBadArg, "src and dst have different formats" ); + + cvGetRawData( src, &src_data, &src_step, &src_size ); + cvGetRawData( dst, &dst_data, &dst_step, &dst_size ); + + if( src_size.width != dst_size.width || + src_size.height != dst_size.height ) + CV_Error( CV_StsBadArg, "src and dst have different ROIs" ); + + switch (src->nChannels) + { + case 1: + IPPI_CALL( icvPyrSegmentation8uC1R( src_data, src_step, + dst_data, dst_step, + src_size, + CV_GAUSSIAN_5x5, + comp, storage, level, thresh1, thresh2 )); + break; + case 3: + IPPI_CALL( icvPyrSegmentation8uC3R( src_data, src_step, + dst_data, dst_step, + src_size, + CV_GAUSSIAN_5x5, + comp, storage, level, thresh1, thresh2 )); + break; + default: + CV_Error( CV_BadNumChannels, cvUnsupportedFormat ); + } +} + + +/* End of file. */ diff --git a/opencv/imgproc/rotcalipers.cpp b/opencv/imgproc/rotcalipers.cpp new file mode 100644 index 0000000..4ea8f75 --- /dev/null +++ b/opencv/imgproc/rotcalipers.cpp @@ -0,0 +1,441 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ +#include "precomp.hpp" + +typedef struct +{ + int bottom; + int left; + float height; + float width; + float base_a; + float base_b; +} +icvMinAreaState; + +#define CV_CALIPERS_MAXHEIGHT 0 +#define CV_CALIPERS_MINAREARECT 1 +#define CV_CALIPERS_MAXDIST 2 + +/*F/////////////////////////////////////////////////////////////////////////////////////// +// Name: icvRotatingCalipers +// Purpose: +// Rotating calipers algorithm with some applications +// +// Context: +// Parameters: +// points - convex hull vertices ( any orientation ) +// n - number of vertices +// mode - concrete application of algorithm +// can be CV_CALIPERS_MAXDIST or +// CV_CALIPERS_MINAREARECT +// left, bottom, right, top - indexes of extremal points +// out - output info. +// In case CV_CALIPERS_MAXDIST it points to float value - +// maximal height of polygon. +// In case CV_CALIPERS_MINAREARECT +// ((CvPoint2D32f*)out)[0] - corner +// ((CvPoint2D32f*)out)[1] - vector1 +// ((CvPoint2D32f*)out)[0] - corner2 +// +// ^ +// | +// vector2 | +// | +// |____________\ +// corner / +// vector1 +// +// Returns: +// Notes: +//F*/ + +/* we will use usual cartesian coordinates */ +static void +icvRotatingCalipers( CvPoint2D32f* points, int n, int mode, float* out ) +{ + float minarea = FLT_MAX; + float max_dist = 0; + char buffer[32]; + int i, k; + CvPoint2D32f* vect = (CvPoint2D32f*)cvAlloc( n * sizeof(vect[0]) ); + float* inv_vect_length = (float*)cvAlloc( n * sizeof(inv_vect_length[0]) ); + int left = 0, bottom = 0, right = 0, top = 0; + int seq[4] = { -1, -1, -1, -1 }; + + /* rotating calipers sides will always have coordinates + (a,b) (-b,a) (-a,-b) (b, -a) + */ + /* this is a first base bector (a,b) initialized by (1,0) */ + float orientation = 0; + float base_a; + float base_b = 0; + + float left_x, right_x, top_y, bottom_y; + CvPoint2D32f pt0 = points[0]; + + left_x = right_x = pt0.x; + top_y = bottom_y = pt0.y; + + for( i = 0; i < n; i++ ) + { + double dx, dy; + + if( pt0.x < left_x ) + 
left_x = pt0.x, left = i; + + if( pt0.x > right_x ) + right_x = pt0.x, right = i; + + if( pt0.y > top_y ) + top_y = pt0.y, top = i; + + if( pt0.y < bottom_y ) + bottom_y = pt0.y, bottom = i; + + CvPoint2D32f pt = points[(i+1) & (i+1 < n ? -1 : 0)]; + + dx = pt.x - pt0.x; + dy = pt.y - pt0.y; + + vect[i].x = (float)dx; + vect[i].y = (float)dy; + inv_vect_length[i] = (float)(1./sqrt(dx*dx + dy*dy)); + + pt0 = pt; + } + + //cvbInvSqrt( inv_vect_length, inv_vect_length, n ); + + /* find convex hull orientation */ + { + double ax = vect[n-1].x; + double ay = vect[n-1].y; + + for( i = 0; i < n; i++ ) + { + double bx = vect[i].x; + double by = vect[i].y; + + double convexity = ax * by - ay * bx; + + if( convexity != 0 ) + { + orientation = (convexity > 0) ? 1.f : (-1.f); + break; + } + ax = bx; + ay = by; + } + assert( orientation != 0 ); + } + base_a = orientation; + +/*****************************************************************************************/ +/* init calipers position */ + seq[0] = bottom; + seq[1] = right; + seq[2] = top; + seq[3] = left; +/*****************************************************************************************/ +/* Main loop - evaluate angles and rotate calipers */ + + /* all of edges will be checked while rotating calipers by 90 degrees */ + for( k = 0; k < n; k++ ) + { + /* sinus of minimal angle */ + /*float sinus;*/ + + /* compute cosine of angle between calipers side and polygon edge */ + /* dp - dot product */ + float dp0 = base_a * vect[seq[0]].x + base_b * vect[seq[0]].y; + float dp1 = -base_b * vect[seq[1]].x + base_a * vect[seq[1]].y; + float dp2 = -base_a * vect[seq[2]].x - base_b * vect[seq[2]].y; + float dp3 = base_b * vect[seq[3]].x - base_a * vect[seq[3]].y; + + float cosalpha = dp0 * inv_vect_length[seq[0]]; + float maxcos = cosalpha; + + /* number of calipers edges, that has minimal angle with edge */ + int main_element = 0; + + /* choose minimal angle */ + cosalpha = dp1 * inv_vect_length[seq[1]]; + maxcos = 
(cosalpha > maxcos) ? (main_element = 1, cosalpha) : maxcos; + cosalpha = dp2 * inv_vect_length[seq[2]]; + maxcos = (cosalpha > maxcos) ? (main_element = 2, cosalpha) : maxcos; + cosalpha = dp3 * inv_vect_length[seq[3]]; + maxcos = (cosalpha > maxcos) ? (main_element = 3, cosalpha) : maxcos; + + /*rotate calipers*/ + { + //get next base + int pindex = seq[main_element]; + float lead_x = vect[pindex].x*inv_vect_length[pindex]; + float lead_y = vect[pindex].y*inv_vect_length[pindex]; + switch( main_element ) + { + case 0: + base_a = lead_x; + base_b = lead_y; + break; + case 1: + base_a = lead_y; + base_b = -lead_x; + break; + case 2: + base_a = -lead_x; + base_b = -lead_y; + break; + case 3: + base_a = -lead_y; + base_b = lead_x; + break; + default: assert(0); + } + } + /* change base point of main edge */ + seq[main_element] += 1; + seq[main_element] = (seq[main_element] == n) ? 0 : seq[main_element]; + + + switch (mode) + { + case CV_CALIPERS_MAXHEIGHT: + { + /* now main element lies on edge alligned to calipers side */ + + /* find opposite element i.e. 
transform */ + /* 0->2, 1->3, 2->0, 3->1 */ + int opposite_el = main_element ^ 2; + + float dx = points[seq[opposite_el]].x - points[seq[main_element]].x; + float dy = points[seq[opposite_el]].y - points[seq[main_element]].y; + float dist; + + if( main_element & 1 ) + dist = (float)fabs(dx * base_a + dy * base_b); + else + dist = (float)fabs(dx * (-base_b) + dy * base_a); + + if( dist > max_dist ) + max_dist = dist; + + break; + } + case CV_CALIPERS_MINAREARECT: + /* find area of rectangle */ + { + float height; + float area; + + /* find vector left-right */ + float dx = points[seq[1]].x - points[seq[3]].x; + float dy = points[seq[1]].y - points[seq[3]].y; + + /* dotproduct */ + float width = dx * base_a + dy * base_b; + + /* find vector left-right */ + dx = points[seq[2]].x - points[seq[0]].x; + dy = points[seq[2]].y - points[seq[0]].y; + + /* dotproduct */ + height = -dx * base_b + dy * base_a; + + area = width * height; + if( area <= minarea ) + { + float *buf = (float *) buffer; + + minarea = area; + /* leftist point */ + ((int *) buf)[0] = seq[3]; + buf[1] = base_a; + buf[2] = width; + buf[3] = base_b; + buf[4] = height; + /* bottom point */ + ((int *) buf)[5] = seq[0]; + buf[6] = area; + } + break; + } + } /*switch */ + } /* for */ + + switch (mode) + { + case CV_CALIPERS_MINAREARECT: + { + float *buf = (float *) buffer; + + float A1 = buf[1]; + float B1 = buf[3]; + + float A2 = -buf[3]; + float B2 = buf[1]; + + float C1 = A1 * points[((int *) buf)[0]].x + points[((int *) buf)[0]].y * B1; + float C2 = A2 * points[((int *) buf)[5]].x + points[((int *) buf)[5]].y * B2; + + float idet = 1.f / (A1 * B2 - A2 * B1); + + float px = (C1 * B2 - C2 * B1) * idet; + float py = (A1 * C2 - A2 * C1) * idet; + + out[0] = px; + out[1] = py; + + out[2] = A1 * buf[2]; + out[3] = B1 * buf[2]; + + out[4] = A2 * buf[4]; + out[5] = B2 * buf[4]; + } + break; + case CV_CALIPERS_MAXHEIGHT: + { + out[0] = max_dist; + } + break; + } + + cvFree( &vect ); + cvFree( &inv_vect_length ); +} 
+ + +CV_IMPL CvBox2D +cvMinAreaRect2( const CvArr* array, CvMemStorage* storage ) +{ + cv::Ptr temp_storage; + CvBox2D box; + cv::AutoBuffer _points; + CvPoint2D32f* points; + + memset(&box, 0, sizeof(box)); + + int i, n; + CvSeqReader reader; + CvContour contour_header; + CvSeqBlock block; + CvSeq* ptseq = (CvSeq*)array; + CvPoint2D32f out[3]; + + if( CV_IS_SEQ(ptseq) ) + { + if( !CV_IS_SEQ_POINT_SET(ptseq) && + (CV_SEQ_KIND(ptseq) != CV_SEQ_KIND_CURVE || + CV_SEQ_ELTYPE(ptseq) != CV_SEQ_ELTYPE_PPOINT )) + CV_Error( CV_StsUnsupportedFormat, + "Input sequence must consist of 2d points or pointers to 2d points" ); + if( !storage ) + storage = ptseq->storage; + } + else + { + ptseq = cvPointSeqFromMat( CV_SEQ_KIND_GENERIC, array, &contour_header, &block ); + } + + if( storage ) + { + temp_storage = cvCreateChildMemStorage( storage ); + } + else + { + temp_storage = cvCreateMemStorage(1 << 10); + } + + ptseq = cvConvexHull2( ptseq, temp_storage, CV_CLOCKWISE, 1 ); + n = ptseq->total; + + _points.allocate(n); + points = _points; + cvStartReadSeq( ptseq, &reader ); + + if( CV_SEQ_ELTYPE( ptseq ) == CV_32SC2 ) + { + for( i = 0; i < n; i++ ) + { + CvPoint pt; + CV_READ_SEQ_ELEM( pt, reader ); + points[i].x = (float)pt.x; + points[i].y = (float)pt.y; + } + } + else + { + for( i = 0; i < n; i++ ) + { + CV_READ_SEQ_ELEM( points[i], reader ); + } + } + + if( n > 2 ) + { + icvRotatingCalipers( points, n, CV_CALIPERS_MINAREARECT, (float*)out ); + box.center.x = out[0].x + (out[1].x + out[2].x)*0.5f; + box.center.y = out[0].y + (out[1].y + out[2].y)*0.5f; + box.size.width = (float)sqrt((double)out[1].x*out[1].x + (double)out[1].y*out[1].y); + box.size.height = (float)sqrt((double)out[2].x*out[2].x + (double)out[2].y*out[2].y); + box.angle = (float)atan2( (double)out[1].y, (double)out[1].x ); + } + else if( n == 2 ) + { + box.center.x = (points[0].x + points[1].x)*0.5f; + box.center.y = (points[0].y + points[1].y)*0.5f; + double dx = points[1].x - points[0].x; + double dy = 
points[1].y - points[0].y; + box.size.width = (float)sqrt(dx*dx + dy*dy); + box.size.height = 0; + box.angle = (float)atan2( dy, dx ); + } + else + { + if( n == 1 ) + box.center = points[0]; + } + + box.angle = (float)(box.angle*180/CV_PI); + return box; +} + diff --git a/opencv/imgproc/samplers.cpp b/opencv/imgproc/samplers.cpp new file mode 100644 index 0000000..636c9a9 --- /dev/null +++ b/opencv/imgproc/samplers.cpp @@ -0,0 +1,882 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" + +/**************************************************************************************\ +* line samplers * +\**************************************************************************************/ + +CV_IMPL int +cvSampleLine( const void* img, CvPoint pt1, CvPoint pt2, + void* _buffer, int connectivity ) +{ + int count = -1; + + int i, coi = 0, pix_size; + CvMat stub, *mat = cvGetMat( img, &stub, &coi ); + CvLineIterator iterator; + uchar* buffer = (uchar*)_buffer; + + if( coi != 0 ) + CV_Error( CV_BadCOI, "" ); + + if( !buffer ) + CV_Error( CV_StsNullPtr, "" ); + + count = cvInitLineIterator( mat, pt1, pt2, &iterator, connectivity ); + + pix_size = CV_ELEM_SIZE(mat->type); + for( i = 0; i < count; i++ ) + { + for( int j = 0; j < pix_size; j++ ) + buffer[j] = iterator.ptr[j]; + buffer += pix_size; + CV_NEXT_LINE_POINT( iterator ); + } + + return count; +} + + +static const void* +icvAdjustRect( const void* srcptr, int src_step, int pix_size, + CvSize src_size, CvSize win_size, + CvPoint ip, CvRect* pRect ) +{ + CvRect rect; + const char* src = (const char*)srcptr; + + if( ip.x >= 0 ) + { + src += ip.x*pix_size; + rect.x = 0; + } + else + { + rect.x = -ip.x; + if( rect.x > win_size.width ) + rect.x = win_size.width; + } + + if( ip.x + win_size.width < src_size.width ) + rect.width = win_size.width; + else + { + rect.width = src_size.width - ip.x - 1; + if( rect.width < 0 ) + { + src += 
rect.width*pix_size; + rect.width = 0; + } + assert( rect.width <= win_size.width ); + } + + if( ip.y >= 0 ) + { + src += ip.y * src_step; + rect.y = 0; + } + else + rect.y = -ip.y; + + if( ip.y + win_size.height < src_size.height ) + rect.height = win_size.height; + else + { + rect.height = src_size.height - ip.y - 1; + if( rect.height < 0 ) + { + src += rect.height*src_step; + rect.height = 0; + } + } + + *pRect = rect; + return src - rect.x*pix_size; +} + + +#define ICV_DEF_GET_RECT_SUB_PIX_FUNC( flavor, srctype, dsttype, worktype, \ + cast_macro, scale_macro, cast_macro2 )\ +CvStatus CV_STDCALL icvGetRectSubPix_##flavor##_C1R \ +( const srctype* src, int src_step, CvSize src_size, \ + dsttype* dst, int dst_step, CvSize win_size, CvPoint2D32f center ) \ +{ \ + CvPoint ip; \ + worktype a11, a12, a21, a22, b1, b2; \ + float a, b; \ + int i, j; \ + \ + center.x -= (win_size.width-1)*0.5f; \ + center.y -= (win_size.height-1)*0.5f; \ + \ + ip.x = cvFloor( center.x ); \ + ip.y = cvFloor( center.y ); \ + \ + a = center.x - ip.x; \ + b = center.y - ip.y; \ + a11 = scale_macro((1.f-a)*(1.f-b)); \ + a12 = scale_macro(a*(1.f-b)); \ + a21 = scale_macro((1.f-a)*b); \ + a22 = scale_macro(a*b); \ + b1 = scale_macro(1.f - b); \ + b2 = scale_macro(b); \ + \ + src_step /= sizeof(src[0]); \ + dst_step /= sizeof(dst[0]); \ + \ + if( 0 <= ip.x && ip.x + win_size.width < src_size.width && \ + 0 <= ip.y && ip.y + win_size.height < src_size.height ) \ + { \ + /* extracted rectangle is totally inside the image */ \ + src += ip.y * src_step + ip.x; \ + \ + for( i = 0; i < win_size.height; i++, src += src_step, \ + dst += dst_step ) \ + { \ + for( j = 0; j <= win_size.width - 2; j += 2 ) \ + { \ + worktype s0 = cast_macro(src[j])*a11 + \ + cast_macro(src[j+1])*a12 + \ + cast_macro(src[j+src_step])*a21 + \ + cast_macro(src[j+src_step+1])*a22; \ + worktype s1 = cast_macro(src[j+1])*a11 + \ + cast_macro(src[j+2])*a12 + \ + cast_macro(src[j+src_step+1])*a21 + \ + 
cast_macro(src[j+src_step+2])*a22; \ + \ + dst[j] = (dsttype)cast_macro2(s0); \ + dst[j+1] = (dsttype)cast_macro2(s1); \ + } \ + \ + for( ; j < win_size.width; j++ ) \ + { \ + worktype s0 = cast_macro(src[j])*a11 + \ + cast_macro(src[j+1])*a12 + \ + cast_macro(src[j+src_step])*a21 + \ + cast_macro(src[j+src_step+1])*a22; \ + \ + dst[j] = (dsttype)cast_macro2(s0); \ + } \ + } \ + } \ + else \ + { \ + CvRect r; \ + \ + src = (const srctype*)icvAdjustRect( src, src_step*sizeof(*src), \ + sizeof(*src), src_size, win_size,ip, &r); \ + \ + for( i = 0; i < win_size.height; i++, dst += dst_step ) \ + { \ + const srctype *src2 = src + src_step; \ + \ + if( i < r.y || i >= r.height ) \ + src2 -= src_step; \ + \ + for( j = 0; j < r.x; j++ ) \ + { \ + worktype s0 = cast_macro(src[r.x])*b1 + \ + cast_macro(src2[r.x])*b2; \ + \ + dst[j] = (dsttype)cast_macro2(s0); \ + } \ + \ + for( ; j < r.width; j++ ) \ + { \ + worktype s0 = cast_macro(src[j])*a11 + \ + cast_macro(src[j+1])*a12 + \ + cast_macro(src2[j])*a21 + \ + cast_macro(src2[j+1])*a22; \ + \ + dst[j] = (dsttype)cast_macro2(s0); \ + } \ + \ + for( ; j < win_size.width; j++ ) \ + { \ + worktype s0 = cast_macro(src[r.width])*b1 + \ + cast_macro(src2[r.width])*b2; \ + \ + dst[j] = (dsttype)cast_macro2(s0); \ + } \ + \ + if( i < r.height ) \ + src = src2; \ + } \ + } \ + \ + return CV_OK; \ +} + + +#define ICV_DEF_GET_RECT_SUB_PIX_FUNC_C3( flavor, srctype, dsttype, worktype, \ + cast_macro, scale_macro, mul_macro )\ +static CvStatus CV_STDCALL icvGetRectSubPix_##flavor##_C3R \ +( const srctype* src, int src_step, CvSize src_size, \ + dsttype* dst, int dst_step, CvSize win_size, CvPoint2D32f center ) \ +{ \ + CvPoint ip; \ + worktype a, b; \ + int i, j; \ + \ + center.x -= (win_size.width-1)*0.5f; \ + center.y -= (win_size.height-1)*0.5f; \ + \ + ip.x = cvFloor( center.x ); \ + ip.y = cvFloor( center.y ); \ + \ + a = scale_macro( center.x - ip.x ); \ + b = scale_macro( center.y - ip.y ); \ + \ + src_step /= sizeof( src[0] ); \ + 
dst_step /= sizeof( dst[0] ); \ + \ + if( 0 <= ip.x && ip.x + win_size.width < src_size.width && \ + 0 <= ip.y && ip.y + win_size.height < src_size.height ) \ + { \ + /* extracted rectangle is totally inside the image */ \ + src += ip.y * src_step + ip.x*3; \ + \ + for( i = 0; i < win_size.height; i++, src += src_step, \ + dst += dst_step ) \ + { \ + for( j = 0; j < win_size.width; j++ ) \ + { \ + worktype s0 = cast_macro(src[j*3]); \ + worktype s1 = cast_macro(src[j*3 + src_step]); \ + s0 += mul_macro( a, (cast_macro(src[j*3+3]) - s0)); \ + s1 += mul_macro( a, (cast_macro(src[j*3+3+src_step]) - s1));\ + dst[j*3] = (dsttype)(s0 + mul_macro( b, (s1 - s0))); \ + \ + s0 = cast_macro(src[j*3+1]); \ + s1 = cast_macro(src[j*3+1 + src_step]); \ + s0 += mul_macro( a, (cast_macro(src[j*3+4]) - s0)); \ + s1 += mul_macro( a, (cast_macro(src[j*3+4+src_step]) - s1));\ + dst[j*3+1] = (dsttype)(s0 + mul_macro( b, (s1 - s0))); \ + \ + s0 = cast_macro(src[j*3+2]); \ + s1 = cast_macro(src[j*3+2 + src_step]); \ + s0 += mul_macro( a, (cast_macro(src[j*3+5]) - s0)); \ + s1 += mul_macro( a, (cast_macro(src[j*3+5+src_step]) - s1));\ + dst[j*3+2] = (dsttype)(s0 + mul_macro( b, (s1 - s0))); \ + } \ + } \ + } \ + else \ + { \ + CvRect r; \ + \ + src = (const srctype*)icvAdjustRect( src, src_step*sizeof(*src), \ + sizeof(*src)*3, src_size, win_size, ip, &r ); \ + \ + for( i = 0; i < win_size.height; i++, dst += dst_step ) \ + { \ + const srctype *src2 = src + src_step; \ + \ + if( i < r.y || i >= r.height ) \ + src2 -= src_step; \ + \ + for( j = 0; j < r.x; j++ ) \ + { \ + worktype s0 = cast_macro(src[r.x*3]); \ + worktype s1 = cast_macro(src2[r.x*3]); \ + dst[j*3] = (dsttype)(s0 + mul_macro( b, (s1 - s0))); \ + \ + s0 = cast_macro(src[r.x*3+1]); \ + s1 = cast_macro(src2[r.x*3+1]); \ + dst[j*3+1] = (dsttype)(s0 + mul_macro( b, (s1 - s0))); \ + \ + s0 = cast_macro(src[r.x*3+2]); \ + s1 = cast_macro(src2[r.x*3+2]); \ + dst[j*3+2] = (dsttype)(s0 + mul_macro( b, (s1 - s0))); \ + } \ + \ + for( ; 
j < r.width; j++ ) \ + { \ + worktype s0 = cast_macro(src[j*3]); \ + worktype s1 = cast_macro(src2[j*3]); \ + s0 += mul_macro( a, (cast_macro(src[j*3 + 3]) - s0)); \ + s1 += mul_macro( a, (cast_macro(src2[j*3 + 3]) - s1)); \ + dst[j*3] = (dsttype)(s0 + mul_macro( b, (s1 - s0))); \ + \ + s0 = cast_macro(src[j*3+1]); \ + s1 = cast_macro(src2[j*3+1]); \ + s0 += mul_macro( a, (cast_macro(src[j*3 + 4]) - s0)); \ + s1 += mul_macro( a, (cast_macro(src2[j*3 + 4]) - s1)); \ + dst[j*3+1] = (dsttype)(s0 + mul_macro( b, (s1 - s0))); \ + \ + s0 = cast_macro(src[j*3+2]); \ + s1 = cast_macro(src2[j*3+2]); \ + s0 += mul_macro( a, (cast_macro(src[j*3 + 5]) - s0)); \ + s1 += mul_macro( a, (cast_macro(src2[j*3 + 5]) - s1)); \ + dst[j*3+2] = (dsttype)(s0 + mul_macro( b, (s1 - s0))); \ + } \ + \ + for( ; j < win_size.width; j++ ) \ + { \ + worktype s0 = cast_macro(src[r.width*3]); \ + worktype s1 = cast_macro(src2[r.width*3]); \ + dst[j*3] = (dsttype)(s0 + mul_macro( b, (s1 - s0))); \ + \ + s0 = cast_macro(src[r.width*3+1]); \ + s1 = cast_macro(src2[r.width*3+1]); \ + dst[j*3+1] = (dsttype)(s0 + mul_macro( b, (s1 - s0))); \ + \ + s0 = cast_macro(src[r.width*3+2]); \ + s1 = cast_macro(src2[r.width*3+2]); \ + dst[j*3+2] = (dsttype)(s0 + mul_macro( b, (s1 - s0))); \ + } \ + \ + if( i < r.height ) \ + src = src2; \ + } \ + } \ + \ + return CV_OK; \ +} + + + +CvStatus CV_STDCALL icvGetRectSubPix_8u32f_C1R +( const uchar* src, int src_step, CvSize src_size, + float* dst, int dst_step, CvSize win_size, CvPoint2D32f center ) +{ + CvPoint ip; + float a12, a22, b1, b2; + float a, b; + double s = 0; + int i, j; + + center.x -= (win_size.width-1)*0.5f; + center.y -= (win_size.height-1)*0.5f; + + ip.x = cvFloor( center.x ); + ip.y = cvFloor( center.y ); + + if( win_size.width <= 0 || win_size.height <= 0 ) + return CV_BADRANGE_ERR; + + a = center.x - ip.x; + b = center.y - ip.y; + a = MAX(a,0.0001f); + a12 = a*(1.f-b); + a22 = a*b; + b1 = 1.f - b; + b2 = b; + s = (1. 
- a)/a; + + src_step /= sizeof(src[0]); + dst_step /= sizeof(dst[0]); + + if( 0 <= ip.x && ip.x + win_size.width < src_size.width && + 0 <= ip.y && ip.y + win_size.height < src_size.height ) + { + // extracted rectangle is totally inside the image + src += ip.y * src_step + ip.x; + +#if 0 + if( icvCopySubpix_8u32f_C1R_p && + icvCopySubpix_8u32f_C1R_p( src, src_step, dst, + dst_step*sizeof(dst[0]), win_size, a, b ) >= 0 ) + return CV_OK; +#endif + + for( ; win_size.height--; src += src_step, dst += dst_step ) + { + float prev = (1 - a)*(b1*CV_8TO32F(src[0]) + b2*CV_8TO32F(src[src_step])); + for( j = 0; j < win_size.width; j++ ) + { + float t = a12*CV_8TO32F(src[j+1]) + a22*CV_8TO32F(src[j+1+src_step]); + dst[j] = prev + t; + prev = (float)(t*s); + } + } + } + else + { + CvRect r; + + src = (const uchar*)icvAdjustRect( src, src_step*sizeof(*src), + sizeof(*src), src_size, win_size,ip, &r); + + for( i = 0; i < win_size.height; i++, dst += dst_step ) + { + const uchar *src2 = src + src_step; + + if( i < r.y || i >= r.height ) + src2 -= src_step; + + for( j = 0; j < r.x; j++ ) + { + float s0 = CV_8TO32F(src[r.x])*b1 + + CV_8TO32F(src2[r.x])*b2; + + dst[j] = (float)(s0); + } + + if( j < r.width ) + { + float prev = (1 - a)*(b1*CV_8TO32F(src[j]) + b2*CV_8TO32F(src2[j])); + + for( ; j < r.width; j++ ) + { + float t = a12*CV_8TO32F(src[j+1]) + a22*CV_8TO32F(src2[j+1]); + dst[j] = prev + t; + prev = (float)(t*s); + } + } + + for( ; j < win_size.width; j++ ) + { + float s0 = CV_8TO32F(src[r.width])*b1 + + CV_8TO32F(src2[r.width])*b2; + + dst[j] = (float)(s0); + } + + if( i < r.height ) + src = src2; + } + } + + return CV_OK; +} + + + +#define ICV_SHIFT 16 +#define ICV_SCALE(x) cvRound((x)*(1 << ICV_SHIFT)) +#define ICV_MUL_SCALE(x,y) (((x)*(y) + (1 << (ICV_SHIFT-1))) >> ICV_SHIFT) +#define ICV_DESCALE(x) (((x)+(1 << (ICV_SHIFT-1))) >> ICV_SHIFT) + +/*icvCopySubpix_8u_C1R_t icvCopySubpix_8u_C1R_p = 0; +icvCopySubpix_8u32f_C1R_t icvCopySubpix_8u32f_C1R_p = 0; 
+icvCopySubpix_32f_C1R_t icvCopySubpix_32f_C1R_p = 0;*/ + +ICV_DEF_GET_RECT_SUB_PIX_FUNC( 8u, uchar, uchar, int, CV_NOP, ICV_SCALE, ICV_DESCALE ) +//ICV_DEF_GET_RECT_SUB_PIX_FUNC( 8u32f, uchar, float, float, CV_8TO32F, CV_NOP, CV_NOP ) +ICV_DEF_GET_RECT_SUB_PIX_FUNC( 32f, float, float, float, CV_NOP, CV_NOP, CV_NOP ) + +ICV_DEF_GET_RECT_SUB_PIX_FUNC_C3( 8u, uchar, uchar, int, CV_NOP, ICV_SCALE, ICV_MUL_SCALE ) +ICV_DEF_GET_RECT_SUB_PIX_FUNC_C3( 8u32f, uchar, float, float, CV_8TO32F, CV_NOP, CV_MUL ) +ICV_DEF_GET_RECT_SUB_PIX_FUNC_C3( 32f, float, float, float, CV_NOP, CV_NOP, CV_MUL ) + + +#define ICV_DEF_INIT_SUBPIX_TAB( FUNCNAME, FLAG ) \ +static void icvInit##FUNCNAME##FLAG##Table( CvFuncTable* tab ) \ +{ \ + tab->fn_2d[CV_8U] = (void*)icv##FUNCNAME##_8u_##FLAG; \ + tab->fn_2d[CV_32F] = (void*)icv##FUNCNAME##_32f_##FLAG; \ + \ + tab->fn_2d[1] = (void*)icv##FUNCNAME##_8u32f_##FLAG; \ +} + + +ICV_DEF_INIT_SUBPIX_TAB( GetRectSubPix, C1R ) +ICV_DEF_INIT_SUBPIX_TAB( GetRectSubPix, C3R ) + +typedef CvStatus (CV_STDCALL *CvGetRectSubPixFunc)( const void* src, int src_step, + CvSize src_size, void* dst, + int dst_step, CvSize win_size, + CvPoint2D32f center ); + +CV_IMPL void +cvGetRectSubPix( const void* srcarr, void* dstarr, CvPoint2D32f center ) +{ + static CvFuncTable gr_tab[2]; + static int inittab = 0; + + CvMat srcstub, *src = (CvMat*)srcarr; + CvMat dststub, *dst = (CvMat*)dstarr; + CvSize src_size, dst_size; + CvGetRectSubPixFunc func; + int cn, src_step, dst_step; + + if( !inittab ) + { + icvInitGetRectSubPixC1RTable( gr_tab + 0 ); + icvInitGetRectSubPixC3RTable( gr_tab + 1 ); + inittab = 1; + } + + if( !CV_IS_MAT(src)) + src = cvGetMat( src, &srcstub ); + + if( !CV_IS_MAT(dst)) + dst = cvGetMat( dst, &dststub ); + + cn = CV_MAT_CN( src->type ); + + if( (cn != 1 && cn != 3) || !CV_ARE_CNS_EQ( src, dst )) + CV_Error( CV_StsUnsupportedFormat, "" ); + + src_size = cvGetMatSize( src ); + dst_size = cvGetMatSize( dst ); + src_step = src->step ? 
src->step : CV_STUB_STEP; + dst_step = dst->step ? dst->step : CV_STUB_STEP; + + //if( dst_size.width > src_size.width || dst_size.height > src_size.height ) + // CV_ERROR( CV_StsBadSize, "destination ROI must be smaller than source ROI" ); + + if( CV_ARE_DEPTHS_EQ( src, dst )) + { + func = (CvGetRectSubPixFunc)(gr_tab[cn != 1].fn_2d[CV_MAT_DEPTH(src->type)]); + } + else + { + if( CV_MAT_DEPTH( src->type ) != CV_8U || CV_MAT_DEPTH( dst->type ) != CV_32F ) + CV_Error( CV_StsUnsupportedFormat, "" ); + + func = (CvGetRectSubPixFunc)(gr_tab[cn != 1].fn_2d[1]); + } + + if( !func ) + CV_Error( CV_StsUnsupportedFormat, "" ); + + IPPI_CALL( func( src->data.ptr, src_step, src_size, + dst->data.ptr, dst_step, dst_size, center )); +} + + +#define ICV_32F8U(x) ((uchar)cvRound(x)) + +#define ICV_DEF_GET_QUADRANGLE_SUB_PIX_FUNC( flavor, srctype, dsttype, \ + worktype, cast_macro, cvt ) \ +CvStatus CV_STDCALL \ +icvGetQuadrangleSubPix_##flavor##_C1R \ +( const srctype * src, int src_step, CvSize src_size, \ + dsttype *dst, int dst_step, CvSize win_size, const float *matrix ) \ +{ \ + int x, y; \ + double dx = (win_size.width - 1)*0.5; \ + double dy = (win_size.height - 1)*0.5; \ + double A11 = matrix[0], A12 = matrix[1], A13 = matrix[2]-A11*dx-A12*dy; \ + double A21 = matrix[3], A22 = matrix[4], A23 = matrix[5]-A21*dx-A22*dy; \ + \ + src_step /= sizeof(srctype); \ + dst_step /= sizeof(dsttype); \ + \ + for( y = 0; y < win_size.height; y++, dst += dst_step ) \ + { \ + double xs = A12*y + A13; \ + double ys = A22*y + A23; \ + double xe = A11*(win_size.width-1) + A12*y + A13; \ + double ye = A21*(win_size.width-1) + A22*y + A23; \ + \ + if( (unsigned)(cvFloor(xs)-1) < (unsigned)(src_size.width - 3) && \ + (unsigned)(cvFloor(ys)-1) < (unsigned)(src_size.height - 3) && \ + (unsigned)(cvFloor(xe)-1) < (unsigned)(src_size.width - 3) && \ + (unsigned)(cvFloor(ye)-1) < (unsigned)(src_size.height - 3)) \ + { \ + for( x = 0; x < win_size.width; x++ ) \ + { \ + int ixs = cvFloor( xs ); \ + 
int iys = cvFloor( ys ); \ + const srctype *ptr = src + src_step*iys + ixs; \ + double a = xs - ixs, b = ys - iys, a1 = 1.f - a; \ + worktype p0 = cvt(ptr[0])*a1 + cvt(ptr[1])*a; \ + worktype p1 = cvt(ptr[src_step])*a1 + cvt(ptr[src_step+1])*a;\ + xs += A11; \ + ys += A21; \ + \ + dst[x] = cast_macro(p0 + b * (p1 - p0)); \ + } \ + } \ + else \ + { \ + for( x = 0; x < win_size.width; x++ ) \ + { \ + int ixs = cvFloor( xs ), iys = cvFloor( ys ); \ + double a = xs - ixs, b = ys - iys, a1 = 1.f - a; \ + const srctype *ptr0, *ptr1; \ + worktype p0, p1; \ + xs += A11; ys += A21; \ + \ + if( (unsigned)iys < (unsigned)(src_size.height-1) ) \ + ptr0 = src + src_step*iys, ptr1 = ptr0 + src_step; \ + else \ + ptr0 = ptr1 = src + (iys < 0 ? 0 : src_size.height-1)*src_step; \ + \ + if( (unsigned)ixs < (unsigned)(src_size.width-1) ) \ + { \ + p0 = cvt(ptr0[ixs])*a1 + cvt(ptr0[ixs+1])*a; \ + p1 = cvt(ptr1[ixs])*a1 + cvt(ptr1[ixs+1])*a; \ + } \ + else \ + { \ + ixs = ixs < 0 ? 0 : src_size.width - 1; \ + p0 = cvt(ptr0[ixs]); p1 = cvt(ptr1[ixs]); \ + } \ + dst[x] = cast_macro(p0 + b * (p1 - p0)); \ + } \ + } \ + } \ + \ + return CV_OK; \ +} + + +#define ICV_DEF_GET_QUADRANGLE_SUB_PIX_FUNC_C3( flavor, srctype, dsttype, \ + worktype, cast_macro, cvt ) \ +static CvStatus CV_STDCALL \ +icvGetQuadrangleSubPix_##flavor##_C3R \ +( const srctype * src, int src_step, CvSize src_size, \ + dsttype *dst, int dst_step, CvSize win_size, const float *matrix ) \ +{ \ + int x, y; \ + double dx = (win_size.width - 1)*0.5; \ + double dy = (win_size.height - 1)*0.5; \ + double A11 = matrix[0], A12 = matrix[1], A13 = matrix[2]-A11*dx-A12*dy; \ + double A21 = matrix[3], A22 = matrix[4], A23 = matrix[5]-A21*dx-A22*dy; \ + \ + src_step /= sizeof(srctype); \ + dst_step /= sizeof(dsttype); \ + \ + for( y = 0; y < win_size.height; y++, dst += dst_step ) \ + { \ + double xs = A12*y + A13; \ + double ys = A22*y + A23; \ + double xe = A11*(win_size.width-1) + A12*y + A13; \ + double ye = A21*(win_size.width-1) 
+ A22*y + A23; \ + \ + if( (unsigned)(cvFloor(xs)-1) < (unsigned)(src_size.width - 3) && \ + (unsigned)(cvFloor(ys)-1) < (unsigned)(src_size.height - 3) && \ + (unsigned)(cvFloor(xe)-1) < (unsigned)(src_size.width - 3) && \ + (unsigned)(cvFloor(ye)-1) < (unsigned)(src_size.height - 3)) \ + { \ + for( x = 0; x < win_size.width; x++ ) \ + { \ + int ixs = cvFloor( xs ); \ + int iys = cvFloor( ys ); \ + const srctype *ptr = src + src_step*iys + ixs*3; \ + double a = xs - ixs, b = ys - iys, a1 = 1.f - a; \ + worktype p0, p1; \ + xs += A11; \ + ys += A21; \ + \ + p0 = cvt(ptr[0])*a1 + cvt(ptr[3])*a; \ + p1 = cvt(ptr[src_step])*a1 + cvt(ptr[src_step+3])*a; \ + dst[x*3] = cast_macro(p0 + b * (p1 - p0)); \ + \ + p0 = cvt(ptr[1])*a1 + cvt(ptr[4])*a; \ + p1 = cvt(ptr[src_step+1])*a1 + cvt(ptr[src_step+4])*a; \ + dst[x*3+1] = cast_macro(p0 + b * (p1 - p0)); \ + \ + p0 = cvt(ptr[2])*a1 + cvt(ptr[5])*a; \ + p1 = cvt(ptr[src_step+2])*a1 + cvt(ptr[src_step+5])*a; \ + dst[x*3+2] = cast_macro(p0 + b * (p1 - p0)); \ + } \ + } \ + else \ + { \ + for( x = 0; x < win_size.width; x++ ) \ + { \ + int ixs = cvFloor(xs), iys = cvFloor(ys); \ + double a = xs - ixs, b = ys - iys; \ + const srctype *ptr0, *ptr1; \ + xs += A11; ys += A21; \ + \ + if( (unsigned)iys < (unsigned)(src_size.height-1) ) \ + ptr0 = src + src_step*iys, ptr1 = ptr0 + src_step; \ + else \ + ptr0 = ptr1 = src + (iys < 0 ? 
0 : src_size.height-1)*src_step; \ + \ + if( (unsigned)ixs < (unsigned)(src_size.width - 1) ) \ + { \ + double a1 = 1.f - a; \ + worktype p0, p1; \ + ptr0 += ixs*3; ptr1 += ixs*3; \ + p0 = cvt(ptr0[0])*a1 + cvt(ptr0[3])*a; \ + p1 = cvt(ptr1[0])*a1 + cvt(ptr1[3])*a; \ + dst[x*3] = cast_macro(p0 + b * (p1 - p0)); \ + \ + p0 = cvt(ptr0[1])*a1 + cvt(ptr0[4])*a; \ + p1 = cvt(ptr1[1])*a1 + cvt(ptr1[4])*a; \ + dst[x*3+1] = cast_macro(p0 + b * (p1 - p0)); \ + \ + p0 = cvt(ptr0[2])*a1 + cvt(ptr0[5])*a; \ + p1 = cvt(ptr1[2])*a1 + cvt(ptr1[5])*a; \ + dst[x*3+2] = cast_macro(p0 + b * (p1 - p0)); \ + } \ + else \ + { \ + double b1 = 1.f - b; \ + ixs = ixs < 0 ? 0 : src_size.width - 1; \ + ptr0 += ixs*3; ptr1 += ixs*3; \ + \ + dst[x*3] = cast_macro(cvt(ptr0[0])*b1 + cvt(ptr1[0])*b);\ + dst[x*3+1]=cast_macro(cvt(ptr0[1])*b1 + cvt(ptr1[1])*b);\ + dst[x*3+2]=cast_macro(cvt(ptr0[2])*b1 + cvt(ptr1[2])*b);\ + } \ + } \ + } \ + } \ + \ + return CV_OK; \ +} + + +/*#define srctype uchar +#define dsttype uchar +#define worktype float +#define cvt CV_8TO32F +#define cast_macro ICV_32F8U + +#undef srctype +#undef dsttype +#undef worktype +#undef cvt +#undef cast_macro*/ + +ICV_DEF_GET_QUADRANGLE_SUB_PIX_FUNC( 8u, uchar, uchar, double, ICV_32F8U, CV_8TO32F ) +ICV_DEF_GET_QUADRANGLE_SUB_PIX_FUNC( 32f, float, float, double, CV_CAST_32F, CV_NOP ) +ICV_DEF_GET_QUADRANGLE_SUB_PIX_FUNC( 8u32f, uchar, float, double, CV_CAST_32F, CV_8TO32F ) + +ICV_DEF_GET_QUADRANGLE_SUB_PIX_FUNC_C3( 8u, uchar, uchar, double, ICV_32F8U, CV_8TO32F ) +ICV_DEF_GET_QUADRANGLE_SUB_PIX_FUNC_C3( 32f, float, float, double, CV_CAST_32F, CV_NOP ) +ICV_DEF_GET_QUADRANGLE_SUB_PIX_FUNC_C3( 8u32f, uchar, float, double, CV_CAST_32F, CV_8TO32F ) + +ICV_DEF_INIT_SUBPIX_TAB( GetQuadrangleSubPix, C1R ) +ICV_DEF_INIT_SUBPIX_TAB( GetQuadrangleSubPix, C3R ) + +typedef CvStatus (CV_STDCALL *CvGetQuadrangleSubPixFunc)( + const void* src, int src_step, + CvSize src_size, void* dst, + int dst_step, CvSize win_size, + const float* matrix ); + 
+CV_IMPL void +cvGetQuadrangleSubPix( const void* srcarr, void* dstarr, const CvMat* mat ) +{ + static CvFuncTable gq_tab[2]; + static int inittab = 0; + + CvMat srcstub, *src = (CvMat*)srcarr; + CvMat dststub, *dst = (CvMat*)dstarr; + CvSize src_size, dst_size; + CvGetQuadrangleSubPixFunc func; + float m[6]; + int k, cn; + + if( !inittab ) + { + icvInitGetQuadrangleSubPixC1RTable( gq_tab + 0 ); + icvInitGetQuadrangleSubPixC3RTable( gq_tab + 1 ); + inittab = 1; + } + + if( !CV_IS_MAT(src)) + src = cvGetMat( src, &srcstub ); + + if( !CV_IS_MAT(dst)) + dst = cvGetMat( dst, &dststub ); + + if( !CV_IS_MAT(mat)) + CV_Error( CV_StsBadArg, "map matrix is not valid" ); + + cn = CV_MAT_CN( src->type ); + + if( (cn != 1 && cn != 3) || !CV_ARE_CNS_EQ( src, dst )) + CV_Error( CV_StsUnsupportedFormat, "" ); + + src_size = cvGetMatSize( src ); + dst_size = cvGetMatSize( dst ); + + /*if( dst_size.width > src_size.width || dst_size.height > src_size.height ) + CV_ERROR( CV_StsBadSize, "destination ROI must not be larger than source ROI" );*/ + + if( mat->rows != 2 || mat->cols != 3 ) + CV_Error( CV_StsBadArg, + "Transformation matrix must be 2x3" ); + + if( CV_MAT_TYPE( mat->type ) == CV_32FC1 ) + { + for( k = 0; k < 3; k++ ) + { + m[k] = mat->data.fl[k]; + m[3 + k] = ((float*)(mat->data.ptr + mat->step))[k]; + } + } + else if( CV_MAT_TYPE( mat->type ) == CV_64FC1 ) + { + for( k = 0; k < 3; k++ ) + { + m[k] = (float)mat->data.db[k]; + m[3 + k] = (float)((double*)(mat->data.ptr + mat->step))[k]; + } + } + else + CV_Error( CV_StsUnsupportedFormat, + "The transformation matrix should have 32fC1 or 64fC1 type" ); + + if( CV_ARE_DEPTHS_EQ( src, dst )) + { + func = (CvGetQuadrangleSubPixFunc)(gq_tab[cn != 1].fn_2d[CV_MAT_DEPTH(src->type)]); + } + else + { + if( CV_MAT_DEPTH( src->type ) != CV_8U || CV_MAT_DEPTH( dst->type ) != CV_32F ) + CV_Error( CV_StsUnsupportedFormat, "" ); + + func = (CvGetQuadrangleSubPixFunc)(gq_tab[cn != 1].fn_2d[1]); + } + + if( !func ) + CV_Error( 
CV_StsUnsupportedFormat, "" ); + + IPPI_CALL( func( src->data.ptr, src->step, src_size, + dst->data.ptr, dst->step, dst_size, m )); +} + + +void cv::getRectSubPix( InputArray _image, Size patchSize, Point2f center, + OutputArray _patch, int patchType ) +{ + Mat image = _image.getMat(); + _patch.create(patchSize, patchType < 0 ? image.type() : + CV_MAKETYPE(CV_MAT_DEPTH(patchType),image.channels())); + Mat patch = _patch.getMat(); + CvMat _cimage = image, _cpatch = patch; + cvGetRectSubPix(&_cimage, &_cpatch, center); +} + +/* End of file. */ diff --git a/opencv/imgproc/segmentation.cpp b/opencv/imgproc/segmentation.cpp new file mode 100644 index 0000000..cb335e2 --- /dev/null +++ b/opencv/imgproc/segmentation.cpp @@ -0,0 +1,541 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" + +/****************************************************************************************\ +* Watershed * +\****************************************************************************************/ + +typedef struct CvWSNode +{ + struct CvWSNode* next; + int mask_ofs; + int img_ofs; +} +CvWSNode; + +typedef struct CvWSQueue +{ + CvWSNode* first; + CvWSNode* last; +} +CvWSQueue; + +static CvWSNode* +icvAllocWSNodes( CvMemStorage* storage ) +{ + CvWSNode* n = 0; + + int i, count = (storage->block_size - sizeof(CvMemBlock))/sizeof(*n) - 1; + + n = (CvWSNode*)cvMemStorageAlloc( storage, count*sizeof(*n) ); + for( i = 0; i < count-1; i++ ) + n[i].next = n + i + 1; + n[count-1].next = 0; + + return n; +} + + +CV_IMPL void +cvWatershed( const CvArr* srcarr, CvArr* dstarr ) +{ + const int IN_QUEUE = -2; + const int WSHED = -1; + const int NQ = 256; + cv::Ptr storage; + + CvMat sstub, *src; + CvMat dstub, *dst; + CvSize size; + CvWSNode* free_node = 0, *node; + CvWSQueue q[NQ]; + int active_queue; + int i, j; + int db, dg, dr; + int* mask; + uchar* img; + int mstep, istep; + int subs_tab[513]; + + // MAX(a,b) = b + MAX(a-b,0) + #define ws_max(a,b) 
((b) + subs_tab[(a)-(b)+NQ]) + // MIN(a,b) = a - MAX(a-b,0) + #define ws_min(a,b) ((a) - subs_tab[(a)-(b)+NQ]) + + #define ws_push(idx,mofs,iofs) \ + { \ + if( !free_node ) \ + free_node = icvAllocWSNodes( storage );\ + node = free_node; \ + free_node = free_node->next;\ + node->next = 0; \ + node->mask_ofs = mofs; \ + node->img_ofs = iofs; \ + if( q[idx].last ) \ + q[idx].last->next=node; \ + else \ + q[idx].first = node; \ + q[idx].last = node; \ + } + + #define ws_pop(idx,mofs,iofs) \ + { \ + node = q[idx].first; \ + q[idx].first = node->next; \ + if( !node->next ) \ + q[idx].last = 0; \ + node->next = free_node; \ + free_node = node; \ + mofs = node->mask_ofs; \ + iofs = node->img_ofs; \ + } + + #define c_diff(ptr1,ptr2,diff) \ + { \ + db = abs((ptr1)[0] - (ptr2)[0]);\ + dg = abs((ptr1)[1] - (ptr2)[1]);\ + dr = abs((ptr1)[2] - (ptr2)[2]);\ + diff = ws_max(db,dg); \ + diff = ws_max(diff,dr); \ + assert( 0 <= diff && diff <= 255 ); \ + } + + src = cvGetMat( srcarr, &sstub ); + dst = cvGetMat( dstarr, &dstub ); + + if( CV_MAT_TYPE(src->type) != CV_8UC3 ) + CV_Error( CV_StsUnsupportedFormat, "Only 8-bit, 3-channel input images are supported" ); + + if( CV_MAT_TYPE(dst->type) != CV_32SC1 ) + CV_Error( CV_StsUnsupportedFormat, + "Only 32-bit, 1-channel output images are supported" ); + + if( !CV_ARE_SIZES_EQ( src, dst )) + CV_Error( CV_StsUnmatchedSizes, "The input and output images must have the same size" ); + + size = cvGetMatSize(src); + storage = cvCreateMemStorage(); + + istep = src->step; + img = src->data.ptr; + mstep = dst->step / sizeof(mask[0]); + mask = dst->data.i; + + memset( q, 0, NQ*sizeof(q[0]) ); + + for( i = 0; i < 256; i++ ) + subs_tab[i] = 0; + for( i = 256; i <= 512; i++ ) + subs_tab[i] = i - 256; + + // draw a pixel-wide border of dummy "watershed" (i.e. 
boundary) pixels + for( j = 0; j < size.width; j++ ) + mask[j] = mask[j + mstep*(size.height-1)] = WSHED; + + // initial phase: put all the neighbor pixels of each marker to the ordered queue - + // determine the initial boundaries of the basins + for( i = 1; i < size.height-1; i++ ) + { + img += istep; mask += mstep; + mask[0] = mask[size.width-1] = WSHED; + + for( j = 1; j < size.width-1; j++ ) + { + int* m = mask + j; + if( m[0] < 0 ) m[0] = 0; + if( m[0] == 0 && (m[-1] > 0 || m[1] > 0 || m[-mstep] > 0 || m[mstep] > 0) ) + { + uchar* ptr = img + j*3; + int idx = 256, t; + if( m[-1] > 0 ) + c_diff( ptr, ptr - 3, idx ); + if( m[1] > 0 ) + { + c_diff( ptr, ptr + 3, t ); + idx = ws_min( idx, t ); + } + if( m[-mstep] > 0 ) + { + c_diff( ptr, ptr - istep, t ); + idx = ws_min( idx, t ); + } + if( m[mstep] > 0 ) + { + c_diff( ptr, ptr + istep, t ); + idx = ws_min( idx, t ); + } + assert( 0 <= idx && idx <= 255 ); + ws_push( idx, i*mstep + j, i*istep + j*3 ); + m[0] = IN_QUEUE; + } + } + } + + // find the first non-empty queue + for( i = 0; i < NQ; i++ ) + if( q[i].first ) + break; + + // if there is no markers, exit immediately + if( i == NQ ) + return; + + active_queue = i; + img = src->data.ptr; + mask = dst->data.i; + + // recursively fill the basins + for(;;) + { + int mofs, iofs; + int lab = 0, t; + int* m; + uchar* ptr; + + if( q[active_queue].first == 0 ) + { + for( i = active_queue+1; i < NQ; i++ ) + if( q[i].first ) + break; + if( i == NQ ) + break; + active_queue = i; + } + + ws_pop( active_queue, mofs, iofs ); + + m = mask + mofs; + ptr = img + iofs; + t = m[-1]; + if( t > 0 ) lab = t; + t = m[1]; + if( t > 0 ) + { + if( lab == 0 ) lab = t; + else if( t != lab ) lab = WSHED; + } + t = m[-mstep]; + if( t > 0 ) + { + if( lab == 0 ) lab = t; + else if( t != lab ) lab = WSHED; + } + t = m[mstep]; + if( t > 0 ) + { + if( lab == 0 ) lab = t; + else if( t != lab ) lab = WSHED; + } + assert( lab != 0 ); + m[0] = lab; + if( lab == WSHED ) + continue; + + if( m[-1] == 
0 ) + { + c_diff( ptr, ptr - 3, t ); + ws_push( t, mofs - 1, iofs - 3 ); + active_queue = ws_min( active_queue, t ); + m[-1] = IN_QUEUE; + } + if( m[1] == 0 ) + { + c_diff( ptr, ptr + 3, t ); + ws_push( t, mofs + 1, iofs + 3 ); + active_queue = ws_min( active_queue, t ); + m[1] = IN_QUEUE; + } + if( m[-mstep] == 0 ) + { + c_diff( ptr, ptr - istep, t ); + ws_push( t, mofs - mstep, iofs - istep ); + active_queue = ws_min( active_queue, t ); + m[-mstep] = IN_QUEUE; + } + if( m[mstep] == 0 ) + { + c_diff( ptr, ptr + istep, t ); + ws_push( t, mofs + mstep, iofs + istep ); + active_queue = ws_min( active_queue, t ); + m[mstep] = IN_QUEUE; + } + } +} + + +void cv::watershed( InputArray _src, InputOutputArray markers ) +{ + Mat src = _src.getMat(); + CvMat c_src = _src.getMat(), c_markers = markers.getMat(); + cvWatershed( &c_src, &c_markers ); +} + + +/****************************************************************************************\ +* Meanshift * +\****************************************************************************************/ + +CV_IMPL void +cvPyrMeanShiftFiltering( const CvArr* srcarr, CvArr* dstarr, + double sp0, double sr, int max_level, + CvTermCriteria termcrit ) +{ + const int cn = 3; + const int MAX_LEVELS = 8; + cv::Mat* src_pyramid = new cv::Mat[MAX_LEVELS+1]; + cv::Mat* dst_pyramid = new cv::Mat[MAX_LEVELS+1]; + cv::Mat mask0; + int i, j, level; + //uchar* submask = 0; + + #define cdiff(ofs0) (tab[c0-dptr[ofs0]+255] + \ + tab[c1-dptr[(ofs0)+1]+255] + tab[c2-dptr[(ofs0)+2]+255] >= isr22) + + double sr2 = sr * sr; + int isr2 = cvRound(sr2), isr22 = MAX(isr2,16); + int tab[768]; + cv::Mat src0 = cv::cvarrToMat(srcarr); + cv::Mat dst0 = cv::cvarrToMat(dstarr); + + if( src0.type() != CV_8UC3 ) + CV_Error( CV_StsUnsupportedFormat, "Only 8-bit, 3-channel images are supported" ); + + if( src0.type() != dst0.type() ) + CV_Error( CV_StsUnmatchedFormats, "The input and output images must have the same type" ); + + if( src0.size() != dst0.size() ) + 
CV_Error( CV_StsUnmatchedSizes, "The input and output images must have the same size" ); + + if( (unsigned)max_level > (unsigned)MAX_LEVELS ) + CV_Error( CV_StsOutOfRange, "The number of pyramid levels is too large or negative" ); + + if( !(termcrit.type & CV_TERMCRIT_ITER) ) + termcrit.max_iter = 5; + termcrit.max_iter = MAX(termcrit.max_iter,1); + termcrit.max_iter = MIN(termcrit.max_iter,100); + if( !(termcrit.type & CV_TERMCRIT_EPS) ) + termcrit.epsilon = 1.f; + termcrit.epsilon = MAX(termcrit.epsilon, 0.f); + + for( i = 0; i < 768; i++ ) + tab[i] = (i - 255)*(i - 255); + + // 1. construct pyramid + src_pyramid[0] = src0; + dst_pyramid[0] = dst0; + for( level = 1; level <= max_level; level++ ) + { + src_pyramid[level].create( (src_pyramid[level-1].rows+1)/2, + (src_pyramid[level-1].cols+1)/2, src_pyramid[level-1].type() ); + dst_pyramid[level].create( src_pyramid[level].rows, + src_pyramid[level].cols, src_pyramid[level].type() ); + cv::pyrDown( src_pyramid[level-1], src_pyramid[level], src_pyramid[level].size() ); + //CV_CALL( cvResize( src_pyramid[level-1], src_pyramid[level], CV_INTER_AREA )); + } + + mask0.create(src0.rows, src0.cols, CV_8UC1); + //CV_CALL( submask = (uchar*)cvAlloc( (sp+2)*(sp+2) )); + + // 2. apply meanshift, starting from the pyramid top (i.e. 
the smallest layer) + for( level = max_level; level >= 0; level-- ) + { + cv::Mat src = src_pyramid[level]; + cv::Size size = src.size(); + uchar* sptr = src.data; + int sstep = (int)src.step; + uchar* mask = 0; + int mstep = 0; + uchar* dptr; + int dstep; + float sp = (float)(sp0 / (1 << level)); + sp = MAX( sp, 1 ); + + if( level < max_level ) + { + cv::Size size1 = dst_pyramid[level+1].size(); + cv::Mat m( size.height, size.width, CV_8UC1, mask0.data ); + dstep = (int)dst_pyramid[level+1].step; + dptr = dst_pyramid[level+1].data + dstep + cn; + mstep = (int)m.step; + mask = m.data + mstep; + //cvResize( dst_pyramid[level+1], dst_pyramid[level], CV_INTER_CUBIC ); + cv::pyrUp( dst_pyramid[level+1], dst_pyramid[level], dst_pyramid[level].size() ); + m.setTo(cv::Scalar::all(0)); + + for( i = 1; i < size1.height-1; i++, dptr += dstep - (size1.width-2)*3, mask += mstep*2 ) + { + for( j = 1; j < size1.width-1; j++, dptr += cn ) + { + int c0 = dptr[0], c1 = dptr[1], c2 = dptr[2]; + mask[j*2 - 1] = cdiff(-3) || cdiff(3) || cdiff(-dstep-3) || cdiff(-dstep) || + cdiff(-dstep+3) || cdiff(dstep-3) || cdiff(dstep) || cdiff(dstep+3); + } + } + + cv::dilate( m, m, cv::Mat() ); + mask = m.data; + } + + dptr = dst_pyramid[level].data; + dstep = (int)dst_pyramid[level].step; + + for( i = 0; i < size.height; i++, sptr += sstep - size.width*3, + dptr += dstep - size.width*3, + mask += mstep ) + { + for( j = 0; j < size.width; j++, sptr += 3, dptr += 3 ) + { + int x0 = j, y0 = i, x1, y1, iter; + int c0, c1, c2; + + if( mask && !mask[j] ) + continue; + + c0 = sptr[0], c1 = sptr[1], c2 = sptr[2]; + + // iterate meanshift procedure + for( iter = 0; iter < termcrit.max_iter; iter++ ) + { + uchar* ptr; + int x, y, count = 0; + int minx, miny, maxx, maxy; + int s0 = 0, s1 = 0, s2 = 0, sx = 0, sy = 0; + double icount; + int stop_flag; + + //mean shift: process pixels in window (p-sigmaSp)x(p+sigmaSp) + minx = cvRound(x0 - sp); minx = MAX(minx, 0); + miny = cvRound(y0 - sp); miny = MAX(miny, 
0); + maxx = cvRound(x0 + sp); maxx = MIN(maxx, size.width-1); + maxy = cvRound(y0 + sp); maxy = MIN(maxy, size.height-1); + ptr = sptr + (miny - i)*sstep + (minx - j)*3; + + for( y = miny; y <= maxy; y++, ptr += sstep - (maxx-minx+1)*3 ) + { + int row_count = 0; + x = minx; + for( ; x + 3 <= maxx; x += 4, ptr += 12 ) + { + int t0 = ptr[0], t1 = ptr[1], t2 = ptr[2]; + if( tab[t0-c0+255] + tab[t1-c1+255] + tab[t2-c2+255] <= isr2 ) + { + s0 += t0; s1 += t1; s2 += t2; + sx += x; row_count++; + } + t0 = ptr[3], t1 = ptr[4], t2 = ptr[5]; + if( tab[t0-c0+255] + tab[t1-c1+255] + tab[t2-c2+255] <= isr2 ) + { + s0 += t0; s1 += t1; s2 += t2; + sx += x+1; row_count++; + } + t0 = ptr[6], t1 = ptr[7], t2 = ptr[8]; + if( tab[t0-c0+255] + tab[t1-c1+255] + tab[t2-c2+255] <= isr2 ) + { + s0 += t0; s1 += t1; s2 += t2; + sx += x+2; row_count++; + } + t0 = ptr[9], t1 = ptr[10], t2 = ptr[11]; + if( tab[t0-c0+255] + tab[t1-c1+255] + tab[t2-c2+255] <= isr2 ) + { + s0 += t0; s1 += t1; s2 += t2; + sx += x+3; row_count++; + } + } + + for( ; x <= maxx; x++, ptr += 3 ) + { + int t0 = ptr[0], t1 = ptr[1], t2 = ptr[2]; + if( tab[t0-c0+255] + tab[t1-c1+255] + tab[t2-c2+255] <= isr2 ) + { + s0 += t0; s1 += t1; s2 += t2; + sx += x; row_count++; + } + } + count += row_count; + sy += y*row_count; + } + + if( count == 0 ) + break; + + icount = 1./count; + x1 = cvRound(sx*icount); + y1 = cvRound(sy*icount); + s0 = cvRound(s0*icount); + s1 = cvRound(s1*icount); + s2 = cvRound(s2*icount); + + stop_flag = (x0 == x1 && y0 == y1) || abs(x1-x0) + abs(y1-y0) + + tab[s0 - c0 + 255] + tab[s1 - c1 + 255] + + tab[s2 - c2 + 255] <= termcrit.epsilon; + + x0 = x1; y0 = y1; + c0 = s0; c1 = s1; c2 = s2; + + if( stop_flag ) + break; + } + + dptr[0] = (uchar)c0; + dptr[1] = (uchar)c1; + dptr[2] = (uchar)c2; + } + } + } + delete[] src_pyramid; + delete[] dst_pyramid; +} + +void cv::pyrMeanShiftFiltering( InputArray _src, OutputArray _dst, + double sp, double sr, int maxLevel, + TermCriteria termcrit ) +{ + Mat src = 
_src.getMat(); + + if( src.empty() ) + return; + + _dst.create( src.size(), src.type() ); + CvMat c_src = src, c_dst = _dst.getMat(); + cvPyrMeanShiftFiltering( &c_src, &c_dst, sp, sr, maxLevel, termcrit ); +} diff --git a/opencv/imgproc/shapedescr.cpp b/opencv/imgproc/shapedescr.cpp new file mode 100644 index 0000000..8af036d --- /dev/null +++ b/opencv/imgproc/shapedescr.cpp @@ -0,0 +1,1306 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ +#include "precomp.hpp" + +/* calculates length of a curve (e.g. contour perimeter) */ +CV_IMPL double +cvArcLength( const void *array, CvSlice slice, int is_closed ) +{ + double perimeter = 0; + + int i, j = 0, count; + const int N = 16; + float buf[N]; + CvMat buffer = cvMat( 1, N, CV_32F, buf ); + CvSeqReader reader; + CvContour contour_header; + CvSeq* contour = 0; + CvSeqBlock block; + + if( CV_IS_SEQ( array )) + { + contour = (CvSeq*)array; + if( !CV_IS_SEQ_POLYLINE( contour )) + CV_Error( CV_StsBadArg, "Unsupported sequence type" ); + if( is_closed < 0 ) + is_closed = CV_IS_SEQ_CLOSED( contour ); + } + else + { + is_closed = is_closed > 0; + contour = cvPointSeqFromMat( + CV_SEQ_KIND_CURVE | (is_closed ? 
CV_SEQ_FLAG_CLOSED : 0), + array, &contour_header, &block ); + } + + if( contour->total > 1 ) + { + int is_float = CV_SEQ_ELTYPE( contour ) == CV_32FC2; + + cvStartReadSeq( contour, &reader, 0 ); + cvSetSeqReaderPos( &reader, slice.start_index ); + count = cvSliceLength( slice, contour ); + + count -= !is_closed && count == contour->total; + + /* scroll the reader by 1 point */ + reader.prev_elem = reader.ptr; + CV_NEXT_SEQ_ELEM( sizeof(CvPoint), reader ); + + for( i = 0; i < count; i++ ) + { + float dx, dy; + + if( !is_float ) + { + CvPoint* pt = (CvPoint*)reader.ptr; + CvPoint* prev_pt = (CvPoint*)reader.prev_elem; + + dx = (float)pt->x - (float)prev_pt->x; + dy = (float)pt->y - (float)prev_pt->y; + } + else + { + CvPoint2D32f* pt = (CvPoint2D32f*)reader.ptr; + CvPoint2D32f* prev_pt = (CvPoint2D32f*)reader.prev_elem; + + dx = pt->x - prev_pt->x; + dy = pt->y - prev_pt->y; + } + + reader.prev_elem = reader.ptr; + CV_NEXT_SEQ_ELEM( contour->elem_size, reader ); + + buffer.data.fl[j] = dx * dx + dy * dy; + if( ++j == N || i == count - 1 ) + { + buffer.cols = j; + cvPow( &buffer, &buffer, 0.5 ); + for( ; j > 0; j-- ) + perimeter += buffer.data.fl[j-1]; + } + } + } + + return perimeter; +} + + +static CvStatus +icvFindCircle( CvPoint2D32f pt0, CvPoint2D32f pt1, + CvPoint2D32f pt2, CvPoint2D32f * center, float *radius ) +{ + double x1 = (pt0.x + pt1.x) * 0.5; + double dy1 = pt0.x - pt1.x; + double x2 = (pt1.x + pt2.x) * 0.5; + double dy2 = pt1.x - pt2.x; + double y1 = (pt0.y + pt1.y) * 0.5; + double dx1 = pt1.y - pt0.y; + double y2 = (pt1.y + pt2.y) * 0.5; + double dx2 = pt2.y - pt1.y; + double t = 0; + + CvStatus result = CV_OK; + + if( icvIntersectLines( x1, dx1, y1, dy1, x2, dx2, y2, dy2, &t ) >= 0 ) + { + center->x = (float) (x2 + dx2 * t); + center->y = (float) (y2 + dy2 * t); + *radius = (float) icvDistanceL2_32f( *center, pt0 ); + } + else + { + center->x = center->y = 0.f; + radius = 0; + result = CV_NOTDEFINED_ERR; + } + + return result; +} + + +CV_INLINE 
double icvIsPtInCircle( CvPoint2D32f pt, CvPoint2D32f center, float radius ) +{ + double dx = pt.x - center.x; + double dy = pt.y - center.y; + return (double)radius*radius - dx*dx - dy*dy; +} + + +static int +icvFindEnslosingCicle4pts_32f( CvPoint2D32f * pts, CvPoint2D32f * _center, float *_radius ) +{ + int shuffles[4][4] = { {0, 1, 2, 3}, {0, 1, 3, 2}, {2, 3, 0, 1}, {2, 3, 1, 0} }; + + int idxs[4] = { 0, 1, 2, 3 }; + int i, j, k = 1, mi = 0; + float max_dist = 0; + CvPoint2D32f center; + CvPoint2D32f min_center; + float radius, min_radius = FLT_MAX; + CvPoint2D32f res_pts[4]; + + center = min_center = pts[0]; + radius = 1.f; + + for( i = 0; i < 4; i++ ) + for( j = i + 1; j < 4; j++ ) + { + float dist = icvDistanceL2_32f( pts[i], pts[j] ); + + if( max_dist < dist ) + { + max_dist = dist; + idxs[0] = i; + idxs[1] = j; + } + } + + if( max_dist == 0 ) + goto function_exit; + + k = 2; + for( i = 0; i < 4; i++ ) + { + for( j = 0; j < k; j++ ) + if( i == idxs[j] ) + break; + if( j == k ) + idxs[k++] = i; + } + + center = cvPoint2D32f( (pts[idxs[0]].x + pts[idxs[1]].x)*0.5f, + (pts[idxs[0]].y + pts[idxs[1]].y)*0.5f ); + radius = (float)(icvDistanceL2_32f( pts[idxs[0]], center )*1.03); + if( radius < 1.f ) + radius = 1.f; + + if( icvIsPtInCircle( pts[idxs[2]], center, radius ) >= 0 && + icvIsPtInCircle( pts[idxs[3]], center, radius ) >= 0 ) + { + k = 2; //rand()%2+2; + } + else + { + mi = -1; + for( i = 0; i < 4; i++ ) + { + if( icvFindCircle( pts[shuffles[i][0]], pts[shuffles[i][1]], + pts[shuffles[i][2]], ¢er, &radius ) >= 0 ) + { + radius *= 1.03f; + if( radius < 2.f ) + radius = 2.f; + + if( icvIsPtInCircle( pts[shuffles[i][3]], center, radius ) >= 0 && + min_radius > radius ) + { + min_radius = radius; + min_center = center; + mi = i; + } + } + } + assert( mi >= 0 ); + if( mi < 0 ) + mi = 0; + k = 3; + center = min_center; + radius = min_radius; + for( i = 0; i < 4; i++ ) + idxs[i] = shuffles[mi][i]; + } + + function_exit: + + *_center = center; + *_radius = radius; 
+ + /* reorder output points */ + for( i = 0; i < 4; i++ ) + res_pts[i] = pts[idxs[i]]; + + for( i = 0; i < 4; i++ ) + { + pts[i] = res_pts[i]; + assert( icvIsPtInCircle( pts[i], center, radius ) >= 0 ); + } + + return k; +} + + +CV_IMPL int +cvMinEnclosingCircle( const void* array, CvPoint2D32f * _center, float *_radius ) +{ + const int max_iters = 100; + const float eps = FLT_EPSILON*2; + CvPoint2D32f center = { 0, 0 }; + float radius = 0; + int result = 0; + + if( _center ) + _center->x = _center->y = 0.f; + if( _radius ) + *_radius = 0; + + CvSeqReader reader; + int i, k, count; + CvPoint2D32f pts[8]; + CvContour contour_header; + CvSeqBlock block; + CvSeq* sequence = 0; + int is_float; + + if( !_center || !_radius ) + CV_Error( CV_StsNullPtr, "Null center or radius pointers" ); + + if( CV_IS_SEQ(array) ) + { + sequence = (CvSeq*)array; + if( !CV_IS_SEQ_POINT_SET( sequence )) + CV_Error( CV_StsBadArg, "The passed sequence is not a valid contour" ); + } + else + { + sequence = cvPointSeqFromMat( + CV_SEQ_KIND_GENERIC, array, &contour_header, &block ); + } + + if( sequence->total <= 0 ) + CV_Error( CV_StsBadSize, "" ); + + cvStartReadSeq( sequence, &reader, 0 ); + + count = sequence->total; + is_float = CV_SEQ_ELTYPE(sequence) == CV_32FC2; + + if( !is_float ) + { + CvPoint *pt_left, *pt_right, *pt_top, *pt_bottom; + CvPoint pt; + pt_left = pt_right = pt_top = pt_bottom = (CvPoint *)(reader.ptr); + CV_READ_SEQ_ELEM( pt, reader ); + + for( i = 1; i < count; i++ ) + { + CvPoint* pt_ptr = (CvPoint*)reader.ptr; + CV_READ_SEQ_ELEM( pt, reader ); + + if( pt.x < pt_left->x ) + pt_left = pt_ptr; + if( pt.x > pt_right->x ) + pt_right = pt_ptr; + if( pt.y < pt_top->y ) + pt_top = pt_ptr; + if( pt.y > pt_bottom->y ) + pt_bottom = pt_ptr; + } + + pts[0] = cvPointTo32f( *pt_left ); + pts[1] = cvPointTo32f( *pt_right ); + pts[2] = cvPointTo32f( *pt_top ); + pts[3] = cvPointTo32f( *pt_bottom ); + } + else + { + CvPoint2D32f *pt_left, *pt_right, *pt_top, *pt_bottom; + 
CvPoint2D32f pt; + pt_left = pt_right = pt_top = pt_bottom = (CvPoint2D32f *) (reader.ptr); + CV_READ_SEQ_ELEM( pt, reader ); + + for( i = 1; i < count; i++ ) + { + CvPoint2D32f* pt_ptr = (CvPoint2D32f*)reader.ptr; + CV_READ_SEQ_ELEM( pt, reader ); + + if( pt.x < pt_left->x ) + pt_left = pt_ptr; + if( pt.x > pt_right->x ) + pt_right = pt_ptr; + if( pt.y < pt_top->y ) + pt_top = pt_ptr; + if( pt.y > pt_bottom->y ) + pt_bottom = pt_ptr; + } + + pts[0] = *pt_left; + pts[1] = *pt_right; + pts[2] = *pt_top; + pts[3] = *pt_bottom; + } + + for( k = 0; k < max_iters; k++ ) + { + double min_delta = 0, delta; + CvPoint2D32f ptfl; + + icvFindEnslosingCicle4pts_32f( pts, ¢er, &radius ); + cvStartReadSeq( sequence, &reader, 0 ); + + for( i = 0; i < count; i++ ) + { + if( !is_float ) + { + ptfl.x = (float)((CvPoint*)reader.ptr)->x; + ptfl.y = (float)((CvPoint*)reader.ptr)->y; + } + else + { + ptfl = *(CvPoint2D32f*)reader.ptr; + } + CV_NEXT_SEQ_ELEM( sequence->elem_size, reader ); + + delta = icvIsPtInCircle( ptfl, center, radius ); + if( delta < min_delta ) + { + min_delta = delta; + pts[3] = ptfl; + } + } + result = min_delta >= 0; + if( result ) + break; + } + + if( !result ) + { + cvStartReadSeq( sequence, &reader, 0 ); + radius = 0.f; + + for( i = 0; i < count; i++ ) + { + CvPoint2D32f ptfl; + float t, dx, dy; + + if( !is_float ) + { + ptfl.x = (float)((CvPoint*)reader.ptr)->x; + ptfl.y = (float)((CvPoint*)reader.ptr)->y; + } + else + { + ptfl = *(CvPoint2D32f*)reader.ptr; + } + + CV_NEXT_SEQ_ELEM( sequence->elem_size, reader ); + dx = center.x - ptfl.x; + dy = center.y - ptfl.y; + t = dx*dx + dy*dy; + radius = MAX(radius,t); + } + + radius = (float)(sqrt(radius)*(1 + eps)); + result = 1; + } + + *_center = center; + *_radius = radius; + + return result; +} + + +/* area of a whole sequence */ +static CvStatus +icvContourArea( const CvSeq* contour, double *area ) +{ + if( contour->total ) + { + CvSeqReader reader; + int lpt = contour->total; + double a00 = 0, xi_1, yi_1; + 
int is_float = CV_SEQ_ELTYPE(contour) == CV_32FC2; + + cvStartReadSeq( contour, &reader, 0 ); + + if( !is_float ) + { + xi_1 = ((CvPoint*)(reader.ptr))->x; + yi_1 = ((CvPoint*)(reader.ptr))->y; + } + else + { + xi_1 = ((CvPoint2D32f*)(reader.ptr))->x; + yi_1 = ((CvPoint2D32f*)(reader.ptr))->y; + } + CV_NEXT_SEQ_ELEM( contour->elem_size, reader ); + + while( lpt-- > 0 ) + { + double dxy, xi, yi; + + if( !is_float ) + { + xi = ((CvPoint*)(reader.ptr))->x; + yi = ((CvPoint*)(reader.ptr))->y; + } + else + { + xi = ((CvPoint2D32f*)(reader.ptr))->x; + yi = ((CvPoint2D32f*)(reader.ptr))->y; + } + CV_NEXT_SEQ_ELEM( contour->elem_size, reader ); + + dxy = xi_1 * yi - xi * yi_1; + a00 += dxy; + xi_1 = xi; + yi_1 = yi; + } + + *area = a00 * 0.5; + } + else + *area = 0; + + return CV_OK; +} + + +/****************************************************************************************\ + + copy data from one buffer to other buffer + +\****************************************************************************************/ + +static CvStatus +icvMemCopy( double **buf1, double **buf2, double **buf3, int *b_max ) +{ + int bb; + + if( (*buf1 == NULL && *buf2 == NULL) || *buf3 == NULL ) + return CV_NULLPTR_ERR; + + bb = *b_max; + if( *buf2 == NULL ) + { + *b_max = 2 * (*b_max); + *buf2 = (double *)cvAlloc( (*b_max) * sizeof( double )); + + if( *buf2 == NULL ) + return CV_OUTOFMEM_ERR; + + memcpy( *buf2, *buf3, bb * sizeof( double )); + + *buf3 = *buf2; + cvFree( buf1 ); + *buf1 = NULL; + } + else + { + *b_max = 2 * (*b_max); + *buf1 = (double *) cvAlloc( (*b_max) * sizeof( double )); + + if( *buf1 == NULL ) + return CV_OUTOFMEM_ERR; + + memcpy( *buf1, *buf3, bb * sizeof( double )); + + *buf3 = *buf1; + cvFree( buf2 ); + *buf2 = NULL; + } + return CV_OK; +} + + +/* area of a contour sector */ +static CvStatus icvContourSecArea( CvSeq * contour, CvSlice slice, double *area ) +{ + CvPoint pt; /* pointer to points */ + CvPoint pt_s, pt_e; /* first and last points */ + CvSeqReader 
reader; /* points reader of contour */ + + int p_max = 2, p_ind; + int lpt, flag, i; + double a00; /* unnormalized moments m00 */ + double xi, yi, xi_1, yi_1, x0, y0, dxy, sk, sk1, t; + double x_s, y_s, nx, ny, dx, dy, du, dv; + double eps = 1.e-5; + double *p_are1, *p_are2, *p_are; + + assert( contour != NULL ); + + if( contour == NULL ) + return CV_NULLPTR_ERR; + + if( !CV_IS_SEQ_POINT_SET( contour )) + return CV_BADFLAG_ERR; + + lpt = cvSliceLength( slice, contour ); + /*if( n2 >= n1 ) + lpt = n2 - n1 + 1; + else + lpt = contour->total - n1 + n2 + 1;*/ + + if( contour->total && lpt > 2 ) + { + a00 = x0 = y0 = xi_1 = yi_1 = 0; + sk1 = 0; + flag = 0; + dxy = 0; + p_are1 = (double *) cvAlloc( p_max * sizeof( double )); + + if( p_are1 == NULL ) + return CV_OUTOFMEM_ERR; + + p_are = p_are1; + p_are2 = NULL; + + cvStartReadSeq( contour, &reader, 0 ); + cvSetSeqReaderPos( &reader, slice.start_index ); + CV_READ_SEQ_ELEM( pt_s, reader ); + p_ind = 0; + cvSetSeqReaderPos( &reader, slice.end_index ); + CV_READ_SEQ_ELEM( pt_e, reader ); + +/* normal coefficients */ + nx = pt_s.y - pt_e.y; + ny = pt_e.x - pt_s.x; + cvSetSeqReaderPos( &reader, slice.start_index ); + + while( lpt-- > 0 ) + { + CV_READ_SEQ_ELEM( pt, reader ); + + if( flag == 0 ) + { + xi_1 = (double) pt.x; + yi_1 = (double) pt.y; + x0 = xi_1; + y0 = yi_1; + sk1 = 0; + flag = 1; + } + else + { + xi = (double) pt.x; + yi = (double) pt.y; + +/**************** edges intersection examination **************************/ + sk = nx * (xi - pt_s.x) + ny * (yi - pt_s.y); + if( (fabs( sk ) < eps && lpt > 0) || sk * sk1 < -eps ) + { + if( fabs( sk ) < eps ) + { + dxy = xi_1 * yi - xi * yi_1; + a00 = a00 + dxy; + dxy = xi * y0 - x0 * yi; + a00 = a00 + dxy; + + if( p_ind >= p_max ) + icvMemCopy( &p_are1, &p_are2, &p_are, &p_max ); + + p_are[p_ind] = a00 / 2.; + p_ind++; + a00 = 0; + sk1 = 0; + x0 = xi; + y0 = yi; + dxy = 0; + } + else + { +/* define intersection point */ + dv = yi - yi_1; + du = xi - xi_1; + dx = ny; + dy = 
-nx; + if( fabs( du ) > eps ) + t = ((yi_1 - pt_s.y) * du + dv * (pt_s.x - xi_1)) / + (du * dy - dx * dv); + else + t = (xi_1 - pt_s.x) / dx; + if( t > eps && t < 1 - eps ) + { + x_s = pt_s.x + t * dx; + y_s = pt_s.y + t * dy; + dxy = xi_1 * y_s - x_s * yi_1; + a00 += dxy; + dxy = x_s * y0 - x0 * y_s; + a00 += dxy; + if( p_ind >= p_max ) + icvMemCopy( &p_are1, &p_are2, &p_are, &p_max ); + + p_are[p_ind] = a00 / 2.; + p_ind++; + + a00 = 0; + sk1 = 0; + x0 = x_s; + y0 = y_s; + dxy = x_s * yi - xi * y_s; + } + } + } + else + dxy = xi_1 * yi - xi * yi_1; + + a00 += dxy; + xi_1 = xi; + yi_1 = yi; + sk1 = sk; + + } + } + + xi = x0; + yi = y0; + dxy = xi_1 * yi - xi * yi_1; + + a00 += dxy; + + if( p_ind >= p_max ) + icvMemCopy( &p_are1, &p_are2, &p_are, &p_max ); + + p_are[p_ind] = a00 / 2.; + p_ind++; + +/* common area calculation */ + *area = 0; + for( i = 0; i < p_ind; i++ ) + (*area) += fabs( p_are[i] ); + + if( p_are1 != NULL ) + cvFree( &p_are1 ); + else if( p_are2 != NULL ) + cvFree( &p_are2 ); + + return CV_OK; + } + else + return CV_BADSIZE_ERR; +} + + +/* external contour area function */ +CV_IMPL double +cvContourArea( const void *array, CvSlice slice, int oriented ) +{ + double area = 0; + + CvContour contour_header; + CvSeq* contour = 0; + CvSeqBlock block; + + if( CV_IS_SEQ( array )) + { + contour = (CvSeq*)array; + if( !CV_IS_SEQ_POLYLINE( contour )) + CV_Error( CV_StsBadArg, "Unsupported sequence type" ); + } + else + { + contour = cvPointSeqFromMat( CV_SEQ_KIND_CURVE, array, &contour_header, &block ); + } + + if( cvSliceLength( slice, contour ) == contour->total ) + { + IPPI_CALL( icvContourArea( contour, &area )); + } + else + { + if( CV_SEQ_ELTYPE( contour ) != CV_32SC2 ) + CV_Error( CV_StsUnsupportedFormat, + "Only curves with integer coordinates are supported in case of contour slice" ); + IPPI_CALL( icvContourSecArea( contour, slice, &area )); + } + + return oriented ? 
area : fabs(area); +} + + +/* for now this function works bad with singular cases + You can see in the code, that when some troubles with + matrices or some variables occur - + box filled with zero values is returned. + However in general function works fine. +*/ +static void +icvFitEllipse_F( CvSeq* points, CvBox2D* box ) +{ + cv::Ptr D; + double S[36], C[36], T[36]; + + int i, j; + double eigenvalues[6], eigenvectors[36]; + double a, b, c, d, e, f; + double x0, y0, idet, scale, offx = 0, offy = 0; + + int n = points->total; + CvSeqReader reader; + int is_float = CV_SEQ_ELTYPE(points) == CV_32FC2; + + CvMat matS = cvMat(6,6,CV_64F,S), matC = cvMat(6,6,CV_64F,C), matT = cvMat(6,6,CV_64F,T); + CvMat _EIGVECS = cvMat(6,6,CV_64F,eigenvectors), _EIGVALS = cvMat(6,1,CV_64F,eigenvalues); + + /* create matrix D of input points */ + D = cvCreateMat( n, 6, CV_64F ); + + cvStartReadSeq( points, &reader ); + + /* shift all points to zero */ + for( i = 0; i < n; i++ ) + { + if( !is_float ) + { + offx += ((CvPoint*)reader.ptr)->x; + offy += ((CvPoint*)reader.ptr)->y; + } + else + { + offx += ((CvPoint2D32f*)reader.ptr)->x; + offy += ((CvPoint2D32f*)reader.ptr)->y; + } + CV_NEXT_SEQ_ELEM( points->elem_size, reader ); + } + + offx /= n; + offy /= n; + + // fill matrix rows as (x*x, x*y, y*y, x, y, 1 ) + for( i = 0; i < n; i++ ) + { + double x, y; + double* Dptr = D->data.db + i*6; + + if( !is_float ) + { + x = ((CvPoint*)reader.ptr)->x - offx; + y = ((CvPoint*)reader.ptr)->y - offy; + } + else + { + x = ((CvPoint2D32f*)reader.ptr)->x - offx; + y = ((CvPoint2D32f*)reader.ptr)->y - offy; + } + CV_NEXT_SEQ_ELEM( points->elem_size, reader ); + + Dptr[0] = x * x; + Dptr[1] = x * y; + Dptr[2] = y * y; + Dptr[3] = x; + Dptr[4] = y; + Dptr[5] = 1.; + } + + // S = D^t*D + cvMulTransposed( D, &matS, 1 ); + cvSVD( &matS, &_EIGVALS, &_EIGVECS, 0, CV_SVD_MODIFY_A + CV_SVD_U_T ); + + for( i = 0; i < 6; i++ ) + { + double a = eigenvalues[i]; + a = a < DBL_EPSILON ? 
0 : 1./sqrt(sqrt(a)); + for( j = 0; j < 6; j++ ) + eigenvectors[i*6 + j] *= a; + } + + // C = Q^-1 = transp(INVEIGV) * INVEIGV + cvMulTransposed( &_EIGVECS, &matC, 1 ); + + cvZero( &matS ); + S[2] = 2.; + S[7] = -1.; + S[12] = 2.; + + // S = Q^-1*S*Q^-1 + cvMatMul( &matC, &matS, &matT ); + cvMatMul( &matT, &matC, &matS ); + + // and find its eigenvalues and vectors too + //cvSVD( &matS, &_EIGVALS, &_EIGVECS, 0, CV_SVD_MODIFY_A + CV_SVD_U_T ); + cvEigenVV( &matS, &_EIGVECS, &_EIGVALS, 0 ); + + for( i = 0; i < 3; i++ ) + if( eigenvalues[i] > 0 ) + break; + + if( i >= 3 /*eigenvalues[0] < DBL_EPSILON*/ ) + { + box->center.x = box->center.y = + box->size.width = box->size.height = + box->angle = 0.f; + return; + } + + // now find truthful eigenvector + _EIGVECS = cvMat( 6, 1, CV_64F, eigenvectors + 6*i ); + matT = cvMat( 6, 1, CV_64F, T ); + // Q^-1*eigenvecs[0] + cvMatMul( &matC, &_EIGVECS, &matT ); + + // extract vector components + a = T[0]; b = T[1]; c = T[2]; d = T[3]; e = T[4]; f = T[5]; + + ///////////////// extract ellipse axes from above values //////////////// + + /* + 1) find center of ellipse + it satisfy equation + | a b/2 | * | x0 | + | d/2 | = |0 | + | b/2 c | | y0 | | e/2 | |0 | + + */ + idet = a * c - b * b * 0.25; + idet = idet > DBL_EPSILON ? 
1./idet : 0; + + // we must normalize (a b c d e f ) to fit (4ac-b^2=1) + scale = sqrt( 0.25 * idet ); + + if( scale < DBL_EPSILON ) + { + box->center.x = (float)offx; + box->center.y = (float)offy; + box->size.width = box->size.height = box->angle = 0.f; + return; + } + + a *= scale; + b *= scale; + c *= scale; + d *= scale; + e *= scale; + f *= scale; + + x0 = (-d * c + e * b * 0.5) * 2.; + y0 = (-a * e + d * b * 0.5) * 2.; + + // recover center + box->center.x = (float)(x0 + offx); + box->center.y = (float)(y0 + offy); + + // offset ellipse to (x0,y0) + // new f == F(x0,y0) + f += a * x0 * x0 + b * x0 * y0 + c * y0 * y0 + d * x0 + e * y0; + + if( fabs(f) < DBL_EPSILON ) + { + box->size.width = box->size.height = box->angle = 0.f; + return; + } + + scale = -1. / f; + // normalize to f = 1 + a *= scale; + b *= scale; + c *= scale; + + // extract axis of ellipse + // one more eigenvalue operation + S[0] = a; + S[1] = S[2] = b * 0.5; + S[3] = c; + + matS = cvMat( 2, 2, CV_64F, S ); + _EIGVECS = cvMat( 2, 2, CV_64F, eigenvectors ); + _EIGVALS = cvMat( 1, 2, CV_64F, eigenvalues ); + cvSVD( &matS, &_EIGVALS, &_EIGVECS, 0, CV_SVD_MODIFY_A + CV_SVD_U_T ); + + // exteract axis length from eigenvectors + box->size.width = (float)(2./sqrt(eigenvalues[0])); + box->size.height = (float)(2./sqrt(eigenvalues[1])); + + // calc angle + box->angle = (float)(180 - atan2(eigenvectors[2], eigenvectors[3])*180/CV_PI); +} + + +CV_IMPL CvBox2D +cvFitEllipse2( const CvArr* array ) +{ + CvBox2D box; + cv::AutoBuffer Ad, bd; + memset( &box, 0, sizeof(box)); + + CvContour contour_header; + CvSeq* ptseq = 0; + CvSeqBlock block; + int n; + + if( CV_IS_SEQ( array )) + { + ptseq = (CvSeq*)array; + if( !CV_IS_SEQ_POINT_SET( ptseq )) + CV_Error( CV_StsBadArg, "Unsupported sequence type" ); + } + else + { + ptseq = cvPointSeqFromMat(CV_SEQ_KIND_GENERIC, array, &contour_header, &block); + } + + n = ptseq->total; + if( n < 5 ) + CV_Error( CV_StsBadSize, "Number of points should be >= 5" ); +#if 1 + 
icvFitEllipse_F( ptseq, &box ); +#else + /* + * New fitellipse algorithm, contributed by Dr. Daniel Weiss + */ + double gfp[5], rp[5], t; + CvMat A, b, x; + const double min_eps = 1e-6; + int i, is_float; + CvSeqReader reader; + + Ad.allocate(n*5); + bd.allocate(n); + + // first fit for parameters A - E + A = cvMat( n, 5, CV_64F, Ad ); + b = cvMat( n, 1, CV_64F, bd ); + x = cvMat( 5, 1, CV_64F, gfp ); + + cvStartReadSeq( ptseq, &reader ); + is_float = CV_SEQ_ELTYPE(ptseq) == CV_32FC2; + + for( i = 0; i < n; i++ ) + { + CvPoint2D32f p; + if( is_float ) + p = *(CvPoint2D32f*)(reader.ptr); + else + { + p.x = (float)((int*)reader.ptr)[0]; + p.y = (float)((int*)reader.ptr)[1]; + } + CV_NEXT_SEQ_ELEM( sizeof(p), reader ); + + bd[i] = 10000.0; // 1.0? + Ad[i*5] = -(double)p.x * p.x; // A - C signs inverted as proposed by APP + Ad[i*5 + 1] = -(double)p.y * p.y; + Ad[i*5 + 2] = -(double)p.x * p.y; + Ad[i*5 + 3] = p.x; + Ad[i*5 + 4] = p.y; + } + + cvSolve( &A, &b, &x, CV_SVD ); + + // now use general-form parameters A - E to find the ellipse center: + // differentiate general form wrt x/y to get two equations for cx and cy + A = cvMat( 2, 2, CV_64F, Ad ); + b = cvMat( 2, 1, CV_64F, bd ); + x = cvMat( 2, 1, CV_64F, rp ); + Ad[0] = 2 * gfp[0]; + Ad[1] = Ad[2] = gfp[2]; + Ad[3] = 2 * gfp[1]; + bd[0] = gfp[3]; + bd[1] = gfp[4]; + cvSolve( &A, &b, &x, CV_SVD ); + + // re-fit for parameters A - C with those center coordinates + A = cvMat( n, 3, CV_64F, Ad ); + b = cvMat( n, 1, CV_64F, bd ); + x = cvMat( 3, 1, CV_64F, gfp ); + for( i = 0; i < n; i++ ) + { + CvPoint2D32f p; + if( is_float ) + p = *(CvPoint2D32f*)(reader.ptr); + else + { + p.x = (float)((int*)reader.ptr)[0]; + p.y = (float)((int*)reader.ptr)[1]; + } + CV_NEXT_SEQ_ELEM( sizeof(p), reader ); + bd[i] = 1.0; + Ad[i * 3] = (p.x - rp[0]) * (p.x - rp[0]); + Ad[i * 3 + 1] = (p.y - rp[1]) * (p.y - rp[1]); + Ad[i * 3 + 2] = (p.x - rp[0]) * (p.y - rp[1]); + } + cvSolve(&A, &b, &x, CV_SVD); + + // store angle and radii + rp[4] = 
-0.5 * atan2(gfp[2], gfp[1] - gfp[0]); // convert from APP angle usage + t = sin(-2.0 * rp[4]); + if( fabs(t) > fabs(gfp[2])*min_eps ) + t = gfp[2]/t; + else + t = gfp[1] - gfp[0]; + rp[2] = fabs(gfp[0] + gfp[1] - t); + if( rp[2] > min_eps ) + rp[2] = sqrt(2.0 / rp[2]); + rp[3] = fabs(gfp[0] + gfp[1] + t); + if( rp[3] > min_eps ) + rp[3] = sqrt(2.0 / rp[3]); + + box.center.x = (float)rp[0]; + box.center.y = (float)rp[1]; + box.size.width = (float)(rp[2]*2); + box.size.height = (float)(rp[3]*2); + if( box.size.width > box.size.height ) + { + float tmp; + CV_SWAP( box.size.width, box.size.height, tmp ); + box.angle = (float)(90 + rp[4]*180/CV_PI); + } + if( box.angle < -180 ) + box.angle += 360; + if( box.angle > 360 ) + box.angle -= 360; +#endif + + return box; +} + + +/* Calculates bounding rectagnle of a point set or retrieves already calculated */ +CV_IMPL CvRect +cvBoundingRect( CvArr* array, int update ) +{ + CvSeqReader reader; + CvRect rect = { 0, 0, 0, 0 }; + CvContour contour_header; + CvSeq* ptseq = 0; + CvSeqBlock block; + + CvMat stub, *mat = 0; + int xmin = 0, ymin = 0, xmax = -1, ymax = -1, i, j, k; + int calculate = update; + + if( CV_IS_SEQ( array )) + { + ptseq = (CvSeq*)array; + if( !CV_IS_SEQ_POINT_SET( ptseq )) + CV_Error( CV_StsBadArg, "Unsupported sequence type" ); + + if( ptseq->header_size < (int)sizeof(CvContour)) + { + /*if( update == 1 ) + CV_Error( CV_StsBadArg, "The header is too small to fit the rectangle, " + "so it could not be updated" );*/ + update = 0; + calculate = 1; + } + } + else + { + mat = cvGetMat( array, &stub ); + if( CV_MAT_TYPE(mat->type) == CV_32SC2 || + CV_MAT_TYPE(mat->type) == CV_32FC2 ) + { + ptseq = cvPointSeqFromMat(CV_SEQ_KIND_GENERIC, mat, &contour_header, &block); + mat = 0; + } + else if( CV_MAT_TYPE(mat->type) != CV_8UC1 && + CV_MAT_TYPE(mat->type) != CV_8SC1 ) + CV_Error( CV_StsUnsupportedFormat, + "The image/matrix format is not supported by the function" ); + update = 0; + calculate = 1; + } + + if( 
!calculate ) + return ((CvContour*)ptseq)->rect; + + if( mat ) + { + CvSize size = cvGetMatSize(mat); + xmin = size.width; + ymin = -1; + + for( i = 0; i < size.height; i++ ) + { + uchar* _ptr = mat->data.ptr + i*mat->step; + uchar* ptr = (uchar*)cvAlignPtr(_ptr, 4); + int have_nz = 0, k_min, offset = (int)(ptr - _ptr); + j = 0; + offset = MIN(offset, size.width); + for( ; j < offset; j++ ) + if( _ptr[j] ) + { + have_nz = 1; + break; + } + if( j < offset ) + { + if( j < xmin ) + xmin = j; + if( j > xmax ) + xmax = j; + } + if( offset < size.width ) + { + xmin -= offset; + xmax -= offset; + size.width -= offset; + j = 0; + for( ; j <= xmin - 4; j += 4 ) + if( *((int*)(ptr+j)) ) + break; + for( ; j < xmin; j++ ) + if( ptr[j] ) + { + xmin = j; + if( j > xmax ) + xmax = j; + have_nz = 1; + break; + } + k_min = MAX(j-1, xmax); + k = size.width - 1; + for( ; k > k_min && (k&3) != 3; k-- ) + if( ptr[k] ) + break; + if( k > k_min && (k&3) == 3 ) + { + for( ; k > k_min+3; k -= 4 ) + if( *((int*)(ptr+k-3)) ) + break; + } + for( ; k > k_min; k-- ) + if( ptr[k] ) + { + xmax = k; + have_nz = 1; + break; + } + if( !have_nz ) + { + j &= ~3; + for( ; j <= k - 3; j += 4 ) + if( *((int*)(ptr+j)) ) + break; + for( ; j <= k; j++ ) + if( ptr[j] ) + { + have_nz = 1; + break; + } + } + xmin += offset; + xmax += offset; + size.width += offset; + } + if( have_nz ) + { + if( ymin < 0 ) + ymin = i; + ymax = i; + } + } + + if( xmin >= size.width ) + xmin = ymin = 0; + } + else if( ptseq->total ) + { + int is_float = CV_SEQ_ELTYPE(ptseq) == CV_32FC2; + cvStartReadSeq( ptseq, &reader, 0 ); + + if( !is_float ) + { + CvPoint pt; + /* init values */ + CV_READ_SEQ_ELEM( pt, reader ); + xmin = xmax = pt.x; + ymin = ymax = pt.y; + + for( i = 1; i < ptseq->total; i++ ) + { + CV_READ_SEQ_ELEM( pt, reader ); + + if( xmin > pt.x ) + xmin = pt.x; + + if( xmax < pt.x ) + xmax = pt.x; + + if( ymin > pt.y ) + ymin = pt.y; + + if( ymax < pt.y ) + ymax = pt.y; + } + } + else + { + CvPoint pt; + Cv32suf v; + /* 
init values */ + CV_READ_SEQ_ELEM( pt, reader ); + xmin = xmax = CV_TOGGLE_FLT(pt.x); + ymin = ymax = CV_TOGGLE_FLT(pt.y); + + for( i = 1; i < ptseq->total; i++ ) + { + CV_READ_SEQ_ELEM( pt, reader ); + pt.x = CV_TOGGLE_FLT(pt.x); + pt.y = CV_TOGGLE_FLT(pt.y); + + if( xmin > pt.x ) + xmin = pt.x; + + if( xmax < pt.x ) + xmax = pt.x; + + if( ymin > pt.y ) + ymin = pt.y; + + if( ymax < pt.y ) + ymax = pt.y; + } + + v.i = CV_TOGGLE_FLT(xmin); xmin = cvFloor(v.f); + v.i = CV_TOGGLE_FLT(ymin); ymin = cvFloor(v.f); + /* because right and bottom sides of + the bounding rectangle are not inclusive + (note +1 in width and height calculation below), + cvFloor is used here instead of cvCeil */ + v.i = CV_TOGGLE_FLT(xmax); xmax = cvFloor(v.f); + v.i = CV_TOGGLE_FLT(ymax); ymax = cvFloor(v.f); + } + } + + rect.x = xmin; + rect.y = ymin; + rect.width = xmax - xmin + 1; + rect.height = ymax - ymin + 1; + + if( update ) + ((CvContour*)ptseq)->rect = rect; + + return rect; +} + + +/* End of file. */ diff --git a/opencv/imgproc/smooth.cpp b/opencv/imgproc/smooth.cpp new file mode 100644 index 0000000..4a9c27e --- /dev/null +++ b/opencv/imgproc/smooth.cpp @@ -0,0 +1,1566 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#include "precomp.hpp" + +/* + * This file includes the code, contributed by Simon Perreault + * (the function icvMedianBlur_8u_O1) + * + * Constant-time median filtering -- http://nomis80.org/ctmf.html + * Copyright (C) 2006 Simon Perreault + * + * Contact: + * Laboratoire de vision et systemes numeriques + * Pavillon Adrien-Pouliot + * Universite Laval + * Sainte-Foy, Quebec, Canada + * G1K 7P4 + * + * perreaul@gel.ulaval.ca + */ + +namespace cv +{ + +/****************************************************************************************\ + Box Filter +\****************************************************************************************/ + +template struct RowSum : public BaseRowFilter +{ + RowSum( int _ksize, int _anchor ) + { + ksize = _ksize; + anchor = _anchor; + } + + void operator()(const uchar* src, uchar* dst, int width, int cn) + { + const T* S = (const T*)src; + ST* D = (ST*)dst; + int i = 0, k, ksz_cn = ksize*cn; + + width = (width - 1)*cn; + for( k = 0; k < cn; k++, S++, D++ ) + { + ST s = 0; + for( i = 0; i < ksz_cn; i += cn ) + s += S[i]; + D[0] = s; + for( i = 0; i < width; i += cn ) + { + s += S[i + ksz_cn] - S[i]; + D[i+cn] = s; + } + } + } +}; + + +template struct ColumnSum : public BaseColumnFilter +{ + ColumnSum( int _ksize, int _anchor, double _scale ) + { + ksize = _ksize; + anchor = _anchor; + scale = _scale; + sumCount = 0; + } + + void reset() { sumCount = 0; } + + void operator()(const uchar** src, uchar* dst, int dststep, int count, int width) + { + int i; + ST* SUM; + bool haveScale = scale != 1; + double _scale = scale; + + if( width != (int)sum.size() ) + { + sum.resize(width); + sumCount = 0; + } + + SUM = &sum[0]; + if( sumCount == 0 ) + { + for( i = 0; i < width; i++ ) + SUM[i] = 0; + for( ; sumCount < ksize - 1; sumCount++, src++ ) + { + const ST* Sp = (const ST*)src[0]; + for( i = 0; i <= width - 2; i += 2 ) + { + ST s0 = SUM[i] + Sp[i], s1 = SUM[i+1] + Sp[i+1]; + SUM[i] = s0; SUM[i+1] = s1; + } + + for( ; i 
< width; i++ ) + SUM[i] += Sp[i]; + } + } + else + { + CV_Assert( sumCount == ksize-1 ); + src += ksize-1; + } + + for( ; count--; src++ ) + { + const ST* Sp = (const ST*)src[0]; + const ST* Sm = (const ST*)src[1-ksize]; + T* D = (T*)dst; + if( haveScale ) + { + for( i = 0; i <= width - 2; i += 2 ) + { + ST s0 = SUM[i] + Sp[i], s1 = SUM[i+1] + Sp[i+1]; + D[i] = saturate_cast(s0*_scale); + D[i+1] = saturate_cast(s1*_scale); + s0 -= Sm[i]; s1 -= Sm[i+1]; + SUM[i] = s0; SUM[i+1] = s1; + } + + for( ; i < width; i++ ) + { + ST s0 = SUM[i] + Sp[i]; + D[i] = saturate_cast(s0*_scale); + SUM[i] = s0 - Sm[i]; + } + } + else + { + for( i = 0; i <= width - 2; i += 2 ) + { + ST s0 = SUM[i] + Sp[i], s1 = SUM[i+1] + Sp[i+1]; + D[i] = saturate_cast(s0); + D[i+1] = saturate_cast(s1); + s0 -= Sm[i]; s1 -= Sm[i+1]; + SUM[i] = s0; SUM[i+1] = s1; + } + + for( ; i < width; i++ ) + { + ST s0 = SUM[i] + Sp[i]; + D[i] = saturate_cast(s0); + SUM[i] = s0 - Sm[i]; + } + } + dst += dststep; + } + } + + double scale; + int sumCount; + vector sum; +}; + + +} + +cv::Ptr cv::getRowSumFilter(int srcType, int sumType, int ksize, int anchor) +{ + int sdepth = CV_MAT_DEPTH(srcType), ddepth = CV_MAT_DEPTH(sumType); + CV_Assert( CV_MAT_CN(sumType) == CV_MAT_CN(srcType) ); + + if( anchor < 0 ) + anchor = ksize/2; + + if( sdepth == CV_8U && ddepth == CV_32S ) + return Ptr(new RowSum(ksize, anchor)); + if( sdepth == CV_8U && ddepth == CV_64F ) + return Ptr(new RowSum(ksize, anchor)); + if( sdepth == CV_16U && ddepth == CV_32S ) + return Ptr(new RowSum(ksize, anchor)); + if( sdepth == CV_16U && ddepth == CV_64F ) + return Ptr(new RowSum(ksize, anchor)); + if( sdepth == CV_16S && ddepth == CV_32S ) + return Ptr(new RowSum(ksize, anchor)); + if( sdepth == CV_32S && ddepth == CV_32S ) + return Ptr(new RowSum(ksize, anchor)); + if( sdepth == CV_16S && ddepth == CV_64F ) + return Ptr(new RowSum(ksize, anchor)); + if( sdepth == CV_32F && ddepth == CV_64F ) + return Ptr(new RowSum(ksize, anchor)); + if( sdepth == 
CV_64F && ddepth == CV_64F ) + return Ptr(new RowSum(ksize, anchor)); + + CV_Error_( CV_StsNotImplemented, + ("Unsupported combination of source format (=%d), and buffer format (=%d)", + srcType, sumType)); + + return Ptr(0); +} + + +cv::Ptr cv::getColumnSumFilter(int sumType, int dstType, int ksize, + int anchor, double scale) +{ + int sdepth = CV_MAT_DEPTH(sumType), ddepth = CV_MAT_DEPTH(dstType); + CV_Assert( CV_MAT_CN(sumType) == CV_MAT_CN(dstType) ); + + if( anchor < 0 ) + anchor = ksize/2; + + if( ddepth == CV_8U && sdepth == CV_32S ) + return Ptr(new ColumnSum(ksize, anchor, scale)); + if( ddepth == CV_8U && sdepth == CV_64F ) + return Ptr(new ColumnSum(ksize, anchor, scale)); + if( ddepth == CV_16U && sdepth == CV_32S ) + return Ptr(new ColumnSum(ksize, anchor, scale)); + if( ddepth == CV_16U && sdepth == CV_64F ) + return Ptr(new ColumnSum(ksize, anchor, scale)); + if( ddepth == CV_16S && sdepth == CV_32S ) + return Ptr(new ColumnSum(ksize, anchor, scale)); + if( ddepth == CV_16S && sdepth == CV_64F ) + return Ptr(new ColumnSum(ksize, anchor, scale)); + if( ddepth == CV_32S && sdepth == CV_32S ) + return Ptr(new ColumnSum(ksize, anchor, scale)); + if( ddepth == CV_32F && sdepth == CV_32S ) + return Ptr(new ColumnSum(ksize, anchor, scale)); + if( ddepth == CV_32F && sdepth == CV_64F ) + return Ptr(new ColumnSum(ksize, anchor, scale)); + if( ddepth == CV_64F && sdepth == CV_32S ) + return Ptr(new ColumnSum(ksize, anchor, scale)); + if( ddepth == CV_64F && sdepth == CV_64F ) + return Ptr(new ColumnSum(ksize, anchor, scale)); + + CV_Error_( CV_StsNotImplemented, + ("Unsupported combination of sum format (=%d), and destination format (=%d)", + sumType, dstType)); + + return Ptr(0); +} + + +cv::Ptr cv::createBoxFilter( int srcType, int dstType, Size ksize, + Point anchor, bool normalize, int borderType ) +{ + int sdepth = CV_MAT_DEPTH(srcType); + int cn = CV_MAT_CN(srcType), sumType = CV_64F; + if( sdepth < CV_32S && (!normalize || + ksize.width*ksize.height <= 
(sdepth == CV_8U ? (1<<23) : + sdepth == CV_16U ? (1 << 15) : (1 << 16))) ) + sumType = CV_32S; + sumType = CV_MAKETYPE( sumType, cn ); + + Ptr rowFilter = getRowSumFilter(srcType, sumType, ksize.width, anchor.x ); + Ptr columnFilter = getColumnSumFilter(sumType, + dstType, ksize.height, anchor.y, normalize ? 1./(ksize.width*ksize.height) : 1); + + return Ptr(new FilterEngine(Ptr(0), rowFilter, columnFilter, + srcType, dstType, sumType, borderType )); +} + + +void cv::boxFilter( InputArray _src, OutputArray _dst, int ddepth, + Size ksize, Point anchor, + bool normalize, int borderType ) +{ + Mat src = _src.getMat(); + int sdepth = src.depth(), cn = src.channels(); + if( ddepth < 0 ) + ddepth = sdepth; + _dst.create( src.size(), CV_MAKETYPE(ddepth, cn) ); + Mat dst = _dst.getMat(); + if( borderType != BORDER_CONSTANT && normalize ) + { + if( src.rows == 1 ) + ksize.height = 1; + if( src.cols == 1 ) + ksize.width = 1; + } + Ptr f = createBoxFilter( src.type(), dst.type(), + ksize, anchor, normalize, borderType ); + f->apply( src, dst ); +} + +void cv::blur( InputArray src, OutputArray dst, + Size ksize, Point anchor, int borderType ) +{ + boxFilter( src, dst, -1, ksize, anchor, true, borderType ); +} + +/****************************************************************************************\ + Gaussian Blur +\****************************************************************************************/ + +cv::Mat cv::getGaussianKernel( int n, double sigma, int ktype ) +{ + const int SMALL_GAUSSIAN_SIZE = 7; + static const float small_gaussian_tab[][SMALL_GAUSSIAN_SIZE] = + { + {1.f}, + {0.25f, 0.5f, 0.25f}, + {0.0625f, 0.25f, 0.375f, 0.25f, 0.0625f}, + {0.03125f, 0.109375f, 0.21875f, 0.28125f, 0.21875f, 0.109375f, 0.03125f} + }; + + const float* fixed_kernel = n % 2 == 1 && n <= SMALL_GAUSSIAN_SIZE && sigma <= 0 ? 
+ small_gaussian_tab[n>>1] : 0; + + CV_Assert( ktype == CV_32F || ktype == CV_64F ); + Mat kernel(n, 1, ktype); + float* cf = (float*)kernel.data; + double* cd = (double*)kernel.data; + + double sigmaX = sigma > 0 ? sigma : ((n-1)*0.5 - 1)*0.3 + 0.8; + double scale2X = -0.5/(sigmaX*sigmaX); + double sum = 0; + + int i; + for( i = 0; i < n; i++ ) + { + double x = i - (n-1)*0.5; + double t = fixed_kernel ? (double)fixed_kernel[i] : std::exp(scale2X*x*x); + if( ktype == CV_32F ) + { + cf[i] = (float)t; + sum += cf[i]; + } + else + { + cd[i] = t; + sum += cd[i]; + } + } + + sum = 1./sum; + for( i = 0; i < n; i++ ) + { + if( ktype == CV_32F ) + cf[i] = (float)(cf[i]*sum); + else + cd[i] *= sum; + } + + return kernel; +} + + +cv::Ptr cv::createGaussianFilter( int type, Size ksize, + double sigma1, double sigma2, + int borderType ) +{ + int depth = CV_MAT_DEPTH(type); + if( sigma2 <= 0 ) + sigma2 = sigma1; + + // automatic detection of kernel size from sigma + if( ksize.width <= 0 && sigma1 > 0 ) + ksize.width = cvRound(sigma1*(depth == CV_8U ? 3 : 4)*2 + 1)|1; + if( ksize.height <= 0 && sigma2 > 0 ) + ksize.height = cvRound(sigma2*(depth == CV_8U ? 3 : 4)*2 + 1)|1; + + CV_Assert( ksize.width > 0 && ksize.width % 2 == 1 && + ksize.height > 0 && ksize.height % 2 == 1 ); + + sigma1 = std::max( sigma1, 0. ); + sigma2 = std::max( sigma2, 0. 
); + + Mat kx = getGaussianKernel( ksize.width, sigma1, std::max(depth, CV_32F) ); + Mat ky; + if( ksize.height == ksize.width && std::abs(sigma1 - sigma2) < DBL_EPSILON ) + ky = kx; + else + ky = getGaussianKernel( ksize.height, sigma2, std::max(depth, CV_32F) ); + + return createSeparableLinearFilter( type, type, kx, ky, Point(-1,-1), 0, borderType ); +} + + +void cv::GaussianBlur( InputArray _src, OutputArray _dst, Size ksize, + double sigma1, double sigma2, + int borderType ) +{ + Mat src = _src.getMat(); + _dst.create( src.size(), src.type() ); + Mat dst = _dst.getMat(); + + if( ksize.width == 1 && ksize.height == 1 ) + { + src.copyTo(dst); + return; + } + + if( borderType != BORDER_CONSTANT ) + { + if( src.rows == 1 ) + ksize.height = 1; + if( src.cols == 1 ) + ksize.width = 1; + } + Ptr f = createGaussianFilter( src.type(), ksize, sigma1, sigma2, borderType ); + f->apply( src, dst ); +} + + +/****************************************************************************************\ + Median Filter +\****************************************************************************************/ + +namespace cv +{ + +#if _MSC_VER >= 1200 +#pragma warning( disable: 4244 ) +#endif + +typedef ushort HT; + +/** + * This structure represents a two-tier histogram. The first tier (known as the + * "coarse" level) is 4 bit wide and the second tier (known as the "fine" level) + * is 8 bit wide. Pixels inserted in the fine level also get inserted into the + * coarse bucket designated by the 4 MSBs of the fine bucket value. + * + * The structure is aligned on 16 bits, which is a prerequisite for SIMD + * instructions. Each bucket is 16 bit wide, which means that extra care must be + * taken to prevent overflow. 
+ */ +typedef struct +{ + HT coarse[16]; + HT fine[16][16]; +} Histogram; + + +#if CV_SSE2 +#define MEDIAN_HAVE_SIMD 1 + +static inline void histogram_add_simd( const HT x[16], HT y[16] ) +{ + const __m128i* rx = (const __m128i*)x; + __m128i* ry = (__m128i*)y; + __m128i r0 = _mm_add_epi16(_mm_load_si128(ry+0),_mm_load_si128(rx+0)); + __m128i r1 = _mm_add_epi16(_mm_load_si128(ry+1),_mm_load_si128(rx+1)); + _mm_store_si128(ry+0, r0); + _mm_store_si128(ry+1, r1); +} + +static inline void histogram_sub_simd( const HT x[16], HT y[16] ) +{ + const __m128i* rx = (const __m128i*)x; + __m128i* ry = (__m128i*)y; + __m128i r0 = _mm_sub_epi16(_mm_load_si128(ry+0),_mm_load_si128(rx+0)); + __m128i r1 = _mm_sub_epi16(_mm_load_si128(ry+1),_mm_load_si128(rx+1)); + _mm_store_si128(ry+0, r0); + _mm_store_si128(ry+1, r1); +} + +#else +#define MEDIAN_HAVE_SIMD 0 +#endif + + +static inline void histogram_add( const HT x[16], HT y[16] ) +{ + int i; + for( i = 0; i < 16; ++i ) + y[i] = (HT)(y[i] + x[i]); +} + +static inline void histogram_sub( const HT x[16], HT y[16] ) +{ + int i; + for( i = 0; i < 16; ++i ) + y[i] = (HT)(y[i] - x[i]); +} + +static inline void histogram_muladd( int a, const HT x[16], + HT y[16] ) +{ + for( int i = 0; i < 16; ++i ) + y[i] = (HT)(y[i] + a * x[i]); +} + +static void +medianBlur_8u_O1( const Mat& _src, Mat& _dst, int ksize ) +{ +/** + * HOP is short for Histogram OPeration. This macro makes an operation \a op on + * histogram \a h for pixel value \a x. It takes care of handling both levels. 
+ */ +#define HOP(h,x,op) \ + h.coarse[x>>4] op, \ + *((HT*)h.fine + x) op + +#define COP(c,j,x,op) \ + h_coarse[ 16*(n*c+j) + (x>>4) ] op, \ + h_fine[ 16 * (n*(16*c+(x>>4)) + j) + (x & 0xF) ] op + + int cn = _dst.channels(), m = _dst.rows, r = (ksize-1)/2; + size_t sstep = _src.step, dstep = _dst.step; + Histogram CV_DECL_ALIGNED(16) H[4]; + HT CV_DECL_ALIGNED(16) luc[4][16]; + + int STRIPE_SIZE = std::min( _dst.cols, 512/cn ); + + vector _h_coarse(1 * 16 * (STRIPE_SIZE + 2*r) * cn + 16); + vector _h_fine(16 * 16 * (STRIPE_SIZE + 2*r) * cn + 16); + HT* h_coarse = alignPtr(&_h_coarse[0], 16); + HT* h_fine = alignPtr(&_h_fine[0], 16); +#if MEDIAN_HAVE_SIMD + volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2); +#endif + + for( int x = 0; x < _dst.cols; x += STRIPE_SIZE ) + { + int i, j, k, c, n = std::min(_dst.cols - x, STRIPE_SIZE) + r*2; + const uchar* src = _src.data + x*cn; + uchar* dst = _dst.data + (x - r)*cn; + + memset( h_coarse, 0, 16*n*cn*sizeof(h_coarse[0]) ); + memset( h_fine, 0, 16*16*n*cn*sizeof(h_fine[0]) ); + + // First row initialization + for( c = 0; c < cn; c++ ) + { + for( j = 0; j < n; j++ ) + COP( c, j, src[cn*j+c], += r+2 ); + + for( i = 1; i < r; i++ ) + { + const uchar* p = src + sstep*std::min(i, m-1); + for ( j = 0; j < n; j++ ) + COP( c, j, p[cn*j+c], ++ ); + } + } + + for( i = 0; i < m; i++ ) + { + const uchar* p0 = src + sstep * std::max( 0, i-r-1 ); + const uchar* p1 = src + sstep * std::min( m-1, i+r ); + + memset( H, 0, cn*sizeof(H[0]) ); + memset( luc, 0, cn*sizeof(luc[0]) ); + for( c = 0; c < cn; c++ ) + { + // Update column histograms for the entire row. 
+ for( j = 0; j < n; j++ ) + { + COP( c, j, p0[j*cn + c], -- ); + COP( c, j, p1[j*cn + c], ++ ); + } + + // First column initialization + for( k = 0; k < 16; ++k ) + histogram_muladd( 2*r+1, &h_fine[16*n*(16*c+k)], &H[c].fine[k][0] ); + + #if MEDIAN_HAVE_SIMD + if( useSIMD ) + { + for( j = 0; j < 2*r; ++j ) + histogram_add_simd( &h_coarse[16*(n*c+j)], H[c].coarse ); + + for( j = r; j < n-r; j++ ) + { + int t = 2*r*r + 2*r, b, sum = 0; + HT* segment; + + histogram_add_simd( &h_coarse[16*(n*c + std::min(j+r,n-1))], H[c].coarse ); + + // Find median at coarse level + for ( k = 0; k < 16 ; ++k ) + { + sum += H[c].coarse[k]; + if ( sum > t ) + { + sum -= H[c].coarse[k]; + break; + } + } + assert( k < 16 ); + + /* Update corresponding histogram segment */ + if ( luc[c][k] <= j-r ) + { + memset( &H[c].fine[k], 0, 16 * sizeof(HT) ); + for ( luc[c][k] = j-r; luc[c][k] < MIN(j+r+1,n); ++luc[c][k] ) + histogram_add_simd( &h_fine[16*(n*(16*c+k)+luc[c][k])], H[c].fine[k] ); + + if ( luc[c][k] < j+r+1 ) + { + histogram_muladd( j+r+1 - n, &h_fine[16*(n*(16*c+k)+(n-1))], &H[c].fine[k][0] ); + luc[c][k] = (HT)(j+r+1); + } + } + else + { + for ( ; luc[c][k] < j+r+1; ++luc[c][k] ) + { + histogram_sub_simd( &h_fine[16*(n*(16*c+k)+MAX(luc[c][k]-2*r-1,0))], H[c].fine[k] ); + histogram_add_simd( &h_fine[16*(n*(16*c+k)+MIN(luc[c][k],n-1))], H[c].fine[k] ); + } + } + + histogram_sub_simd( &h_coarse[16*(n*c+MAX(j-r,0))], H[c].coarse ); + + /* Find median in segment */ + segment = H[c].fine[k]; + for ( b = 0; b < 16 ; b++ ) + { + sum += segment[b]; + if ( sum > t ) + { + dst[dstep*i+cn*j+c] = (uchar)(16*k + b); + break; + } + } + assert( b < 16 ); + } + } + else + #endif + { + for( j = 0; j < 2*r; ++j ) + histogram_add( &h_coarse[16*(n*c+j)], H[c].coarse ); + + for( j = r; j < n-r; j++ ) + { + int t = 2*r*r + 2*r, b, sum = 0; + HT* segment; + + histogram_add( &h_coarse[16*(n*c + std::min(j+r,n-1))], H[c].coarse ); + + // Find median at coarse level + for ( k = 0; k < 16 ; ++k ) + { + sum += 
H[c].coarse[k]; + if ( sum > t ) + { + sum -= H[c].coarse[k]; + break; + } + } + assert( k < 16 ); + + /* Update corresponding histogram segment */ + if ( luc[c][k] <= j-r ) + { + memset( &H[c].fine[k], 0, 16 * sizeof(HT) ); + for ( luc[c][k] = j-r; luc[c][k] < MIN(j+r+1,n); ++luc[c][k] ) + histogram_add( &h_fine[16*(n*(16*c+k)+luc[c][k])], H[c].fine[k] ); + + if ( luc[c][k] < j+r+1 ) + { + histogram_muladd( j+r+1 - n, &h_fine[16*(n*(16*c+k)+(n-1))], &H[c].fine[k][0] ); + luc[c][k] = (HT)(j+r+1); + } + } + else + { + for ( ; luc[c][k] < j+r+1; ++luc[c][k] ) + { + histogram_sub( &h_fine[16*(n*(16*c+k)+MAX(luc[c][k]-2*r-1,0))], H[c].fine[k] ); + histogram_add( &h_fine[16*(n*(16*c+k)+MIN(luc[c][k],n-1))], H[c].fine[k] ); + } + } + + histogram_sub( &h_coarse[16*(n*c+MAX(j-r,0))], H[c].coarse ); + + /* Find median in segment */ + segment = H[c].fine[k]; + for ( b = 0; b < 16 ; b++ ) + { + sum += segment[b]; + if ( sum > t ) + { + dst[dstep*i+cn*j+c] = (uchar)(16*k + b); + break; + } + } + assert( b < 16 ); + } + } + } + } + } + +#undef HOP +#undef COP +} + + +#if _MSC_VER >= 1200 +#pragma warning( default: 4244 ) +#endif + +static void +medianBlur_8u_Om( const Mat& _src, Mat& _dst, int m ) +{ + #define N 16 + int zone0[4][N]; + int zone1[4][N*N]; + int x, y; + int n2 = m*m/2; + Size size = _dst.size(); + const uchar* src = _src.data; + uchar* dst = _dst.data; + int src_step = (int)_src.step, dst_step = (int)_dst.step; + int cn = _src.channels(); + const uchar* src_max = src + size.height*src_step; + + #define UPDATE_ACC01( pix, cn, op ) \ + { \ + int p = (pix); \ + zone1[cn][p] op; \ + zone0[cn][p >> 4] op; \ + } + + //CV_Assert( size.height >= nx && size.width >= nx ); + for( x = 0; x < size.width; x++, src += cn, dst += cn ) + { + uchar* dst_cur = dst; + const uchar* src_top = src; + const uchar* src_bottom = src; + int k, c; + int src_step1 = src_step, dst_step1 = dst_step; + + if( x % 2 != 0 ) + { + src_bottom = src_top += src_step*(size.height-1); + dst_cur += 
dst_step*(size.height-1); + src_step1 = -src_step1; + dst_step1 = -dst_step1; + } + + // init accumulator + memset( zone0, 0, sizeof(zone0[0])*cn ); + memset( zone1, 0, sizeof(zone1[0])*cn ); + + for( y = 0; y <= m/2; y++ ) + { + for( c = 0; c < cn; c++ ) + { + if( y > 0 ) + { + for( k = 0; k < m*cn; k += cn ) + UPDATE_ACC01( src_bottom[k+c], c, ++ ); + } + else + { + for( k = 0; k < m*cn; k += cn ) + UPDATE_ACC01( src_bottom[k+c], c, += m/2+1 ); + } + } + + if( (src_step1 > 0 && y < size.height-1) || + (src_step1 < 0 && size.height-y-1 > 0) ) + src_bottom += src_step1; + } + + for( y = 0; y < size.height; y++, dst_cur += dst_step1 ) + { + // find median + for( c = 0; c < cn; c++ ) + { + int s = 0; + for( k = 0; ; k++ ) + { + int t = s + zone0[c][k]; + if( t > n2 ) break; + s = t; + } + + for( k *= N; ;k++ ) + { + s += zone1[c][k]; + if( s > n2 ) break; + } + + dst_cur[c] = (uchar)k; + } + + if( y+1 == size.height ) + break; + + if( cn == 1 ) + { + for( k = 0; k < m; k++ ) + { + int p = src_top[k]; + int q = src_bottom[k]; + zone1[0][p]--; + zone0[0][p>>4]--; + zone1[0][q]++; + zone0[0][q>>4]++; + } + } + else if( cn == 3 ) + { + for( k = 0; k < m*3; k += 3 ) + { + UPDATE_ACC01( src_top[k], 0, -- ); + UPDATE_ACC01( src_top[k+1], 1, -- ); + UPDATE_ACC01( src_top[k+2], 2, -- ); + + UPDATE_ACC01( src_bottom[k], 0, ++ ); + UPDATE_ACC01( src_bottom[k+1], 1, ++ ); + UPDATE_ACC01( src_bottom[k+2], 2, ++ ); + } + } + else + { + assert( cn == 4 ); + for( k = 0; k < m*4; k += 4 ) + { + UPDATE_ACC01( src_top[k], 0, -- ); + UPDATE_ACC01( src_top[k+1], 1, -- ); + UPDATE_ACC01( src_top[k+2], 2, -- ); + UPDATE_ACC01( src_top[k+3], 3, -- ); + + UPDATE_ACC01( src_bottom[k], 0, ++ ); + UPDATE_ACC01( src_bottom[k+1], 1, ++ ); + UPDATE_ACC01( src_bottom[k+2], 2, ++ ); + UPDATE_ACC01( src_bottom[k+3], 3, ++ ); + } + } + + if( (src_step1 > 0 && src_bottom + src_step1 < src_max) || + (src_step1 < 0 && src_bottom + src_step1 >= src) ) + src_bottom += src_step1; + + if( y >= m/2 ) + 
src_top += src_step1; + } + } +#undef N +#undef UPDATE_ACC +} + + +struct MinMax8u +{ + typedef uchar value_type; + typedef int arg_type; + enum { SIZE = 1 }; + arg_type load(const uchar* ptr) { return *ptr; } + void store(uchar* ptr, arg_type val) { *ptr = (uchar)val; } + void operator()(arg_type& a, arg_type& b) const + { + int t = CV_FAST_CAST_8U(a - b); + b += t; a -= t; + } +}; + +struct MinMax16u +{ + typedef ushort value_type; + typedef int arg_type; + enum { SIZE = 1 }; + arg_type load(const ushort* ptr) { return *ptr; } + void store(ushort* ptr, arg_type val) { *ptr = (ushort)val; } + void operator()(arg_type& a, arg_type& b) const + { + arg_type t = a; + a = std::min(a, b); + b = std::max(b, t); + } +}; + +struct MinMax16s +{ + typedef short value_type; + typedef int arg_type; + enum { SIZE = 1 }; + arg_type load(const short* ptr) { return *ptr; } + void store(short* ptr, arg_type val) { *ptr = (short)val; } + void operator()(arg_type& a, arg_type& b) const + { + arg_type t = a; + a = std::min(a, b); + b = std::max(b, t); + } +}; + +struct MinMax32f +{ + typedef float value_type; + typedef float arg_type; + enum { SIZE = 1 }; + arg_type load(const float* ptr) { return *ptr; } + void store(float* ptr, arg_type val) { *ptr = val; } + void operator()(arg_type& a, arg_type& b) const + { + arg_type t = a; + a = std::min(a, b); + b = std::max(b, t); + } +}; + +#if CV_SSE2 + +struct MinMaxVec8u +{ + typedef uchar value_type; + typedef __m128i arg_type; + enum { SIZE = 16 }; + arg_type load(const uchar* ptr) { return _mm_loadu_si128((const __m128i*)ptr); } + void store(uchar* ptr, arg_type val) { _mm_storeu_si128((__m128i*)ptr, val); } + void operator()(arg_type& a, arg_type& b) const + { + arg_type t = a; + a = _mm_min_epu8(a, b); + b = _mm_max_epu8(b, t); + } +}; + + +struct MinMaxVec16u +{ + typedef ushort value_type; + typedef __m128i arg_type; + enum { SIZE = 8 }; + arg_type load(const ushort* ptr) { return _mm_loadu_si128((const __m128i*)ptr); } + void 
store(ushort* ptr, arg_type val) { _mm_storeu_si128((__m128i*)ptr, val); } + void operator()(arg_type& a, arg_type& b) const + { + arg_type t = _mm_subs_epu16(a, b); + a = _mm_subs_epu16(a, t); + b = _mm_adds_epu16(b, t); + } +}; + + +struct MinMaxVec16s +{ + typedef short value_type; + typedef __m128i arg_type; + enum { SIZE = 8 }; + arg_type load(const short* ptr) { return _mm_loadu_si128((const __m128i*)ptr); } + void store(short* ptr, arg_type val) { _mm_storeu_si128((__m128i*)ptr, val); } + void operator()(arg_type& a, arg_type& b) const + { + arg_type t = a; + a = _mm_min_epi16(a, b); + b = _mm_max_epi16(b, t); + } +}; + + +struct MinMaxVec32f +{ + typedef float value_type; + typedef __m128 arg_type; + enum { SIZE = 4 }; + arg_type load(const float* ptr) { return _mm_loadu_ps(ptr); } + void store(float* ptr, arg_type val) { _mm_storeu_ps(ptr, val); } + void operator()(arg_type& a, arg_type& b) const + { + arg_type t = a; + a = _mm_min_ps(a, b); + b = _mm_max_ps(b, t); + } +}; + + +#else + +typedef MinMax8u MinMaxVec8u; +typedef MinMax16u MinMaxVec16u; +typedef MinMax16s MinMaxVec16s; +typedef MinMax32f MinMaxVec32f; + +#endif + +template +static void +medianBlur_SortNet( const Mat& _src, Mat& _dst, int m ) +{ + typedef typename Op::value_type T; + typedef typename Op::arg_type WT; + typedef typename VecOp::arg_type VT; + + const T* src = (const T*)_src.data; + T* dst = (T*)_dst.data; + int sstep = (int)(_src.step/sizeof(T)); + int dstep = (int)(_dst.step/sizeof(T)); + Size size = _dst.size(); + int i, j, k, cn = _src.channels(); + Op op; + VecOp vop; + volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2); + + if( m == 3 ) + { + if( size.width == 1 || size.height == 1 ) + { + int len = size.width + size.height - 1; + int sdelta = size.height == 1 ? cn : sstep; + int sdelta0 = size.height == 1 ? 0 : sstep - cn; + int ddelta = size.height == 1 ? 
cn : dstep; + + for( i = 0; i < len; i++, src += sdelta0, dst += ddelta ) + for( j = 0; j < cn; j++, src++ ) + { + WT p0 = src[i > 0 ? -sdelta : 0]; + WT p1 = src[0]; + WT p2 = src[i < len - 1 ? sdelta : 0]; + + op(p0, p1); op(p1, p2); op(p0, p1); + dst[j] = (T)p1; + } + return; + } + + size.width *= cn; + for( i = 0; i < size.height; i++, dst += dstep ) + { + const T* row0 = src + std::max(i - 1, 0)*sstep; + const T* row1 = src + i*sstep; + const T* row2 = src + std::min(i + 1, size.height-1)*sstep; + int limit = useSIMD ? cn : size.width; + + for(j = 0;; ) + { + for( ; j < limit; j++ ) + { + int j0 = j >= cn ? j - cn : j; + int j2 = j < size.width - cn ? j + cn : j; + WT p0 = row0[j0], p1 = row0[j], p2 = row0[j2]; + WT p3 = row1[j0], p4 = row1[j], p5 = row1[j2]; + WT p6 = row2[j0], p7 = row2[j], p8 = row2[j2]; + + op(p1, p2); op(p4, p5); op(p7, p8); op(p0, p1); + op(p3, p4); op(p6, p7); op(p1, p2); op(p4, p5); + op(p7, p8); op(p0, p3); op(p5, p8); op(p4, p7); + op(p3, p6); op(p1, p4); op(p2, p5); op(p4, p7); + op(p4, p2); op(p6, p4); op(p4, p2); + dst[j] = (T)p4; + } + + if( limit == size.width ) + break; + + for( ; j <= size.width - VecOp::SIZE - cn; j += VecOp::SIZE ) + { + VT p0 = vop.load(row0+j-cn), p1 = vop.load(row0+j), p2 = vop.load(row0+j+cn); + VT p3 = vop.load(row1+j-cn), p4 = vop.load(row1+j), p5 = vop.load(row1+j+cn); + VT p6 = vop.load(row2+j-cn), p7 = vop.load(row2+j), p8 = vop.load(row2+j+cn); + + vop(p1, p2); vop(p4, p5); vop(p7, p8); vop(p0, p1); + vop(p3, p4); vop(p6, p7); vop(p1, p2); vop(p4, p5); + vop(p7, p8); vop(p0, p3); vop(p5, p8); vop(p4, p7); + vop(p3, p6); vop(p1, p4); vop(p2, p5); vop(p4, p7); + vop(p4, p2); vop(p6, p4); vop(p4, p2); + vop.store(dst+j, p4); + } + + limit = size.width; + } + } + } + else if( m == 5 ) + { + if( size.width == 1 || size.height == 1 ) + { + int len = size.width + size.height - 1; + int sdelta = size.height == 1 ? cn : sstep; + int sdelta0 = size.height == 1 ? 
0 : sstep - cn; + int ddelta = size.height == 1 ? cn : dstep; + + for( i = 0; i < len; i++, src += sdelta0, dst += ddelta ) + for( j = 0; j < cn; j++, src++ ) + { + int i1 = i > 0 ? -sdelta : 0; + int i0 = i > 1 ? -sdelta*2 : i1; + int i3 = i < len-1 ? sdelta : 0; + int i4 = i < len-2 ? sdelta*2 : i3; + WT p0 = src[i0], p1 = src[i1], p2 = src[0], p3 = src[i3], p4 = src[i4]; + + op(p0, p1); op(p3, p4); op(p2, p3); op(p3, p4); op(p0, p2); + op(p2, p4); op(p1, p3); op(p1, p2); + dst[j] = (T)p2; + } + return; + } + + size.width *= cn; + for( i = 0; i < size.height; i++, dst += dstep ) + { + const T* row[5]; + row[0] = src + std::max(i - 2, 0)*sstep; + row[1] = src + std::max(i - 1, 0)*sstep; + row[2] = src + i*sstep; + row[3] = src + std::min(i + 1, size.height-1)*sstep; + row[4] = src + std::min(i + 2, size.height-1)*sstep; + int limit = useSIMD ? cn*2 : size.width; + + for(j = 0;; ) + { + for( ; j < limit; j++ ) + { + WT p[25]; + int j1 = j >= cn ? j - cn : j; + int j0 = j >= cn*2 ? j - cn*2 : j1; + int j3 = j < size.width - cn ? j + cn : j; + int j4 = j < size.width - cn*2 ? 
j + cn*2 : j3; + for( k = 0; k < 5; k++ ) + { + const T* rowk = row[k]; + p[k*5] = rowk[j0]; p[k*5+1] = rowk[j1]; + p[k*5+2] = rowk[j]; p[k*5+3] = rowk[j3]; + p[k*5+4] = rowk[j4]; + } + + op(p[1], p[2]); op(p[0], p[1]); op(p[1], p[2]); op(p[4], p[5]); op(p[3], p[4]); + op(p[4], p[5]); op(p[0], p[3]); op(p[2], p[5]); op(p[2], p[3]); op(p[1], p[4]); + op(p[1], p[2]); op(p[3], p[4]); op(p[7], p[8]); op(p[6], p[7]); op(p[7], p[8]); + op(p[10], p[11]); op(p[9], p[10]); op(p[10], p[11]); op(p[6], p[9]); op(p[8], p[11]); + op(p[8], p[9]); op(p[7], p[10]); op(p[7], p[8]); op(p[9], p[10]); op(p[0], p[6]); + op(p[4], p[10]); op(p[4], p[6]); op(p[2], p[8]); op(p[2], p[4]); op(p[6], p[8]); + op(p[1], p[7]); op(p[5], p[11]); op(p[5], p[7]); op(p[3], p[9]); op(p[3], p[5]); + op(p[7], p[9]); op(p[1], p[2]); op(p[3], p[4]); op(p[5], p[6]); op(p[7], p[8]); + op(p[9], p[10]); op(p[13], p[14]); op(p[12], p[13]); op(p[13], p[14]); op(p[16], p[17]); + op(p[15], p[16]); op(p[16], p[17]); op(p[12], p[15]); op(p[14], p[17]); op(p[14], p[15]); + op(p[13], p[16]); op(p[13], p[14]); op(p[15], p[16]); op(p[19], p[20]); op(p[18], p[19]); + op(p[19], p[20]); op(p[21], p[22]); op(p[23], p[24]); op(p[21], p[23]); op(p[22], p[24]); + op(p[22], p[23]); op(p[18], p[21]); op(p[20], p[23]); op(p[20], p[21]); op(p[19], p[22]); + op(p[22], p[24]); op(p[19], p[20]); op(p[21], p[22]); op(p[23], p[24]); op(p[12], p[18]); + op(p[16], p[22]); op(p[16], p[18]); op(p[14], p[20]); op(p[20], p[24]); op(p[14], p[16]); + op(p[18], p[20]); op(p[22], p[24]); op(p[13], p[19]); op(p[17], p[23]); op(p[17], p[19]); + op(p[15], p[21]); op(p[15], p[17]); op(p[19], p[21]); op(p[13], p[14]); op(p[15], p[16]); + op(p[17], p[18]); op(p[19], p[20]); op(p[21], p[22]); op(p[23], p[24]); op(p[0], p[12]); + op(p[8], p[20]); op(p[8], p[12]); op(p[4], p[16]); op(p[16], p[24]); op(p[12], p[16]); + op(p[2], p[14]); op(p[10], p[22]); op(p[10], p[14]); op(p[6], p[18]); op(p[6], p[10]); + op(p[10], p[12]); op(p[1], p[13]); op(p[9], 
p[21]); op(p[9], p[13]); op(p[5], p[17]); + op(p[13], p[17]); op(p[3], p[15]); op(p[11], p[23]); op(p[11], p[15]); op(p[7], p[19]); + op(p[7], p[11]); op(p[11], p[13]); op(p[11], p[12]); + dst[j] = (T)p[12]; + } + + if( limit == size.width ) + break; + + for( ; j <= size.width - VecOp::SIZE - cn*2; j += VecOp::SIZE ) + { + VT p[25]; + for( k = 0; k < 5; k++ ) + { + const T* rowk = row[k]; + p[k*5] = vop.load(rowk+j-cn*2); p[k*5+1] = vop.load(rowk+j-cn); + p[k*5+2] = vop.load(rowk+j); p[k*5+3] = vop.load(rowk+j+cn); + p[k*5+4] = vop.load(rowk+j+cn*2); + } + + vop(p[1], p[2]); vop(p[0], p[1]); vop(p[1], p[2]); vop(p[4], p[5]); vop(p[3], p[4]); + vop(p[4], p[5]); vop(p[0], p[3]); vop(p[2], p[5]); vop(p[2], p[3]); vop(p[1], p[4]); + vop(p[1], p[2]); vop(p[3], p[4]); vop(p[7], p[8]); vop(p[6], p[7]); vop(p[7], p[8]); + vop(p[10], p[11]); vop(p[9], p[10]); vop(p[10], p[11]); vop(p[6], p[9]); vop(p[8], p[11]); + vop(p[8], p[9]); vop(p[7], p[10]); vop(p[7], p[8]); vop(p[9], p[10]); vop(p[0], p[6]); + vop(p[4], p[10]); vop(p[4], p[6]); vop(p[2], p[8]); vop(p[2], p[4]); vop(p[6], p[8]); + vop(p[1], p[7]); vop(p[5], p[11]); vop(p[5], p[7]); vop(p[3], p[9]); vop(p[3], p[5]); + vop(p[7], p[9]); vop(p[1], p[2]); vop(p[3], p[4]); vop(p[5], p[6]); vop(p[7], p[8]); + vop(p[9], p[10]); vop(p[13], p[14]); vop(p[12], p[13]); vop(p[13], p[14]); vop(p[16], p[17]); + vop(p[15], p[16]); vop(p[16], p[17]); vop(p[12], p[15]); vop(p[14], p[17]); vop(p[14], p[15]); + vop(p[13], p[16]); vop(p[13], p[14]); vop(p[15], p[16]); vop(p[19], p[20]); vop(p[18], p[19]); + vop(p[19], p[20]); vop(p[21], p[22]); vop(p[23], p[24]); vop(p[21], p[23]); vop(p[22], p[24]); + vop(p[22], p[23]); vop(p[18], p[21]); vop(p[20], p[23]); vop(p[20], p[21]); vop(p[19], p[22]); + vop(p[22], p[24]); vop(p[19], p[20]); vop(p[21], p[22]); vop(p[23], p[24]); vop(p[12], p[18]); + vop(p[16], p[22]); vop(p[16], p[18]); vop(p[14], p[20]); vop(p[20], p[24]); vop(p[14], p[16]); + vop(p[18], p[20]); vop(p[22], p[24]); vop(p[13], 
p[19]); vop(p[17], p[23]); vop(p[17], p[19]); + vop(p[15], p[21]); vop(p[15], p[17]); vop(p[19], p[21]); vop(p[13], p[14]); vop(p[15], p[16]); + vop(p[17], p[18]); vop(p[19], p[20]); vop(p[21], p[22]); vop(p[23], p[24]); vop(p[0], p[12]); + vop(p[8], p[20]); vop(p[8], p[12]); vop(p[4], p[16]); vop(p[16], p[24]); vop(p[12], p[16]); + vop(p[2], p[14]); vop(p[10], p[22]); vop(p[10], p[14]); vop(p[6], p[18]); vop(p[6], p[10]); + vop(p[10], p[12]); vop(p[1], p[13]); vop(p[9], p[21]); vop(p[9], p[13]); vop(p[5], p[17]); + vop(p[13], p[17]); vop(p[3], p[15]); vop(p[11], p[23]); vop(p[11], p[15]); vop(p[7], p[19]); + vop(p[7], p[11]); vop(p[11], p[13]); vop(p[11], p[12]); + vop.store(dst+j, p[12]); + } + + limit = size.width; + } + } + } +} + +} + +void cv::medianBlur( InputArray _src0, OutputArray _dst, int ksize ) +{ + Mat src0 = _src0.getMat(); + _dst.create( src0.size(), src0.type() ); + Mat dst = _dst.getMat(); + + if( ksize <= 1 ) + { + src0.copyTo(dst); + return; + } + + CV_Assert( ksize % 2 == 1 ); + + Size size = src0.size(); + int cn = src0.channels(); + bool useSortNet = ksize == 3 || (ksize == 5 +#if !CV_SSE2 + && src0.depth() > CV_8U +#endif + ); + + Mat src; + if( useSortNet ) + { + if( dst.data != src0.data ) + src = src0; + else + src0.copyTo(src); + } + else + cv::copyMakeBorder( src0, src, 0, 0, ksize/2, ksize/2, BORDER_REPLICATE ); + + if( useSortNet ) + { + if( src.depth() == CV_8U ) + medianBlur_SortNet( src, dst, ksize ); + else if( src.depth() == CV_16U ) + medianBlur_SortNet( src, dst, ksize ); + else if( src.depth() == CV_16S ) + medianBlur_SortNet( src, dst, ksize ); + else if( src.depth() == CV_32F ) + medianBlur_SortNet( src, dst, ksize ); + else + CV_Error(CV_StsUnsupportedFormat, ""); + return; + } + + CV_Assert( src.depth() == CV_8U && (cn == 1 || cn == 3 || cn == 4) ); + + double img_size_mp = (double)(size.width*size.height)/(1 << 20); + if( ksize <= 3 + (img_size_mp < 1 ? 12 : img_size_mp < 4 ? 
6 : 2)*(MEDIAN_HAVE_SIMD && checkHardwareSupport(CV_CPU_SSE2) ? 1 : 3)) + medianBlur_8u_Om( src, dst, ksize ); + else + medianBlur_8u_O1( src, dst, ksize ); +} + +/****************************************************************************************\ + Bilateral Filtering +\****************************************************************************************/ + +namespace cv +{ + +static void +bilateralFilter_8u( const Mat& src, Mat& dst, int d, + double sigma_color, double sigma_space, + int borderType ) +{ + int cn = src.channels(); + int i, j, k, maxk, radius; + Size size = src.size(); + + CV_Assert( (src.type() == CV_8UC1 || src.type() == CV_8UC3) && + src.type() == dst.type() && src.size() == dst.size() && + src.data != dst.data ); + + if( sigma_color <= 0 ) + sigma_color = 1; + if( sigma_space <= 0 ) + sigma_space = 1; + + double gauss_color_coeff = -0.5/(sigma_color*sigma_color); + double gauss_space_coeff = -0.5/(sigma_space*sigma_space); + + if( d <= 0 ) + radius = cvRound(sigma_space*1.5); + else + radius = d/2; + radius = MAX(radius, 1); + d = radius*2 + 1; + + Mat temp; + copyMakeBorder( src, temp, radius, radius, radius, radius, borderType ); + + vector _color_weight(cn*256); + vector _space_weight(d*d); + vector _space_ofs(d*d); + float* color_weight = &_color_weight[0]; + float* space_weight = &_space_weight[0]; + int* space_ofs = &_space_ofs[0]; + + // initialize color-related bilateral filter coefficients + for( i = 0; i < 256*cn; i++ ) + color_weight[i] = (float)std::exp(i*i*gauss_color_coeff); + + // initialize space-related bilateral filter coefficients + for( i = -radius, maxk = 0; i <= radius; i++ ) + for( j = -radius; j <= radius; j++ ) + { + double r = std::sqrt((double)i*i + (double)j*j); + if( r > radius ) + continue; + space_weight[maxk] = (float)std::exp(r*r*gauss_space_coeff); + space_ofs[maxk++] = (int)(i*temp.step + j*cn); + } + + for( i = 0; i < size.height; i++ ) + { + const uchar* sptr = temp.data + (i+radius)*temp.step + 
radius*cn; + uchar* dptr = dst.data + i*dst.step; + + if( cn == 1 ) + { + for( j = 0; j < size.width; j++ ) + { + float sum = 0, wsum = 0; + int val0 = sptr[j]; + for( k = 0; k < maxk; k++ ) + { + int val = sptr[j + space_ofs[k]]; + float w = space_weight[k]*color_weight[std::abs(val - val0)]; + sum += val*w; + wsum += w; + } + // overflow is not possible here => there is no need to use CV_CAST_8U + dptr[j] = (uchar)cvRound(sum/wsum); + } + } + else + { + assert( cn == 3 ); + for( j = 0; j < size.width*3; j += 3 ) + { + float sum_b = 0, sum_g = 0, sum_r = 0, wsum = 0; + int b0 = sptr[j], g0 = sptr[j+1], r0 = sptr[j+2]; + for( k = 0; k < maxk; k++ ) + { + const uchar* sptr_k = sptr + j + space_ofs[k]; + int b = sptr_k[0], g = sptr_k[1], r = sptr_k[2]; + float w = space_weight[k]*color_weight[std::abs(b - b0) + + std::abs(g - g0) + std::abs(r - r0)]; + sum_b += b*w; sum_g += g*w; sum_r += r*w; + wsum += w; + } + wsum = 1.f/wsum; + b0 = cvRound(sum_b*wsum); + g0 = cvRound(sum_g*wsum); + r0 = cvRound(sum_r*wsum); + dptr[j] = (uchar)b0; dptr[j+1] = (uchar)g0; dptr[j+2] = (uchar)r0; + } + } + } +} + + +static void +bilateralFilter_32f( const Mat& src, Mat& dst, int d, + double sigma_color, double sigma_space, + int borderType ) +{ + int cn = src.channels(); + int i, j, k, maxk, radius; + double minValSrc=-1, maxValSrc=1; + const int kExpNumBinsPerChannel = 1 << 12; + int kExpNumBins = 0; + float lastExpVal = 1.f; + float len, scale_index; + Size size = src.size(); + + CV_Assert( (src.type() == CV_32FC1 || src.type() == CV_32FC3) && + src.type() == dst.type() && src.size() == dst.size() && + src.data != dst.data ); + + if( sigma_color <= 0 ) + sigma_color = 1; + if( sigma_space <= 0 ) + sigma_space = 1; + + double gauss_color_coeff = -0.5/(sigma_color*sigma_color); + double gauss_space_coeff = -0.5/(sigma_space*sigma_space); + + if( d <= 0 ) + radius = cvRound(sigma_space*1.5); + else + radius = d/2; + radius = MAX(radius, 1); + d = radius*2 + 1; + // compute the min/max 
range for the input image (even if multichannel) + + minMaxLoc( src.reshape(1), &minValSrc, &maxValSrc ); + + // temporary copy of the image with borders for easy processing + Mat temp; + copyMakeBorder( src, temp, radius, radius, radius, radius, borderType ); + + // allocate lookup tables + vector _space_weight(d*d); + vector _space_ofs(d*d); + float* space_weight = &_space_weight[0]; + int* space_ofs = &_space_ofs[0]; + + // assign a length which is slightly more than needed + len = (float)(maxValSrc - minValSrc) * cn; + kExpNumBins = kExpNumBinsPerChannel * cn; + vector _expLUT(kExpNumBins+2); + float* expLUT = &_expLUT[0]; + + scale_index = kExpNumBins/len; + + // initialize the exp LUT + for( i = 0; i < kExpNumBins+2; i++ ) + { + if( lastExpVal > 0.f ) + { + double val = i / scale_index; + expLUT[i] = (float)std::exp(val * val * gauss_color_coeff); + lastExpVal = expLUT[i]; + } + else + expLUT[i] = 0.f; + } + + // initialize space-related bilateral filter coefficients + for( i = -radius, maxk = 0; i <= radius; i++ ) + for( j = -radius; j <= radius; j++ ) + { + double r = std::sqrt((double)i*i + (double)j*j); + if( r > radius ) + continue; + space_weight[maxk] = (float)std::exp(r*r*gauss_space_coeff); + space_ofs[maxk++] = (int)(i*(temp.step/sizeof(float)) + j*cn); + } + + for( i = 0; i < size.height; i++ ) + { + const float* sptr = (const float*)(temp.data + (i+radius)*temp.step) + radius*cn; + float* dptr = (float*)(dst.data + i*dst.step); + + if( cn == 1 ) + { + for( j = 0; j < size.width; j++ ) + { + float sum = 0, wsum = 0; + float val0 = sptr[j]; + for( k = 0; k < maxk; k++ ) + { + float val = sptr[j + space_ofs[k]]; + float alpha = (float)(std::abs(val - val0)*scale_index); + int idx = cvFloor(alpha); + alpha -= idx; + float w = space_weight[k]*(expLUT[idx] + alpha*(expLUT[idx+1] - expLUT[idx])); + sum += val*w; + wsum += w; + } + dptr[j] = (float)(sum/wsum); + } + } + else + { + assert( cn == 3 ); + for( j = 0; j < size.width*3; j += 3 ) + { + float 
sum_b = 0, sum_g = 0, sum_r = 0, wsum = 0; + float b0 = sptr[j], g0 = sptr[j+1], r0 = sptr[j+2]; + for( k = 0; k < maxk; k++ ) + { + const float* sptr_k = sptr + j + space_ofs[k]; + float b = sptr_k[0], g = sptr_k[1], r = sptr_k[2]; + float alpha = (float)((std::abs(b - b0) + + std::abs(g - g0) + std::abs(r - r0))*scale_index); + int idx = cvFloor(alpha); + alpha -= idx; + float w = space_weight[k]*(expLUT[idx] + alpha*(expLUT[idx+1] - expLUT[idx])); + sum_b += b*w; sum_g += g*w; sum_r += r*w; + wsum += w; + } + wsum = 1.f/wsum; + b0 = sum_b*wsum; + g0 = sum_g*wsum; + r0 = sum_r*wsum; + dptr[j] = b0; dptr[j+1] = g0; dptr[j+2] = r0; + } + } + } +} + +} + +void cv::bilateralFilter( InputArray _src, OutputArray _dst, int d, + double sigmaColor, double sigmaSpace, + int borderType ) +{ + Mat src = _src.getMat(); + _dst.create( src.size(), src.type() ); + Mat dst = _dst.getMat(); + + if( src.depth() == CV_8U ) + bilateralFilter_8u( src, dst, d, sigmaColor, sigmaSpace, borderType ); + else if( src.depth() == CV_32F ) + bilateralFilter_32f( src, dst, d, sigmaColor, sigmaSpace, borderType ); + else + CV_Error( CV_StsUnsupportedFormat, + "Bilateral filtering is only implemented for 8u and 32f images" ); +} + +////////////////////////////////////////////////////////////////////////////////////////// + +CV_IMPL void +cvSmooth( const void* srcarr, void* dstarr, int smooth_type, + int param1, int param2, double param3, double param4 ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst0 = cv::cvarrToMat(dstarr), dst = dst0; + + CV_Assert( dst.size() == src.size() && + (smooth_type == CV_BLUR_NO_SCALE || dst.type() == src.type()) ); + + if( param2 <= 0 ) + param2 = param1; + + if( smooth_type == CV_BLUR || smooth_type == CV_BLUR_NO_SCALE ) + cv::boxFilter( src, dst, dst.depth(), cv::Size(param1, param2), cv::Point(-1,-1), + smooth_type == CV_BLUR, cv::BORDER_REPLICATE ); + else if( smooth_type == CV_GAUSSIAN ) + cv::GaussianBlur( src, dst, cv::Size(param1, param2), param3, param4, 
cv::BORDER_REPLICATE ); + else if( smooth_type == CV_MEDIAN ) + cv::medianBlur( src, dst, param1 ); + else + cv::bilateralFilter( src, dst, param1, param3, param4, cv::BORDER_REPLICATE ); + + if( dst.data != dst0.data ) + CV_Error( CV_StsUnmatchedFormats, "The destination image does not have the proper type" ); +} + +/* End of file. */ diff --git a/opencv/imgproc/spilltree.cpp b/opencv/imgproc/spilltree.cpp new file mode 100644 index 0000000..a2c7599 --- /dev/null +++ b/opencv/imgproc/spilltree.cpp @@ -0,0 +1,498 @@ +/* Original code has been submitted by Liu Liu. + ---------------------------------------------------------------------------------- + * Spill-Tree for Approximate KNN Search + * Author: Liu Liu + * mailto: liuliu.1987+opencv@gmail.com + * Refer to Paper: + * An Investigation of Practical Approximate Nearest Neighbor Algorithms + * cvMergeSpillTree TBD + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * The name of Contributor may not be used to endorse or + * promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND + * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL THE CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, + * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY + * OF SUCH DAMAGE. + */ + +#include "precomp.hpp" +#include "_featuretree.h" + +struct CvSpillTreeNode +{ + bool leaf; // is leaf or not (leaf is the point that have no more child) + bool spill; // is not a non-overlapping point (defeatist search) + CvSpillTreeNode* lc; // left child (<) + CvSpillTreeNode* rc; // right child (>) + int cc; // child count + CvMat* u; // projection vector + CvMat* center; // center + int i; // original index + double r; // radius of remaining feature point + double ub; // upper bound + double lb; // lower bound + double mp; // mean point + double p; // projection value +}; + +struct CvSpillTree +{ + CvSpillTreeNode* root; + CvMat** refmat; // leaf ref matrix + int total; // total leaves + int naive; // under this value, we perform naive search + int type; // mat type + double rho; // under this value, it is a spill tree + double tau; // the overlapping buffer ratio +}; + +struct CvResult +{ + int index; + double distance; +}; + +// find the farthest node in the "list" from "node" +static inline CvSpillTreeNode* +icvFarthestNode( CvSpillTreeNode* node, + CvSpillTreeNode* list, + int total ) +{ + double farthest = -1.; + CvSpillTreeNode* result = NULL; + for ( int i = 0; i < total; i++ ) + { + double norm = cvNorm( node->center, list->center ); + if ( norm > farthest ) + { + farthest = norm; + result = list; + } + list = list->rc; + } + return result; +} + +// clone a new tree node +static inline CvSpillTreeNode* +icvCloneSpillTreeNode( 
CvSpillTreeNode* node ) +{ + CvSpillTreeNode* result = (CvSpillTreeNode*)cvAlloc( sizeof(CvSpillTreeNode) ); + memcpy( result, node, sizeof(CvSpillTreeNode) ); + return result; +} + +// append the link-list of a tree node +static inline void +icvAppendSpillTreeNode( CvSpillTreeNode* node, + CvSpillTreeNode* append ) +{ + if ( node->lc == NULL ) + { + node->lc = node->rc = append; + node->lc->lc = node->rc->rc = NULL; + } else { + append->lc = node->rc; + append->rc = NULL; + node->rc->rc = append; + node->rc = append; + } + node->cc++; +} + +#define _dispatch_mat_ptr(x, step) (CV_MAT_DEPTH((x)->type) == CV_32F ? (void*)((x)->data.fl+(step)) : (CV_MAT_DEPTH((x)->type) == CV_64F ? (void*)((x)->data.db+(step)) : (void*)(0))) + +static void +icvDFSInitSpillTreeNode( const CvSpillTree* tr, + const int d, + CvSpillTreeNode* node ) +{ + if ( node->cc <= tr->naive ) + { + // already get to a leaf, terminate the recursion. + node->leaf = true; + node->spill = false; + return; + } + + // random select a node, then find a farthest node from this one, then find a farthest from that one... 
+ // to approximate the farthest node-pair + static CvRNG rng_state = cvRNG(0xdeadbeef); + int rn = cvRandInt( &rng_state ) % node->cc; + CvSpillTreeNode* lnode = NULL; + CvSpillTreeNode* rnode = node->lc; + for ( int i = 0; i < rn; i++ ) + rnode = rnode->rc; + lnode = icvFarthestNode( rnode, node->lc, node->cc ); + rnode = icvFarthestNode( lnode, node->lc, node->cc ); + + // u is the projection vector + node->u = cvCreateMat( 1, d, tr->type ); + cvSub( lnode->center, rnode->center, node->u ); + cvNormalize( node->u, node->u ); + + // find the center of node in hyperspace + node->center = cvCreateMat( 1, d, tr->type ); + cvZero( node->center ); + CvSpillTreeNode* it = node->lc; + for ( int i = 0; i < node->cc; i++ ) + { + cvAdd( it->center, node->center, node->center ); + it = it->rc; + } + cvConvertScale( node->center, node->center, 1./node->cc ); + + // project every node to "u", and find the mean point "mp" + it = node->lc; + node->r = -1.; + node->mp = 0; + for ( int i = 0; i < node->cc; i++ ) + { + node->mp += ( it->p = cvDotProduct( it->center, node->u ) ); + double norm = cvNorm( node->center, it->center ); + if ( norm > node->r ) + node->r = norm; + it = it->rc; + } + node->mp = node->mp / node->cc; + + // overlapping buffer and upper bound, lower bound + double ob = (lnode->p-rnode->p)*tr->tau*.5; + node->ub = node->mp+ob; + node->lb = node->mp-ob; + int sl = 0, l = 0; + int sr = 0, r = 0; + it = node->lc; + for ( int i = 0; i < node->cc; i++ ) + { + if ( it->p <= node->ub ) + sl++; + if ( it->p >= node->lb ) + sr++; + if ( it->p < node->mp ) + l++; + else + r++; + it = it->rc; + } + // precision problem, return the node as it is. 
+ if (( l == 0 )||( r == 0 )) + { + cvReleaseMat( &(node->u) ); + cvReleaseMat( &(node->center) ); + node->leaf = true; + node->spill = false; + return; + } + CvSpillTreeNode* lc = (CvSpillTreeNode*)cvAlloc( sizeof(CvSpillTreeNode) ); + memset(lc, 0, sizeof(CvSpillTreeNode)); + CvSpillTreeNode* rc = (CvSpillTreeNode*)cvAlloc( sizeof(CvSpillTreeNode) ); + memset(rc, 0, sizeof(CvSpillTreeNode)); + lc->lc = lc->rc = rc->lc = rc->rc = NULL; + lc->cc = rc->cc = 0; + int undo = cvRound(node->cc*tr->rho); + if (( sl >= undo )||( sr >= undo )) + { + // it is not a spill point (defeatist search disabled) + it = node->lc; + for ( int i = 0; i < node->cc; i++ ) + { + CvSpillTreeNode* next = it->rc; + if ( it->p < node->mp ) + icvAppendSpillTreeNode( lc, it ); + else + icvAppendSpillTreeNode( rc, it ); + it = next; + } + node->spill = false; + } else { + // a spill point + it = node->lc; + for ( int i = 0; i < node->cc; i++ ) + { + CvSpillTreeNode* next = it->rc; + if ( it->p < node->lb ) + icvAppendSpillTreeNode( lc, it ); + else if ( it->p > node->ub ) + icvAppendSpillTreeNode( rc, it ); + else { + CvSpillTreeNode* cit = icvCloneSpillTreeNode( it ); + icvAppendSpillTreeNode( lc, it ); + icvAppendSpillTreeNode( rc, cit ); + } + it = next; + } + node->spill = true; + } + node->lc = lc; + node->rc = rc; + + // recursion process + icvDFSInitSpillTreeNode( tr, d, node->lc ); + icvDFSInitSpillTreeNode( tr, d, node->rc ); +} + +static CvSpillTree* +icvCreateSpillTree( const CvMat* raw_data, + const int naive, + const double rho, + const double tau ) +{ + int n = raw_data->rows; + int d = raw_data->cols; + + CvSpillTree* tr = (CvSpillTree*)cvAlloc( sizeof(CvSpillTree) ); + tr->root = (CvSpillTreeNode*)cvAlloc( sizeof(CvSpillTreeNode) ); + memset(tr->root, 0, sizeof(CvSpillTreeNode)); + tr->refmat = (CvMat**)cvAlloc( sizeof(CvMat*)*n ); + tr->total = n; + tr->naive = naive; + tr->rho = rho; + tr->tau = tau; + tr->type = raw_data->type; + + // tie a link-list to the root node + 
tr->root->lc = (CvSpillTreeNode*)cvAlloc( sizeof(CvSpillTreeNode) ); + memset(tr->root->lc, 0, sizeof(CvSpillTreeNode)); + tr->root->lc->center = cvCreateMatHeader( 1, d, tr->type ); + cvSetData( tr->root->lc->center, _dispatch_mat_ptr(raw_data, 0), raw_data->step ); + tr->refmat[0] = tr->root->lc->center; + tr->root->lc->lc = NULL; + tr->root->lc->leaf = true; + tr->root->lc->i = 0; + CvSpillTreeNode* node = tr->root->lc; + for ( int i = 1; i < n; i++ ) + { + CvSpillTreeNode* newnode = (CvSpillTreeNode*)cvAlloc( sizeof(CvSpillTreeNode) ); + memset(newnode, 0, sizeof(CvSpillTreeNode)); + newnode->center = cvCreateMatHeader( 1, d, tr->type ); + cvSetData( newnode->center, _dispatch_mat_ptr(raw_data, i*d), raw_data->step ); + tr->refmat[i] = newnode->center; + newnode->lc = node; + newnode->i = i; + newnode->leaf = true; + newnode->rc = NULL; + node->rc = newnode; + node = newnode; + } + tr->root->rc = node; + tr->root->cc = n; + icvDFSInitSpillTreeNode( tr, d, tr->root ); + return tr; +} + +static void +icvSpillTreeNodeHeapify( CvResult * heap, + int i, + const int k ) +{ + if ( heap[i].index == -1 ) + return; + int l, r, largest = i; + CvResult inp; + do { + i = largest; + r = (i+1)<<1; + l = r-1; + if (( l < k )&&( heap[l].index == -1 )) + largest = l; + else if (( r < k )&&( heap[r].index == -1 )) + largest = r; + else { + if (( l < k )&&( heap[l].distance > heap[i].distance )) + largest = l; + if (( r < k )&&( heap[r].distance > heap[largest].distance )) + largest = r; + } + if ( largest != i ) + CV_SWAP( heap[largest], heap[i], inp ); + } while ( largest != i ); +} + +static void +icvSpillTreeDFSearch( CvSpillTree* tr, + CvSpillTreeNode* node, + CvResult* heap, + int* es, + const CvMat* desc, + const int k, + const int emax, + bool * cache) +{ + if ((emax > 0)&&( *es >= emax )) + return; + double dist, p=0; + double distance; + while ( node->spill ) + { + // defeatist search + if ( !node->leaf ) + p = cvDotProduct( node->u, desc ); + if ( p < node->lb && 
node->lc->cc >= k ) // check the number of children larger than k otherwise you'll skip over better neighbor + node = node->lc; + else if ( p > node->ub && node->rc->cc >= k ) + node = node->rc; + else + break; + if ( NULL == node ) + return; + } + if ( node->leaf ) + { + // a leaf, naive search + CvSpillTreeNode* it = node->lc; + for ( int i = 0; i < node->cc; i++ ) + { + if ( !cache[it->i] ) + { + distance = cvNorm( it->center, desc ); + cache[it->i] = true; + if (( heap[0].index == -1)||( distance < heap[0].distance )) + { + CvResult current_result; + current_result.index = it->i; + current_result.distance = distance; + heap[0] = current_result; + icvSpillTreeNodeHeapify( heap, 0, k ); + (*es)++; + } + } + it = it->rc; + } + return; + } + dist = cvNorm( node->center, desc ); + // impossible case, skip + if (( heap[0].index != -1 )&&( dist-node->r > heap[0].distance )) + return; + p = cvDotProduct( node->u, desc ); + // guided dfs + if ( p < node->mp ) + { + icvSpillTreeDFSearch( tr, node->lc, heap, es, desc, k, emax, cache ); + icvSpillTreeDFSearch( tr, node->rc, heap, es, desc, k, emax, cache ); + } else { + icvSpillTreeDFSearch( tr, node->rc, heap, es, desc, k, emax, cache ); + icvSpillTreeDFSearch( tr, node->lc, heap, es, desc, k, emax, cache ); + } +} + +static void +icvFindSpillTreeFeatures( CvSpillTree* tr, + const CvMat* desc, + CvMat* results, + CvMat* dist, + const int k, + const int emax ) +{ + assert( desc->type == tr->type ); + CvResult* heap = (CvResult*)cvAlloc( k*sizeof(heap[0]) ); + bool* cache = (bool*)cvAlloc( sizeof(bool)*tr->total ); + for ( int j = 0; j < desc->rows; j++ ) + { + CvMat _desc = cvMat( 1, desc->cols, desc->type, _dispatch_mat_ptr(desc, j*desc->cols) ); + for ( int i = 0; i < k; i++ ) { + CvResult current; + current.index=-1; + current.distance=-1; + heap[i] = current; + } + memset( cache, 0, sizeof(bool)*tr->total ); + int es = 0; + icvSpillTreeDFSearch( tr, tr->root, heap, &es, &_desc, k, emax, cache ); + CvResult inp; + for ( 
int i = k-1; i > 0; i-- ) + { + CV_SWAP( heap[i], heap[0], inp ); + icvSpillTreeNodeHeapify( heap, 0, i ); + } + int* rs = results->data.i+j*results->cols; + double* dt = dist->data.db+j*dist->cols; + for ( int i = 0; i < k; i++, rs++, dt++ ) + if ( heap[i].index != -1 ) + { + *rs = heap[i].index; + *dt = heap[i].distance; + } else + *rs = -1; + } + cvFree( &heap ); + cvFree( &cache ); +} + +static void +icvDFSReleaseSpillTreeNode( CvSpillTreeNode* node ) +{ + if ( node->leaf ) + { + CvSpillTreeNode* it = node->lc; + for ( int i = 0; i < node->cc; i++ ) + { + CvSpillTreeNode* s = it; + it = it->rc; + cvFree( &s ); + } + } else { + cvReleaseMat( &node->u ); + cvReleaseMat( &node->center ); + icvDFSReleaseSpillTreeNode( node->lc ); + icvDFSReleaseSpillTreeNode( node->rc ); + } + cvFree( &node ); +} + +static void +icvReleaseSpillTree( CvSpillTree** tr ) +{ + for ( int i = 0; i < (*tr)->total; i++ ) + cvReleaseMat( &((*tr)->refmat[i]) ); + cvFree( &((*tr)->refmat) ); + icvDFSReleaseSpillTreeNode( (*tr)->root ); + cvFree( tr ); +} + +class CvSpillTreeWrap : public CvFeatureTree { + CvSpillTree* tr; +public: + CvSpillTreeWrap(const CvMat* raw_data, + const int naive, + const double rho, + const double tau) { + tr = icvCreateSpillTree(raw_data, naive, rho, tau); + } + ~CvSpillTreeWrap() { + icvReleaseSpillTree(&tr); + } + + void FindFeatures(const CvMat* desc, int k, int emax, CvMat* results, CvMat* dist) { + icvFindSpillTreeFeatures(tr, desc, results, dist, k, emax); + } +}; + +CvFeatureTree* cvCreateSpillTree( const CvMat* raw_data, + const int naive, + const double rho, + const double tau ) { + return new CvSpillTreeWrap(raw_data, naive, rho, tau); +} diff --git a/opencv/imgproc/subdivision2d.cpp b/opencv/imgproc/subdivision2d.cpp new file mode 100644 index 0000000..43df79c --- /dev/null +++ b/opencv/imgproc/subdivision2d.cpp @@ -0,0 +1,1494 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE 
DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ +#include "precomp.hpp" + +CV_IMPL CvSubdiv2D * +cvCreateSubdiv2D( int subdiv_type, int header_size, + int vtx_size, int quadedge_size, CvMemStorage * storage ) +{ + if( !storage ) + CV_Error( CV_StsNullPtr, "" ); + + if( header_size < (int)sizeof( CvSubdiv2D ) || + quadedge_size < (int)sizeof( CvQuadEdge2D ) || + vtx_size < (int)sizeof( CvSubdiv2DPoint )) + CV_Error( CV_StsBadSize, "" ); + + return (CvSubdiv2D *)cvCreateGraph( subdiv_type, header_size, + vtx_size, quadedge_size, storage ); +} + + +/****************************************************************************************\ +* Quad Edge algebra * +\****************************************************************************************/ + +static CvSubdiv2DEdge +cvSubdiv2DMakeEdge( CvSubdiv2D * subdiv ) +{ + if( !subdiv ) + CV_Error( CV_StsNullPtr, "" ); + + CvQuadEdge2D* edge = (CvQuadEdge2D*)cvSetNew( (CvSet*)subdiv->edges ); + memset( edge->pt, 0, sizeof( edge->pt )); + CvSubdiv2DEdge edgehandle = (CvSubdiv2DEdge) edge; + + edge->next[0] = edgehandle; + edge->next[1] = edgehandle + 3; + edge->next[2] = edgehandle + 2; + edge->next[3] = edgehandle + 1; + + subdiv->quad_edges++; + return edgehandle; +} + + +static CvSubdiv2DPoint * +cvSubdiv2DAddPoint( CvSubdiv2D * subdiv, CvPoint2D32f pt, int is_virtual ) +{ + CvSubdiv2DPoint* subdiv_point = (CvSubdiv2DPoint*)cvSetNew( (CvSet*)subdiv ); + if( subdiv_point ) + { + memset( subdiv_point, 0, subdiv->elem_size ); + subdiv_point->pt = pt; + subdiv_point->first = 0; + subdiv_point->flags |= is_virtual ? 
CV_SUBDIV2D_VIRTUAL_POINT_FLAG : 0; + subdiv_point->id = -1; + } + + return subdiv_point; +} + + +static void +cvSubdiv2DSplice( CvSubdiv2DEdge edgeA, CvSubdiv2DEdge edgeB ) +{ + CvSubdiv2DEdge *a_next = &CV_SUBDIV2D_NEXT_EDGE( edgeA ); + CvSubdiv2DEdge *b_next = &CV_SUBDIV2D_NEXT_EDGE( edgeB ); + CvSubdiv2DEdge a_rot = cvSubdiv2DRotateEdge( *a_next, 1 ); + CvSubdiv2DEdge b_rot = cvSubdiv2DRotateEdge( *b_next, 1 ); + CvSubdiv2DEdge *a_rot_next = &CV_SUBDIV2D_NEXT_EDGE( a_rot ); + CvSubdiv2DEdge *b_rot_next = &CV_SUBDIV2D_NEXT_EDGE( b_rot ); + CvSubdiv2DEdge t; + + CV_SWAP( *a_next, *b_next, t ); + CV_SWAP( *a_rot_next, *b_rot_next, t ); +} + + +static void +cvSubdiv2DSetEdgePoints( CvSubdiv2DEdge edge, + CvSubdiv2DPoint * org_pt, CvSubdiv2DPoint * dst_pt ) +{ + CvQuadEdge2D *quadedge = (CvQuadEdge2D *) (edge & ~3); + + if( !quadedge ) + CV_Error( CV_StsNullPtr, "" ); + + quadedge->pt[edge & 3] = org_pt; + quadedge->pt[(edge + 2) & 3] = dst_pt; +} + + +static void +cvSubdiv2DDeleteEdge( CvSubdiv2D * subdiv, CvSubdiv2DEdge edge ) +{ + CvQuadEdge2D *quadedge = (CvQuadEdge2D *) (edge & ~3); + + if( !subdiv || !quadedge ) + CV_Error( CV_StsNullPtr, "" ); + + cvSubdiv2DSplice( edge, cvSubdiv2DGetEdge( edge, CV_PREV_AROUND_ORG )); + + CvSubdiv2DEdge sym_edge = cvSubdiv2DSymEdge( edge ); + cvSubdiv2DSplice( sym_edge, cvSubdiv2DGetEdge( sym_edge, CV_PREV_AROUND_ORG )); + + cvSetRemoveByPtr( (CvSet*)(subdiv->edges), quadedge ); + subdiv->quad_edges--; +} + + +static CvSubdiv2DEdge +cvSubdiv2DConnectEdges( CvSubdiv2D * subdiv, CvSubdiv2DEdge edgeA, CvSubdiv2DEdge edgeB ) +{ + if( !subdiv ) + CV_Error( CV_StsNullPtr, "" ); + + CvSubdiv2DEdge new_edge = cvSubdiv2DMakeEdge( subdiv ); + + cvSubdiv2DSplice( new_edge, cvSubdiv2DGetEdge( edgeA, CV_NEXT_AROUND_LEFT )); + cvSubdiv2DSplice( cvSubdiv2DSymEdge( new_edge ), edgeB ); + + CvSubdiv2DPoint* dstA = cvSubdiv2DEdgeDst( edgeA ); + CvSubdiv2DPoint* orgB = cvSubdiv2DEdgeOrg( edgeB ); + cvSubdiv2DSetEdgePoints( new_edge, dstA, orgB 
); + + return new_edge; +} + + +static void +cvSubdiv2DSwapEdges( CvSubdiv2DEdge edge ) +{ + CvSubdiv2DEdge sym_edge = cvSubdiv2DSymEdge( edge ); + CvSubdiv2DEdge a = cvSubdiv2DGetEdge( edge, CV_PREV_AROUND_ORG ); + CvSubdiv2DEdge b = cvSubdiv2DGetEdge( sym_edge, CV_PREV_AROUND_ORG ); + CvSubdiv2DPoint *dstB, *dstA; + + cvSubdiv2DSplice( edge, a ); + cvSubdiv2DSplice( sym_edge, b ); + + dstA = cvSubdiv2DEdgeDst( a ); + dstB = cvSubdiv2DEdgeDst( b ); + cvSubdiv2DSetEdgePoints( edge, dstA, dstB ); + + cvSubdiv2DSplice( edge, cvSubdiv2DGetEdge( a, CV_NEXT_AROUND_LEFT )); + cvSubdiv2DSplice( sym_edge, cvSubdiv2DGetEdge( b, CV_NEXT_AROUND_LEFT )); +} + + +static int +icvIsRightOf( CvPoint2D32f& pt, CvSubdiv2DEdge edge ) +{ + CvSubdiv2DPoint *org = cvSubdiv2DEdgeOrg(edge), *dst = cvSubdiv2DEdgeDst(edge); + double cw_area = cvTriangleArea( pt, dst->pt, org->pt ); + + return (cw_area > 0) - (cw_area < 0); +} + + +CV_IMPL CvSubdiv2DPointLocation +cvSubdiv2DLocate( CvSubdiv2D * subdiv, CvPoint2D32f pt, + CvSubdiv2DEdge * _edge, CvSubdiv2DPoint ** _point ) +{ + CvSubdiv2DPoint *point = 0; + int right_of_curr = 0; + + if( !subdiv ) + CV_Error( CV_StsNullPtr, "" ); + + if( !CV_IS_SUBDIV2D(subdiv) ) + CV_Error( CV_StsBadFlag, "" ); + + int i, max_edges = subdiv->quad_edges * 4; + CvSubdiv2DEdge edge = subdiv->recent_edge; + + if( max_edges == 0 ) + CV_Error( CV_StsBadSize, "" ); + CV_Assert(edge != 0); + + if( pt.x < subdiv->topleft.x || pt.y < subdiv->topleft.y || + pt.x >= subdiv->bottomright.x || pt.y >= subdiv->bottomright.y ) + CV_Error( CV_StsOutOfRange, "" ); + + CvSubdiv2DPointLocation location = CV_PTLOC_ERROR; + + right_of_curr = icvIsRightOf( pt, edge ); + if( right_of_curr > 0 ) + { + edge = cvSubdiv2DSymEdge( edge ); + right_of_curr = -right_of_curr; + } + + for( i = 0; i < max_edges; i++ ) + { + CvSubdiv2DEdge onext_edge = cvSubdiv2DNextEdge( edge ); + CvSubdiv2DEdge dprev_edge = cvSubdiv2DGetEdge( edge, CV_PREV_AROUND_DST ); + + int right_of_onext = icvIsRightOf( 
pt, onext_edge ); + int right_of_dprev = icvIsRightOf( pt, dprev_edge ); + + if( right_of_dprev > 0 ) + { + if( right_of_onext > 0 || (right_of_onext == 0 && right_of_curr == 0) ) + { + location = CV_PTLOC_INSIDE; + goto exit; + } + else + { + right_of_curr = right_of_onext; + edge = onext_edge; + } + } + else + { + if( right_of_onext > 0 ) + { + if( right_of_dprev == 0 && right_of_curr == 0 ) + { + location = CV_PTLOC_INSIDE; + goto exit; + } + else + { + right_of_curr = right_of_dprev; + edge = dprev_edge; + } + } + else if( right_of_curr == 0 && + icvIsRightOf( cvSubdiv2DEdgeDst( onext_edge )->pt, edge ) >= 0 ) + { + edge = cvSubdiv2DSymEdge( edge ); + } + else + { + right_of_curr = right_of_onext; + edge = onext_edge; + } + } + } +exit: + + subdiv->recent_edge = edge; + + if( location == CV_PTLOC_INSIDE ) + { + double t1, t2, t3; + CvPoint2D32f org_pt = cvSubdiv2DEdgeOrg( edge )->pt; + CvPoint2D32f dst_pt = cvSubdiv2DEdgeDst( edge )->pt; + + t1 = fabs( pt.x - org_pt.x ); + t1 += fabs( pt.y - org_pt.y ); + t2 = fabs( pt.x - dst_pt.x ); + t2 += fabs( pt.y - dst_pt.y ); + t3 = fabs( org_pt.x - dst_pt.x ); + t3 += fabs( org_pt.y - dst_pt.y ); + + if( t1 < FLT_EPSILON ) + { + location = CV_PTLOC_VERTEX; + point = cvSubdiv2DEdgeOrg( edge ); + edge = 0; + } + else if( t2 < FLT_EPSILON ) + { + location = CV_PTLOC_VERTEX; + point = cvSubdiv2DEdgeDst( edge ); + edge = 0; + } + else if( (t1 < t3 || t2 < t3) && + fabs( cvTriangleArea( pt, org_pt, dst_pt )) < FLT_EPSILON ) + { + location = CV_PTLOC_ON_EDGE; + point = 0; + } + } + + if( location == CV_PTLOC_ERROR ) + { + edge = 0; + point = 0; + } + + if( _edge ) + *_edge = edge; + if( _point ) + *_point = point; + + return location; +} + + +CV_INLINE int +icvIsPtInCircle3( CvPoint2D32f pt, CvPoint2D32f a, CvPoint2D32f b, CvPoint2D32f c ) +{ + const double eps = FLT_EPSILON*0.125; + double val = ((double)a.x * a.x + (double)a.y * a.y) * cvTriangleArea( b, c, pt ); + val -= ((double)b.x * b.x + (double)b.y * b.y) * 
cvTriangleArea( a, c, pt ); + val += ((double)c.x * c.x + (double)c.y * c.y) * cvTriangleArea( a, b, pt ); + val -= ((double)pt.x * pt.x + (double)pt.y * pt.y) * cvTriangleArea( a, b, c ); + + return val > eps ? 1 : val < -eps ? -1 : 0; +} + + +CV_IMPL CvSubdiv2DPoint * +cvSubdivDelaunay2DInsert( CvSubdiv2D * subdiv, CvPoint2D32f pt ) +{ + CvSubdiv2DPoint *point = 0; + CvSubdiv2DPointLocation location = CV_PTLOC_ERROR; + + CvSubdiv2DPoint *curr_point = 0, *first_point = 0; + CvSubdiv2DEdge curr_edge = 0, deleted_edge = 0, base_edge = 0; + int i, max_edges; + + if( !subdiv ) + CV_Error( CV_StsNullPtr, "" ); + + if( !CV_IS_SUBDIV2D(subdiv) ) + CV_Error( CV_StsBadFlag, "" ); + + location = cvSubdiv2DLocate( subdiv, pt, &curr_edge, &curr_point ); + + switch (location) + { + case CV_PTLOC_ERROR: + CV_Error( CV_StsBadSize, "" ); + + case CV_PTLOC_OUTSIDE_RECT: + CV_Error( CV_StsOutOfRange, "" ); + + case CV_PTLOC_VERTEX: + point = curr_point; + break; + + case CV_PTLOC_ON_EDGE: + deleted_edge = curr_edge; + subdiv->recent_edge = curr_edge = cvSubdiv2DGetEdge( curr_edge, CV_PREV_AROUND_ORG ); + cvSubdiv2DDeleteEdge( subdiv, deleted_edge ); + /* no break */ + + case CV_PTLOC_INSIDE: + + assert( curr_edge != 0 ); + subdiv->is_geometry_valid = 0; + + curr_point = cvSubdiv2DAddPoint( subdiv, pt, 0 ); + base_edge = cvSubdiv2DMakeEdge( subdiv ); + first_point = cvSubdiv2DEdgeOrg( curr_edge ); + cvSubdiv2DSetEdgePoints( base_edge, first_point, curr_point ); + cvSubdiv2DSplice( base_edge, curr_edge ); + + do + { + base_edge = cvSubdiv2DConnectEdges( subdiv, curr_edge, + cvSubdiv2DSymEdge( base_edge )); + curr_edge = cvSubdiv2DGetEdge( base_edge, CV_PREV_AROUND_ORG ); + } + while( cvSubdiv2DEdgeDst( curr_edge ) != first_point ); + + curr_edge = cvSubdiv2DGetEdge( base_edge, CV_PREV_AROUND_ORG ); + + max_edges = subdiv->quad_edges * 4; + + for( i = 0; i < max_edges; i++ ) + { + CvSubdiv2DPoint *temp_dst = 0, *curr_org = 0, *curr_dst = 0; + CvSubdiv2DEdge temp_edge = 
cvSubdiv2DGetEdge( curr_edge, CV_PREV_AROUND_ORG ); + + temp_dst = cvSubdiv2DEdgeDst( temp_edge ); + curr_org = cvSubdiv2DEdgeOrg( curr_edge ); + curr_dst = cvSubdiv2DEdgeDst( curr_edge ); + + if( icvIsRightOf( temp_dst->pt, curr_edge ) > 0 && + icvIsPtInCircle3( curr_org->pt, temp_dst->pt, + curr_dst->pt, curr_point->pt ) < 0 ) + { + cvSubdiv2DSwapEdges( curr_edge ); + curr_edge = cvSubdiv2DGetEdge( curr_edge, CV_PREV_AROUND_ORG ); + } + else if( curr_org == first_point ) + { + break; + } + else + { + curr_edge = cvSubdiv2DGetEdge( cvSubdiv2DNextEdge( curr_edge ), + CV_PREV_AROUND_LEFT ); + } + } + break; + default: + CV_Error_(CV_StsError, ("cvSubdiv2DLocate returned invalid location = %d", location) ); + } + + return curr_point; +} + + +CV_IMPL void +cvInitSubdivDelaunay2D( CvSubdiv2D * subdiv, CvRect rect ) +{ + float big_coord = 3.f * MAX( rect.width, rect.height ); + CvPoint2D32f ppA, ppB, ppC; + CvSubdiv2DPoint *pA, *pB, *pC; + CvSubdiv2DEdge edge_AB, edge_BC, edge_CA; + float rx = (float) rect.x; + float ry = (float) rect.y; + + if( !subdiv ) + CV_Error( CV_StsNullPtr, "" ); + + cvClearSet( (CvSet *) (subdiv->edges) ); + cvClearSet( (CvSet *) subdiv ); + + subdiv->quad_edges = 0; + subdiv->recent_edge = 0; + subdiv->is_geometry_valid = 0; + + subdiv->topleft = cvPoint2D32f( rx, ry ); + subdiv->bottomright = cvPoint2D32f( rx + rect.width, ry + rect.height ); + + ppA = cvPoint2D32f( rx + big_coord, ry ); + ppB = cvPoint2D32f( rx, ry + big_coord ); + ppC = cvPoint2D32f( rx - big_coord, ry - big_coord ); + + pA = cvSubdiv2DAddPoint( subdiv, ppA, 0 ); + pB = cvSubdiv2DAddPoint( subdiv, ppB, 0 ); + pC = cvSubdiv2DAddPoint( subdiv, ppC, 0 ); + + edge_AB = cvSubdiv2DMakeEdge( subdiv ); + edge_BC = cvSubdiv2DMakeEdge( subdiv ); + edge_CA = cvSubdiv2DMakeEdge( subdiv ); + + cvSubdiv2DSetEdgePoints( edge_AB, pA, pB ); + cvSubdiv2DSetEdgePoints( edge_BC, pB, pC ); + cvSubdiv2DSetEdgePoints( edge_CA, pC, pA ); + + cvSubdiv2DSplice( edge_AB, cvSubdiv2DSymEdge( edge_CA 
)); + cvSubdiv2DSplice( edge_BC, cvSubdiv2DSymEdge( edge_AB )); + cvSubdiv2DSplice( edge_CA, cvSubdiv2DSymEdge( edge_BC )); + + subdiv->recent_edge = edge_AB; +} + + +CV_IMPL void +cvClearSubdivVoronoi2D( CvSubdiv2D * subdiv ) +{ + int elem_size; + int i, total; + CvSeqReader reader; + + if( !subdiv ) + CV_Error( CV_StsNullPtr, "" ); + + /* clear pointers to voronoi points */ + total = subdiv->edges->total; + elem_size = subdiv->edges->elem_size; + + cvStartReadSeq( (CvSeq *) (subdiv->edges), &reader, 0 ); + + for( i = 0; i < total; i++ ) + { + CvQuadEdge2D *quadedge = (CvQuadEdge2D *) reader.ptr; + + quadedge->pt[1] = quadedge->pt[3] = 0; + CV_NEXT_SEQ_ELEM( elem_size, reader ); + } + + /* remove voronoi points */ + total = subdiv->total; + elem_size = subdiv->elem_size; + + cvStartReadSeq( (CvSeq *) subdiv, &reader, 0 ); + + for( i = 0; i < total; i++ ) + { + CvSubdiv2DPoint *pt = (CvSubdiv2DPoint *) reader.ptr; + + /* check for virtual point. it is also check that the point exists */ + if( pt->flags & CV_SUBDIV2D_VIRTUAL_POINT_FLAG ) + { + cvSetRemoveByPtr( (CvSet*)subdiv, pt ); + } + CV_NEXT_SEQ_ELEM( elem_size, reader ); + } + + subdiv->is_geometry_valid = 0; +} + + +CV_IMPL void +cvCalcSubdivVoronoi2D( CvSubdiv2D * subdiv ) +{ + CvSeqReader reader; + int i, total, elem_size; + + if( !subdiv ) + CV_Error( CV_StsNullPtr, "" ); + + /* check if it is already calculated */ + if( subdiv->is_geometry_valid ) + return; + + total = subdiv->edges->total; + elem_size = subdiv->edges->elem_size; + + cvClearSubdivVoronoi2D( subdiv ); + + cvStartReadSeq( (CvSeq *) (subdiv->edges), &reader, 0 ); + + if( total <= 3 ) + return; + + /* skip first three edges (bounding triangle) */ + for( i = 0; i < 3; i++ ) + CV_NEXT_SEQ_ELEM( elem_size, reader ); + + /* loop through all quad-edges */ + for( ; i < total; i++ ) + { + CvQuadEdge2D *quadedge = (CvQuadEdge2D *) (reader.ptr); + + if( CV_IS_SET_ELEM( quadedge )) + { + CvSubdiv2DEdge edge0 = (CvSubdiv2DEdge) quadedge, edge1, edge2; + 
double a0, b0, c0, a1, b1, c1; + CvPoint2D32f virt_point; + CvSubdiv2DPoint *voronoi_point; + + if( !quadedge->pt[3] ) + { + edge1 = cvSubdiv2DGetEdge( edge0, CV_NEXT_AROUND_LEFT ); + edge2 = cvSubdiv2DGetEdge( edge1, CV_NEXT_AROUND_LEFT ); + + icvCreateCenterNormalLine( edge0, &a0, &b0, &c0 ); + icvCreateCenterNormalLine( edge1, &a1, &b1, &c1 ); + + icvIntersectLines3( &a0, &b0, &c0, &a1, &b1, &c1, &virt_point ); + if( fabs( virt_point.x ) < FLT_MAX * 0.5 && + fabs( virt_point.y ) < FLT_MAX * 0.5 ) + { + voronoi_point = cvSubdiv2DAddPoint( subdiv, virt_point, 1 ); + + quadedge->pt[3] = + ((CvQuadEdge2D *) (edge1 & ~3))->pt[3 - (edge1 & 2)] = + ((CvQuadEdge2D *) (edge2 & ~3))->pt[3 - (edge2 & 2)] = voronoi_point; + } + } + + if( !quadedge->pt[1] ) + { + edge1 = cvSubdiv2DGetEdge( edge0, CV_NEXT_AROUND_RIGHT ); + edge2 = cvSubdiv2DGetEdge( edge1, CV_NEXT_AROUND_RIGHT ); + + icvCreateCenterNormalLine( edge0, &a0, &b0, &c0 ); + icvCreateCenterNormalLine( edge1, &a1, &b1, &c1 ); + + icvIntersectLines3( &a0, &b0, &c0, &a1, &b1, &c1, &virt_point ); + + if( fabs( virt_point.x ) < FLT_MAX * 0.5 && + fabs( virt_point.y ) < FLT_MAX * 0.5 ) + { + voronoi_point = cvSubdiv2DAddPoint( subdiv, virt_point, 1 ); + + quadedge->pt[1] = + ((CvQuadEdge2D *) (edge1 & ~3))->pt[1 + (edge1 & 2)] = + ((CvQuadEdge2D *) (edge2 & ~3))->pt[1 + (edge2 & 2)] = voronoi_point; + } + } + } + + CV_NEXT_SEQ_ELEM( elem_size, reader ); + } + + subdiv->is_geometry_valid = 1; +} + + +static int +icvIsRightOf2( const CvPoint2D32f& pt, const CvPoint2D32f& org, const CvPoint2D32f& diff ) +{ + double cw_area = ((double)org.x - pt.x)*diff.y - ((double)org.y - pt.y)*diff.x; + return (cw_area > 0) - (cw_area < 0); +} + + +CV_IMPL CvSubdiv2DPoint* +cvFindNearestPoint2D( CvSubdiv2D* subdiv, CvPoint2D32f pt ) +{ + CvSubdiv2DPoint* point = 0; + CvPoint2D32f start; + CvPoint2D32f diff; + CvSubdiv2DPointLocation loc; + CvSubdiv2DEdge edge; + int i; + + if( !subdiv ) + CV_Error( CV_StsNullPtr, "" ); + + if( 
!CV_IS_SUBDIV2D( subdiv )) + CV_Error( CV_StsNullPtr, "" ); + + if( subdiv->edges->active_count <= 3 ) + return 0; + + if( !subdiv->is_geometry_valid ) + cvCalcSubdivVoronoi2D( subdiv ); + + loc = cvSubdiv2DLocate( subdiv, pt, &edge, &point ); + + switch( loc ) + { + case CV_PTLOC_ON_EDGE: + case CV_PTLOC_INSIDE: + break; + default: + return point; + } + + point = 0; + + start = cvSubdiv2DEdgeOrg( edge )->pt; + diff.x = pt.x - start.x; + diff.y = pt.y - start.y; + + edge = cvSubdiv2DRotateEdge( edge, 1 ); + + for( i = 0; i < subdiv->total; i++ ) + { + CvPoint2D32f t; + + for(;;) + { + assert( cvSubdiv2DEdgeDst( edge )); + + t = cvSubdiv2DEdgeDst( edge )->pt; + if( icvIsRightOf2( t, start, diff ) >= 0 ) + break; + + edge = cvSubdiv2DGetEdge( edge, CV_NEXT_AROUND_LEFT ); + } + + for(;;) + { + assert( cvSubdiv2DEdgeOrg( edge )); + + t = cvSubdiv2DEdgeOrg( edge )->pt; + if( icvIsRightOf2( t, start, diff ) < 0 ) + break; + + edge = cvSubdiv2DGetEdge( edge, CV_PREV_AROUND_LEFT ); + } + + { + CvPoint2D32f tempDiff = cvSubdiv2DEdgeDst( edge )->pt; + t = cvSubdiv2DEdgeOrg( edge )->pt; + tempDiff.x -= t.x; + tempDiff.y -= t.y; + + if( icvIsRightOf2( pt, t, tempDiff ) >= 0 ) + { + point = cvSubdiv2DEdgeOrg( cvSubdiv2DRotateEdge( edge, 3 )); + break; + } + } + + edge = cvSubdiv2DSymEdge( edge ); + } + + return point; +} + + +namespace cv +{ + +int Subdiv2D::nextEdge(int edge) const +{ + CV_DbgAssert((size_t)(edge >> 2) < qedges.size()); + return qedges[edge >> 2].next[edge & 3]; +} + +int Subdiv2D::rotateEdge(int edge, int rotate) const +{ + return (edge & ~3) + ((edge + rotate) & 3); +} + +int Subdiv2D::symEdge(int edge) const +{ + return edge ^ 2; +} + +int Subdiv2D::getEdge(int edge, int nextEdgeType) const +{ + CV_DbgAssert((size_t)(edge >> 2) < qedges.size()); + edge = qedges[edge >> 2].next[(edge + nextEdgeType) & 3]; + return (edge & ~3) + ((edge + (nextEdgeType >> 4)) & 3); +} + +int Subdiv2D::edgeOrg(int edge, CV_OUT Point2f* orgpt) const +{ + 
CV_DbgAssert((size_t)(edge >> 2) < qedges.size()); + int vidx = qedges[edge >> 2].pt[edge & 3]; + if( orgpt ) + { + CV_DbgAssert((size_t)vidx < vtx.size()); + *orgpt = vtx[vidx].pt; + } + return vidx; +} + +int Subdiv2D::edgeDst(int edge, CV_OUT Point2f* dstpt) const +{ + CV_DbgAssert((size_t)(edge >> 2) < qedges.size()); + int vidx = qedges[edge >> 2].pt[(edge + 2) & 3]; + if( dstpt ) + { + CV_DbgAssert((size_t)vidx < vtx.size()); + *dstpt = vtx[vidx].pt; + } + return vidx; +} + + +Point2f Subdiv2D::getVertex(int vertex, CV_OUT int* firstEdge) const +{ + CV_DbgAssert((size_t)vertex < vtx.size()); + if( firstEdge ) + *firstEdge = vtx[vertex].firstEdge; + return vtx[vertex].pt; +} + + +Subdiv2D::Subdiv2D() +{ + validGeometry = false; + freeQEdge = 0; + freePoint = 0; + recentEdge = 0; +} + +Subdiv2D::Subdiv2D(Rect rect) +{ + validGeometry = false; + freeQEdge = 0; + freePoint = 0; + recentEdge = 0; + + initDelaunay(rect); +} + + +Subdiv2D::QuadEdge::QuadEdge() +{ + next[0] = next[1] = next[2] = next[3] = 0; + pt[0] = pt[1] = pt[2] = pt[3] = 0; +} + +Subdiv2D::QuadEdge::QuadEdge(int edgeidx) +{ + CV_DbgAssert((edgeidx & 3) == 0); + next[0] = edgeidx; + next[1] = edgeidx+3; + next[2] = edgeidx+2; + next[3] = edgeidx+1; + + pt[0] = pt[1] = pt[2] = pt[3] = 0; +} + +bool Subdiv2D::QuadEdge::isfree() const +{ + return next[0] <= 0; +} + +Subdiv2D::Vertex::Vertex() +{ + firstEdge = 0; + type = -1; +} + +Subdiv2D::Vertex::Vertex(Point2f _pt, bool _isvirtual, int _firstEdge) +{ + firstEdge = _firstEdge; + type = (int)_isvirtual; + pt = _pt; +} + +bool Subdiv2D::Vertex::isvirtual() const +{ + return type > 0; +} + +bool Subdiv2D::Vertex::isfree() const +{ + return type < 0; +} + +void Subdiv2D::splice( int edgeA, int edgeB ) +{ + int& a_next = qedges[edgeA >> 2].next[edgeA & 3]; + int& b_next = qedges[edgeB >> 2].next[edgeB & 3]; + int a_rot = rotateEdge(a_next, 1); + int b_rot = rotateEdge(b_next, 1); + int& a_rot_next = qedges[a_rot >> 2].next[a_rot & 3]; + int& b_rot_next 
= qedges[b_rot >> 2].next[b_rot & 3]; + std::swap(a_next, b_next); + std::swap(a_rot_next, b_rot_next); +} + +void Subdiv2D::setEdgePoints(int edge, int orgPt, int dstPt) +{ + qedges[edge >> 2].pt[edge & 3] = orgPt; + qedges[edge >> 2].pt[(edge + 2) & 3] = dstPt; + vtx[orgPt].firstEdge = edge; + vtx[dstPt].firstEdge = edge ^ 2; +} + +int Subdiv2D::connectEdges( int edgeA, int edgeB ) +{ + int edge = newEdge(); + + splice(edge, getEdge(edgeA, NEXT_AROUND_LEFT)); + splice(symEdge(edge), edgeB); + + setEdgePoints(edge, edgeDst(edgeA), edgeOrg(edgeB)); + return edge; +} + +void Subdiv2D::swapEdges( int edge ) +{ + int sedge = symEdge(edge); + int a = getEdge(edge, PREV_AROUND_ORG); + int b = getEdge(sedge, PREV_AROUND_ORG); + + splice(edge, a); + splice(sedge, b); + + setEdgePoints(edge, edgeDst(a), edgeDst(b)); + + splice(edge, getEdge(a, NEXT_AROUND_LEFT)); + splice(sedge, getEdge(b, NEXT_AROUND_LEFT)); +} + +int Subdiv2D::isRightOf(Point2f pt, int edge) const +{ + Point2f org, dst; + edgeOrg(edge, &org); + edgeDst(edge, &dst); + double cw_area = cvTriangleArea( pt, dst, org ); + + return (cw_area > 0) - (cw_area < 0); +} + + +int Subdiv2D::newEdge() +{ + if( freeQEdge <= 0 ) + { + qedges.push_back(QuadEdge()); + freeQEdge = (int)(qedges.size()-1); + } + int edge = freeQEdge*4; + freeQEdge = qedges[edge >> 2].next[1]; + qedges[edge >> 2] = QuadEdge(edge); + return edge; +} + +void Subdiv2D::deleteEdge(int edge) +{ + CV_DbgAssert((size_t)(edge >> 2) < (size_t)qedges.size()); + splice( edge, getEdge(edge, PREV_AROUND_ORG) ); + int sedge = symEdge(edge); + splice(sedge, getEdge(sedge, PREV_AROUND_ORG) ); + + edge >>= 2; + qedges[edge].next[0] = 0; + qedges[edge].next[1] = freeQEdge; + freeQEdge = edge; +} + +int Subdiv2D::newPoint(Point2f pt, bool isvirtual, int firstEdge) +{ + if( freePoint == 0 ) + { + vtx.push_back(Vertex()); + freePoint = (int)(vtx.size()-1); + } + int vidx = freePoint; + freePoint = vtx[vidx].firstEdge; + vtx[vidx] = Vertex(pt, isvirtual, 
firstEdge); + + return vidx; +} + +void Subdiv2D::deletePoint(int vidx) +{ + CV_DbgAssert( (size_t)vidx < vtx.size() ); + vtx[vidx].firstEdge = freePoint; + vtx[vidx].type = -1; + freePoint = vidx; +} + +int Subdiv2D::locate(Point2f pt, int& _edge, int& _vertex) +{ + int vertex = 0; + + int i, maxEdges = (int)(qedges.size() * 4); + + if( qedges.size() < (size_t)4 ) + CV_Error( CV_StsError, "Subdivision is empty" ); + + if( pt.x < topLeft.x || pt.y < topLeft.y || pt.x >= bottomRight.x || pt.y >= bottomRight.y ) + CV_Error( CV_StsOutOfRange, "" ); + + int edge = recentEdge; + CV_Assert(edge > 0); + + int location = PTLOC_ERROR; + + int right_of_curr = isRightOf(pt, edge); + if( right_of_curr > 0 ) + { + edge = symEdge(edge); + right_of_curr = -right_of_curr; + } + + for( i = 0; i < maxEdges; i++ ) + { + int onext_edge = nextEdge( edge ); + int dprev_edge = getEdge( edge, PREV_AROUND_DST ); + + int right_of_onext = isRightOf( pt, onext_edge ); + int right_of_dprev = isRightOf( pt, dprev_edge ); + + if( right_of_dprev > 0 ) + { + if( right_of_onext > 0 || (right_of_onext == 0 && right_of_curr == 0) ) + { + location = PTLOC_INSIDE; + break; + } + else + { + right_of_curr = right_of_onext; + edge = onext_edge; + } + } + else + { + if( right_of_onext > 0 ) + { + if( right_of_dprev == 0 && right_of_curr == 0 ) + { + location = PTLOC_INSIDE; + break; + } + else + { + right_of_curr = right_of_dprev; + edge = dprev_edge; + } + } + else if( right_of_curr == 0 && + isRightOf( vtx[edgeDst(onext_edge)].pt, edge ) >= 0 ) + { + edge = symEdge( edge ); + } + else + { + right_of_curr = right_of_onext; + edge = onext_edge; + } + } + } + + recentEdge = edge; + + if( location == PTLOC_INSIDE ) + { + Point2f org_pt, dst_pt; + edgeOrg(edge, &org_pt); + edgeDst(edge, &dst_pt); + + double t1 = fabs( pt.x - org_pt.x ); + t1 += fabs( pt.y - org_pt.y ); + double t2 = fabs( pt.x - dst_pt.x ); + t2 += fabs( pt.y - dst_pt.y ); + double t3 = fabs( org_pt.x - dst_pt.x ); + t3 += fabs( org_pt.y - 
dst_pt.y ); + + if( t1 < FLT_EPSILON ) + { + location = PTLOC_VERTEX; + vertex = edgeOrg( edge ); + edge = 0; + } + else if( t2 < FLT_EPSILON ) + { + location = PTLOC_VERTEX; + vertex = edgeDst( edge ); + edge = 0; + } + else if( (t1 < t3 || t2 < t3) && + fabs( cvTriangleArea( pt, org_pt, dst_pt )) < FLT_EPSILON ) + { + location = PTLOC_ON_EDGE; + vertex = 0; + } + } + + if( location == PTLOC_ERROR ) + { + edge = 0; + vertex = 0; + } + + _edge = edge; + _vertex = vertex; + + return location; +} + + +inline int +isPtInCircle3( Point2f pt, Point2f a, Point2f b, Point2f c) +{ + const double eps = FLT_EPSILON*0.125; + double val = ((double)a.x * a.x + (double)a.y * a.y) * cvTriangleArea( b, c, pt ); + val -= ((double)b.x * b.x + (double)b.y * b.y) * cvTriangleArea( a, c, pt ); + val += ((double)c.x * c.x + (double)c.y * c.y) * cvTriangleArea( a, b, pt ); + val -= ((double)pt.x * pt.x + (double)pt.y * pt.y) * cvTriangleArea( a, b, c ); + + return val > eps ? 1 : val < -eps ? -1 : 0; +} + + +int Subdiv2D::insert(Point2f pt) +{ + int curr_point = 0, curr_edge = 0, deleted_edge = 0; + int location = locate( pt, curr_edge, curr_point ); + + if( location == PTLOC_ERROR ) + CV_Error( CV_StsBadSize, "" ); + + if( location == PTLOC_OUTSIDE_RECT ) + CV_Error( CV_StsOutOfRange, "" ); + + if( location == PTLOC_VERTEX ) + return curr_point; + + if( location == PTLOC_ON_EDGE ) + { + deleted_edge = curr_edge; + recentEdge = curr_edge = getEdge( curr_edge, PREV_AROUND_ORG ); + deleteEdge(deleted_edge); + } + else if( location == PTLOC_INSIDE ) + ; + else + CV_Error_(CV_StsError, ("Subdiv2D::locate returned invalid location = %d", location) ); + + assert( curr_edge != 0 ); + validGeometry = false; + + curr_point = newPoint(pt, false); + int base_edge = newEdge(); + int first_point = edgeOrg(curr_edge); + setEdgePoints(base_edge, first_point, curr_point); + splice(base_edge, curr_edge); + + do + { + base_edge = connectEdges( curr_edge, symEdge(base_edge) ); + curr_edge = 
getEdge(base_edge, PREV_AROUND_ORG); + } + while( edgeDst(curr_edge) != first_point ); + + curr_edge = getEdge( base_edge, PREV_AROUND_ORG ); + + int i, max_edges = (int)(qedges.size()*4); + + for( i = 0; i < max_edges; i++ ) + { + int temp_dst = 0, curr_org = 0, curr_dst = 0; + int temp_edge = getEdge( curr_edge, PREV_AROUND_ORG ); + + temp_dst = edgeDst( temp_edge ); + curr_org = edgeOrg( curr_edge ); + curr_dst = edgeDst( curr_edge ); + + if( isRightOf( vtx[temp_dst].pt, curr_edge ) > 0 && + isPtInCircle3( vtx[curr_org].pt, vtx[temp_dst].pt, + vtx[curr_dst].pt, vtx[curr_point].pt ) < 0 ) + { + swapEdges( curr_edge ); + curr_edge = getEdge( curr_edge, PREV_AROUND_ORG ); + } + else if( curr_org == first_point ) + break; + else + curr_edge = getEdge( nextEdge( curr_edge ), PREV_AROUND_LEFT ); + } + + return curr_point; +} + +void Subdiv2D::insert(const vector& ptvec) +{ + for( size_t i = 0; i < ptvec.size(); i++ ) + insert(ptvec[i]); +} + +void Subdiv2D::initDelaunay( Rect rect ) +{ + float big_coord = 3.f * MAX( rect.width, rect.height ); + float rx = (float)rect.x; + float ry = (float)rect.y; + + vtx.clear(); + qedges.clear(); + + recentEdge = 0; + validGeometry = false; + + topLeft = Point2f( rx, ry ); + bottomRight = Point2f( rx + rect.width, ry + rect.height ); + + Point2f ppA( rx + big_coord, ry ); + Point2f ppB( rx, ry + big_coord ); + Point2f ppC( rx - big_coord, ry - big_coord ); + + vtx.push_back(Vertex()); + qedges.push_back(QuadEdge()); + + freeQEdge = 0; + freePoint = 0; + + int pA = newPoint(ppA, false); + int pB = newPoint(ppB, false); + int pC = newPoint(ppC, false); + + int edge_AB = newEdge(); + int edge_BC = newEdge(); + int edge_CA = newEdge(); + + setEdgePoints( edge_AB, pA, pB ); + setEdgePoints( edge_BC, pB, pC ); + setEdgePoints( edge_CA, pC, pA ); + + splice( edge_AB, symEdge( edge_CA )); + splice( edge_BC, symEdge( edge_AB )); + splice( edge_CA, symEdge( edge_BC )); + + recentEdge = edge_AB; +} + + +void Subdiv2D::clearVoronoi() +{ + 
size_t i, total = qedges.size(); + + for( i = 0; i < total; i++ ) + qedges[i].pt[1] = qedges[i].pt[3] = 0; + + total = vtx.size(); + for( i = 0; i < total; i++ ) + { + if( vtx[i].isvirtual() ) + deletePoint((int)i); + } + + validGeometry = false; +} + + +static Point2f computeVoronoiPoint(Point2f org0, Point2f dst0, Point2f org1, Point2f dst1) +{ + double a0 = dst0.x - org0.x; + double b0 = dst0.y - org0.y; + double c0 = -0.5*(a0 * (dst0.x + org0.x) + b0 * (dst0.y + org0.y)); + + double a1 = dst1.x - org1.x; + double b1 = dst1.y - org1.y; + double c1 = -0.5*(a1 * (dst1.x + org1.x) + b1 * (dst1.y + org1.y)); + + double det = a0 * b1 - a1 * b0; + + if( det != 0 ) + { + det = 1. / det; + return Point2f((float) ((b0 * c1 - b1 * c0) * det), + (float) ((a1 * c0 - a0 * c1) * det)); + } + + return Point2f(FLT_MAX, FLT_MAX); +} + + +void Subdiv2D::calcVoronoi() +{ + // check if it is already calculated + if( validGeometry ) + return; + + clearVoronoi(); + int i, total = (int)qedges.size(); + + // loop through all quad-edges, except for the first 3 (#1, #2, #3 - 0 is reserved for "NULL" pointer) + for( i = 4; i < total; i++ ) + { + QuadEdge& quadedge = qedges[i]; + + if( quadedge.isfree() ) + continue; + + int edge0 = (int)(i*4); + Point2f org0, dst0, org1, dst1; + + if( !quadedge.pt[3] ) + { + int edge1 = getEdge( edge0, NEXT_AROUND_LEFT ); + int edge2 = getEdge( edge1, NEXT_AROUND_LEFT ); + + edgeOrg(edge0, &org0); + edgeDst(edge0, &dst0); + edgeOrg(edge1, &org1); + edgeDst(edge1, &dst1); + + Point2f virt_point = computeVoronoiPoint(org0, dst0, org1, dst1); + + if( fabs( virt_point.x ) < FLT_MAX * 0.5 && + fabs( virt_point.y ) < FLT_MAX * 0.5 ) + { + quadedge.pt[3] = qedges[edge1 >> 2].pt[3 - (edge1 & 2)] = + qedges[edge2 >> 2].pt[3 - (edge2 & 2)] = newPoint(virt_point, true); + } + } + + if( !quadedge.pt[1] ) + { + int edge1 = getEdge( edge0, NEXT_AROUND_RIGHT ); + int edge2 = getEdge( edge1, NEXT_AROUND_RIGHT ); + + edgeOrg(edge0, &org0); + edgeDst(edge0, &dst0); + 
edgeOrg(edge1, &org1); + edgeDst(edge1, &dst1); + + Point2f virt_point = computeVoronoiPoint(org0, dst0, org1, dst1); + + if( fabs( virt_point.x ) < FLT_MAX * 0.5 && + fabs( virt_point.y ) < FLT_MAX * 0.5 ) + { + quadedge.pt[1] = qedges[edge1 >> 2].pt[1 + (edge1 & 2)] = + qedges[edge2 >> 2].pt[1 + (edge2 & 2)] = newPoint(virt_point, true); + } + } + } + + validGeometry = true; +} + + +static int +isRightOf2( const Point2f& pt, const Point2f& org, const Point2f& diff ) +{ + double cw_area = ((double)org.x - pt.x)*diff.y - ((double)org.y - pt.y)*diff.x; + return (cw_area > 0) - (cw_area < 0); +} + + +int Subdiv2D::findNearest(Point2f pt, Point2f* nearestPt) +{ + if( !validGeometry ) + calcVoronoi(); + + int vertex = 0, edge = 0; + int loc = locate( pt, edge, vertex ); + + if( loc != PTLOC_ON_EDGE && loc != PTLOC_INSIDE ) + return vertex; + + vertex = 0; + + Point2f start; + edgeOrg(edge, &start); + Point2f diff = pt - start; + + edge = rotateEdge(edge, 1); + + int i, total = (int)vtx.size(); + + for( i = 0; i < total; i++ ) + { + Point2f t; + + for(;;) + { + CV_Assert( edgeDst(edge, &t) > 0 ); + if( isRightOf2( t, start, diff ) >= 0 ) + break; + + edge = getEdge( edge, NEXT_AROUND_LEFT ); + } + + for(;;) + { + CV_Assert( edgeOrg( edge, &t ) > 0 ); + + if( isRightOf2( t, start, diff ) < 0 ) + break; + + edge = getEdge( edge, PREV_AROUND_LEFT ); + } + + Point2f tempDiff; + edgeDst(edge, &tempDiff); + edgeOrg(edge, &t); + tempDiff -= t; + + if( isRightOf2( pt, t, tempDiff ) >= 0 ) + { + vertex = edgeOrg(rotateEdge( edge, 3 )); + break; + } + + edge = symEdge( edge ); + } + + if( nearestPt && vertex > 0 ) + *nearestPt = vtx[vertex].pt; + + return vertex; +} + +void Subdiv2D::getEdgeList(vector& edgeList) const +{ + edgeList.clear(); + + for( size_t i = 4; i < qedges.size(); i++ ) + { + if( qedges[i].isfree() ) + continue; + if( qedges[i].pt[0] > 0 && qedges[i].pt[2] > 0 ) + { + Point2f org = vtx[qedges[i].pt[0]].pt; + Point2f dst = vtx[qedges[i].pt[2]].pt; + 
edgeList.push_back(Vec4f(org.x, org.y, dst.x, dst.y)); + } + } +} + +void Subdiv2D::getTriangleList(vector& triangleList) const +{ + triangleList.clear(); + int i, total = (int)(qedges.size()*4); + vector edgemask(total, false); + + for( i = 4; i < total; i += 2 ) + { + if( edgemask[i] ) + continue; + Point2f a, b, c; + int edge = i; + edgeOrg(edge, &a); + edgemask[edge] = true; + edge = getEdge(edge, NEXT_AROUND_LEFT); + edgeOrg(edge, &b); + edgemask[edge] = true; + edge = getEdge(edge, NEXT_AROUND_LEFT); + edgeOrg(edge, &c); + edgemask[edge] = true; + triangleList.push_back(Vec6f(a.x, a.y, b.x, b.y, c.x, c.y)); + } +} + +void Subdiv2D::getVoronoiFacetList(const vector& idx, + CV_OUT vector >& facetList, + CV_OUT vector& facetCenters) +{ + calcVoronoi(); + facetList.clear(); + facetCenters.clear(); + + vector buf; + + size_t i, total; + if( idx.empty() ) + i = 4, total = vtx.size(); + else + i = 0, total = idx.size(); + + for( ; i < total; i++ ) + { + int k = idx.empty() ? (int)i : idx[i]; + + if( vtx[k].isfree() || vtx[k].isvirtual() ) + continue; + int edge = rotateEdge(vtx[k].firstEdge, 1), t = edge; + + // gather points + buf.clear(); + do + { + buf.push_back(vtx[edgeOrg(t)].pt); + t = getEdge( t, NEXT_AROUND_LEFT ); + } + while( t != edge ); + + facetList.push_back(buf); + facetCenters.push_back(vtx[k].pt); + } +} + + +void Subdiv2D::check() const +{ + int i, j, total = (int)qedges.size(); + + for( i = 0; i < total; i++ ) + { + const QuadEdge& qe = qedges[i]; + + if( qe.isfree() ) + continue; + + for( j = 0; j < 4; j++ ) + { + int e = (int)(i*4 + j); + int o_next = nextEdge(e); + int o_prev = getEdge(e, PREV_AROUND_ORG ); + int d_prev = getEdge(e, PREV_AROUND_DST ); + int d_next = getEdge(e, NEXT_AROUND_DST ); + + // check points + CV_Assert( edgeOrg(e) == edgeOrg(o_next)); + CV_Assert( edgeOrg(e) == edgeOrg(o_prev)); + CV_Assert( edgeDst(e) == edgeDst(d_next)); + CV_Assert( edgeDst(e) == edgeDst(d_prev)); + + if( j % 2 == 0 ) + { + CV_Assert( edgeDst(o_next) 
== edgeOrg(d_prev)); + CV_Assert( edgeDst(o_prev) == edgeOrg(d_next)); + CV_Assert( getEdge(getEdge(getEdge(e,NEXT_AROUND_LEFT),NEXT_AROUND_LEFT),NEXT_AROUND_LEFT) == e ); + CV_Assert( getEdge(getEdge(getEdge(e,NEXT_AROUND_RIGHT),NEXT_AROUND_RIGHT),NEXT_AROUND_RIGHT) == e); + } + } + } +} + +} + +/* End of file. */ diff --git a/opencv/imgproc/sumpixels.cpp b/opencv/imgproc/sumpixels.cpp new file mode 100644 index 0000000..eb9a70f --- /dev/null +++ b/opencv/imgproc/sumpixels.cpp @@ -0,0 +1,309 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. 
+// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#include "precomp.hpp" + +namespace cv +{ + +template +void integral_( const T* src, size_t _srcstep, ST* sum, size_t _sumstep, + QT* sqsum, size_t _sqsumstep, ST* tilted, size_t _tiltedstep, + Size size, int cn ) +{ + int x, y, k; + + int srcstep = (int)(_srcstep/sizeof(T)); + int sumstep = (int)(_sumstep/sizeof(ST)); + int tiltedstep = (int)(_tiltedstep/sizeof(ST)); + int sqsumstep = (int)(_sqsumstep/sizeof(QT)); + + size.width *= cn; + + memset( sum, 0, (size.width+cn)*sizeof(sum[0])); + sum += sumstep + cn; + + if( sqsum ) + { + memset( sqsum, 0, (size.width+cn)*sizeof(sqsum[0])); + sqsum += sqsumstep + cn; + } + + if( tilted ) + { + memset( tilted, 0, (size.width+cn)*sizeof(tilted[0])); + tilted += tiltedstep + cn; + } + + if( sqsum == 0 && tilted == 0 ) + { + for( y = 0; y < size.height; y++, src += srcstep - cn, sum += sumstep - cn ) + { + for( k = 0; k < cn; k++, src++, sum++ ) + { + ST s = sum[-cn] = 0; + for( x = 0; x < size.width; x += cn ) + { + s += src[x]; + sum[x] = sum[x - sumstep] + s; + } + } + } + } + else if( tilted == 0 ) + { + for( y = 0; y < size.height; y++, src += srcstep - cn, + sum += sumstep - cn, sqsum += sqsumstep - cn ) + { + for( k = 
0; k < cn; k++, src++, sum++, sqsum++ ) + { + ST s = sum[-cn] = 0; + QT sq = sqsum[-cn] = 0; + for( x = 0; x < size.width; x += cn ) + { + T it = src[x]; + s += it; + sq += (QT)it*it; + ST t = sum[x - sumstep] + s; + QT tq = sqsum[x - sqsumstep] + sq; + sum[x] = t; + sqsum[x] = tq; + } + } + } + } + else + { + AutoBuffer _buf(size.width+cn); + ST* buf = _buf; + ST s; + QT sq; + for( k = 0; k < cn; k++, src++, sum++, tilted++, buf++ ) + { + sum[-cn] = tilted[-cn] = 0; + + for( x = 0, s = 0, sq = 0; x < size.width; x += cn ) + { + T it = src[x]; + buf[x] = tilted[x] = it; + s += it; + sq += (QT)it*it; + sum[x] = s; + if( sqsum ) + sqsum[x] = sq; + } + + if( size.width == cn ) + buf[cn] = 0; + + if( sqsum ) + { + sqsum[-cn] = 0; + sqsum++; + } + } + + for( y = 1; y < size.height; y++ ) + { + src += srcstep - cn; + sum += sumstep - cn; + tilted += tiltedstep - cn; + buf += -cn; + + if( sqsum ) + sqsum += sqsumstep - cn; + + for( k = 0; k < cn; k++, src++, sum++, tilted++, buf++ ) + { + T it = src[0]; + ST t0 = s = it; + QT tq0 = sq = (QT)it*it; + + sum[-cn] = 0; + if( sqsum ) + sqsum[-cn] = 0; + tilted[-cn] = tilted[-tiltedstep]; + + sum[0] = sum[-sumstep] + t0; + if( sqsum ) + sqsum[0] = sqsum[-sqsumstep] + tq0; + tilted[0] = tilted[-tiltedstep] + t0 + buf[cn]; + + for( x = cn; x < size.width - cn; x += cn ) + { + ST t1 = buf[x]; + buf[x - cn] = t1 + t0; + t0 = it = src[x]; + tq0 = (QT)it*it; + s += t0; + sq += tq0; + sum[x] = sum[x - sumstep] + s; + if( sqsum ) + sqsum[x] = sqsum[x - sqsumstep] + sq; + t1 += buf[x + cn] + t0 + tilted[x - tiltedstep - cn]; + tilted[x] = t1; + } + + if( size.width > cn ) + { + ST t1 = buf[x]; + buf[x - cn] = t1 + t0; + t0 = it = src[x]; + tq0 = (QT)it*it; + s += t0; + sq += tq0; + sum[x] = sum[x - sumstep] + s; + if( sqsum ) + sqsum[x] = sqsum[x - sqsumstep] + sq; + tilted[x] = t0 + t1 + tilted[x - tiltedstep - cn]; + buf[x] = t0; + } + + if( sqsum ) + sqsum++; + } + } + } +} + + +#define DEF_INTEGRAL_FUNC(suffix, T, ST, QT) \ +void 
integral_##suffix( T* src, size_t srcstep, ST* sum, size_t sumstep, QT* sqsum, size_t sqsumstep, \ + ST* tilted, size_t tiltedstep, Size size, int cn ) \ +{ integral_(src, srcstep, sum, sumstep, sqsum, sqsumstep, tilted, tiltedstep, size, cn); } + +DEF_INTEGRAL_FUNC(8u32s, uchar, int, double) +DEF_INTEGRAL_FUNC(8u32f, uchar, float, double) +DEF_INTEGRAL_FUNC(8u64f, uchar, double, double) +DEF_INTEGRAL_FUNC(32f, float, float, double) +DEF_INTEGRAL_FUNC(32f64f, float, double, double) +DEF_INTEGRAL_FUNC(64f, double, double, double) + +typedef void (*IntegralFunc)(const uchar* src, size_t srcstep, uchar* sum, size_t sumstep, + uchar* sqsum, size_t sqsumstep, uchar* tilted, size_t tstep, + Size size, int cn ); + +} + + +void cv::integral( InputArray _src, OutputArray _sum, OutputArray _sqsum, OutputArray _tilted, int sdepth ) +{ + Mat src = _src.getMat(), sum, sqsum, tilted; + int depth = src.depth(), cn = src.channels(); + Size isize(src.cols + 1, src.rows+1); + + if( sdepth <= 0 ) + sdepth = depth == CV_8U ? 
CV_32S : CV_64F; + sdepth = CV_MAT_DEPTH(sdepth); + _sum.create( isize, CV_MAKETYPE(sdepth, cn) ); + sum = _sum.getMat(); + + if( _tilted.needed() ) + { + _tilted.create( isize, CV_MAKETYPE(sdepth, cn) ); + tilted = _tilted.getMat(); + } + + if( _sqsum.needed() ) + { + _sqsum.create( isize, CV_MAKETYPE(CV_64F, cn) ); + sqsum = _sqsum.getMat(); + } + + IntegralFunc func = 0; + + if( depth == CV_8U && sdepth == CV_32S ) + func = (IntegralFunc)integral_8u32s; + else if( depth == CV_8U && sdepth == CV_32F ) + func = (IntegralFunc)integral_8u32f; + else if( depth == CV_8U && sdepth == CV_64F ) + func = (IntegralFunc)integral_8u64f; + else if( depth == CV_32F && sdepth == CV_32F ) + func = (IntegralFunc)integral_32f; + else if( depth == CV_32F && sdepth == CV_64F ) + func = (IntegralFunc)integral_32f64f; + else if( depth == CV_64F && sdepth == CV_64F ) + func = (IntegralFunc)integral_64f; + else + CV_Error( CV_StsUnsupportedFormat, "" ); + + func( src.data, src.step, sum.data, sum.step, sqsum.data, sqsum.step, + tilted.data, tilted.step, src.size(), cn ); +} + +void cv::integral( InputArray src, OutputArray sum, int sdepth ) +{ + integral( src, sum, noArray(), noArray(), sdepth ); +} + +void cv::integral( InputArray src, OutputArray sum, OutputArray sqsum, int sdepth ) +{ + integral( src, sum, sqsum, noArray(), sdepth ); +} + + +CV_IMPL void +cvIntegral( const CvArr* image, CvArr* sumImage, + CvArr* sumSqImage, CvArr* tiltedSumImage ) +{ + cv::Mat src = cv::cvarrToMat(image), sum = cv::cvarrToMat(sumImage), sum0 = sum; + cv::Mat sqsum0, sqsum, tilted0, tilted; + cv::Mat *psqsum = 0, *ptilted = 0; + + if( sumSqImage ) + { + sqsum0 = sqsum = cv::cvarrToMat(sumSqImage); + psqsum = &sqsum; + } + + if( tiltedSumImage ) + { + tilted0 = tilted = cv::cvarrToMat(tiltedSumImage); + ptilted = &tilted; + } + cv::integral( src, sum, psqsum ? cv::_OutputArray(*psqsum) : cv::_OutputArray(), + ptilted ? 
cv::_OutputArray(*ptilted) : cv::_OutputArray(), sum.depth() ); + + CV_Assert( sum.data == sum0.data && sqsum.data == sqsum0.data && tilted.data == tilted0.data ); +} + +/* End of file. */ diff --git a/opencv/imgproc/tables.cpp b/opencv/imgproc/tables.cpp new file mode 100644 index 0000000..20acf88 --- /dev/null +++ b/opencv/imgproc/tables.cpp @@ -0,0 +1,214 @@ +/* //////////////////////////////////////////////////////////////////// +// +// CvMat helper tables +// +// */ + +#include "precomp.hpp" + +const float icv8x32fTab_cv[] = +{ + -256.f, -255.f, -254.f, -253.f, -252.f, -251.f, -250.f, -249.f, + -248.f, -247.f, -246.f, -245.f, -244.f, -243.f, -242.f, -241.f, + -240.f, -239.f, -238.f, -237.f, -236.f, -235.f, -234.f, -233.f, + -232.f, -231.f, -230.f, -229.f, -228.f, -227.f, -226.f, -225.f, + -224.f, -223.f, -222.f, -221.f, -220.f, -219.f, -218.f, -217.f, + -216.f, -215.f, -214.f, -213.f, -212.f, -211.f, -210.f, -209.f, + -208.f, -207.f, -206.f, -205.f, -204.f, -203.f, -202.f, -201.f, + -200.f, -199.f, -198.f, -197.f, -196.f, -195.f, -194.f, -193.f, + -192.f, -191.f, -190.f, -189.f, -188.f, -187.f, -186.f, -185.f, + -184.f, -183.f, -182.f, -181.f, -180.f, -179.f, -178.f, -177.f, + -176.f, -175.f, -174.f, -173.f, -172.f, -171.f, -170.f, -169.f, + -168.f, -167.f, -166.f, -165.f, -164.f, -163.f, -162.f, -161.f, + -160.f, -159.f, -158.f, -157.f, -156.f, -155.f, -154.f, -153.f, + -152.f, -151.f, -150.f, -149.f, -148.f, -147.f, -146.f, -145.f, + -144.f, -143.f, -142.f, -141.f, -140.f, -139.f, -138.f, -137.f, + -136.f, -135.f, -134.f, -133.f, -132.f, -131.f, -130.f, -129.f, + -128.f, -127.f, -126.f, -125.f, -124.f, -123.f, -122.f, -121.f, + -120.f, -119.f, -118.f, -117.f, -116.f, -115.f, -114.f, -113.f, + -112.f, -111.f, -110.f, -109.f, -108.f, -107.f, -106.f, -105.f, + -104.f, -103.f, -102.f, -101.f, -100.f, -99.f, -98.f, -97.f, + -96.f, -95.f, -94.f, -93.f, -92.f, -91.f, -90.f, -89.f, + -88.f, -87.f, -86.f, -85.f, -84.f, -83.f, -82.f, -81.f, + -80.f, -79.f, -78.f, 
-77.f, -76.f, -75.f, -74.f, -73.f, + -72.f, -71.f, -70.f, -69.f, -68.f, -67.f, -66.f, -65.f, + -64.f, -63.f, -62.f, -61.f, -60.f, -59.f, -58.f, -57.f, + -56.f, -55.f, -54.f, -53.f, -52.f, -51.f, -50.f, -49.f, + -48.f, -47.f, -46.f, -45.f, -44.f, -43.f, -42.f, -41.f, + -40.f, -39.f, -38.f, -37.f, -36.f, -35.f, -34.f, -33.f, + -32.f, -31.f, -30.f, -29.f, -28.f, -27.f, -26.f, -25.f, + -24.f, -23.f, -22.f, -21.f, -20.f, -19.f, -18.f, -17.f, + -16.f, -15.f, -14.f, -13.f, -12.f, -11.f, -10.f, -9.f, + -8.f, -7.f, -6.f, -5.f, -4.f, -3.f, -2.f, -1.f, + 0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, + 8.f, 9.f, 10.f, 11.f, 12.f, 13.f, 14.f, 15.f, + 16.f, 17.f, 18.f, 19.f, 20.f, 21.f, 22.f, 23.f, + 24.f, 25.f, 26.f, 27.f, 28.f, 29.f, 30.f, 31.f, + 32.f, 33.f, 34.f, 35.f, 36.f, 37.f, 38.f, 39.f, + 40.f, 41.f, 42.f, 43.f, 44.f, 45.f, 46.f, 47.f, + 48.f, 49.f, 50.f, 51.f, 52.f, 53.f, 54.f, 55.f, + 56.f, 57.f, 58.f, 59.f, 60.f, 61.f, 62.f, 63.f, + 64.f, 65.f, 66.f, 67.f, 68.f, 69.f, 70.f, 71.f, + 72.f, 73.f, 74.f, 75.f, 76.f, 77.f, 78.f, 79.f, + 80.f, 81.f, 82.f, 83.f, 84.f, 85.f, 86.f, 87.f, + 88.f, 89.f, 90.f, 91.f, 92.f, 93.f, 94.f, 95.f, + 96.f, 97.f, 98.f, 99.f, 100.f, 101.f, 102.f, 103.f, + 104.f, 105.f, 106.f, 107.f, 108.f, 109.f, 110.f, 111.f, + 112.f, 113.f, 114.f, 115.f, 116.f, 117.f, 118.f, 119.f, + 120.f, 121.f, 122.f, 123.f, 124.f, 125.f, 126.f, 127.f, + 128.f, 129.f, 130.f, 131.f, 132.f, 133.f, 134.f, 135.f, + 136.f, 137.f, 138.f, 139.f, 140.f, 141.f, 142.f, 143.f, + 144.f, 145.f, 146.f, 147.f, 148.f, 149.f, 150.f, 151.f, + 152.f, 153.f, 154.f, 155.f, 156.f, 157.f, 158.f, 159.f, + 160.f, 161.f, 162.f, 163.f, 164.f, 165.f, 166.f, 167.f, + 168.f, 169.f, 170.f, 171.f, 172.f, 173.f, 174.f, 175.f, + 176.f, 177.f, 178.f, 179.f, 180.f, 181.f, 182.f, 183.f, + 184.f, 185.f, 186.f, 187.f, 188.f, 189.f, 190.f, 191.f, + 192.f, 193.f, 194.f, 195.f, 196.f, 197.f, 198.f, 199.f, + 200.f, 201.f, 202.f, 203.f, 204.f, 205.f, 206.f, 207.f, + 208.f, 209.f, 210.f, 211.f, 212.f, 213.f, 214.f, 
215.f, + 216.f, 217.f, 218.f, 219.f, 220.f, 221.f, 222.f, 223.f, + 224.f, 225.f, 226.f, 227.f, 228.f, 229.f, 230.f, 231.f, + 232.f, 233.f, 234.f, 235.f, 236.f, 237.f, 238.f, 239.f, + 240.f, 241.f, 242.f, 243.f, 244.f, 245.f, 246.f, 247.f, + 248.f, 249.f, 250.f, 251.f, 252.f, 253.f, 254.f, 255.f, + 256.f, 257.f, 258.f, 259.f, 260.f, 261.f, 262.f, 263.f, + 264.f, 265.f, 266.f, 267.f, 268.f, 269.f, 270.f, 271.f, + 272.f, 273.f, 274.f, 275.f, 276.f, 277.f, 278.f, 279.f, + 280.f, 281.f, 282.f, 283.f, 284.f, 285.f, 286.f, 287.f, + 288.f, 289.f, 290.f, 291.f, 292.f, 293.f, 294.f, 295.f, + 296.f, 297.f, 298.f, 299.f, 300.f, 301.f, 302.f, 303.f, + 304.f, 305.f, 306.f, 307.f, 308.f, 309.f, 310.f, 311.f, + 312.f, 313.f, 314.f, 315.f, 316.f, 317.f, 318.f, 319.f, + 320.f, 321.f, 322.f, 323.f, 324.f, 325.f, 326.f, 327.f, + 328.f, 329.f, 330.f, 331.f, 332.f, 333.f, 334.f, 335.f, + 336.f, 337.f, 338.f, 339.f, 340.f, 341.f, 342.f, 343.f, + 344.f, 345.f, 346.f, 347.f, 348.f, 349.f, 350.f, 351.f, + 352.f, 353.f, 354.f, 355.f, 356.f, 357.f, 358.f, 359.f, + 360.f, 361.f, 362.f, 363.f, 364.f, 365.f, 366.f, 367.f, + 368.f, 369.f, 370.f, 371.f, 372.f, 373.f, 374.f, 375.f, + 376.f, 377.f, 378.f, 379.f, 380.f, 381.f, 382.f, 383.f, + 384.f, 385.f, 386.f, 387.f, 388.f, 389.f, 390.f, 391.f, + 392.f, 393.f, 394.f, 395.f, 396.f, 397.f, 398.f, 399.f, + 400.f, 401.f, 402.f, 403.f, 404.f, 405.f, 406.f, 407.f, + 408.f, 409.f, 410.f, 411.f, 412.f, 413.f, 414.f, 415.f, + 416.f, 417.f, 418.f, 419.f, 420.f, 421.f, 422.f, 423.f, + 424.f, 425.f, 426.f, 427.f, 428.f, 429.f, 430.f, 431.f, + 432.f, 433.f, 434.f, 435.f, 436.f, 437.f, 438.f, 439.f, + 440.f, 441.f, 442.f, 443.f, 444.f, 445.f, 446.f, 447.f, + 448.f, 449.f, 450.f, 451.f, 452.f, 453.f, 454.f, 455.f, + 456.f, 457.f, 458.f, 459.f, 460.f, 461.f, 462.f, 463.f, + 464.f, 465.f, 466.f, 467.f, 468.f, 469.f, 470.f, 471.f, + 472.f, 473.f, 474.f, 475.f, 476.f, 477.f, 478.f, 479.f, + 480.f, 481.f, 482.f, 483.f, 484.f, 485.f, 486.f, 487.f, + 488.f, 489.f, 
490.f, 491.f, 492.f, 493.f, 494.f, 495.f, + 496.f, 497.f, 498.f, 499.f, 500.f, 501.f, 502.f, 503.f, + 504.f, 505.f, 506.f, 507.f, 508.f, 509.f, 510.f, 511.f, +}; + +const float icv8x32fSqrTab[] = +{ + 16384.f, 16129.f, 15876.f, 15625.f, 15376.f, 15129.f, 14884.f, 14641.f, + 14400.f, 14161.f, 13924.f, 13689.f, 13456.f, 13225.f, 12996.f, 12769.f, + 12544.f, 12321.f, 12100.f, 11881.f, 11664.f, 11449.f, 11236.f, 11025.f, + 10816.f, 10609.f, 10404.f, 10201.f, 10000.f, 9801.f, 9604.f, 9409.f, + 9216.f, 9025.f, 8836.f, 8649.f, 8464.f, 8281.f, 8100.f, 7921.f, + 7744.f, 7569.f, 7396.f, 7225.f, 7056.f, 6889.f, 6724.f, 6561.f, + 6400.f, 6241.f, 6084.f, 5929.f, 5776.f, 5625.f, 5476.f, 5329.f, + 5184.f, 5041.f, 4900.f, 4761.f, 4624.f, 4489.f, 4356.f, 4225.f, + 4096.f, 3969.f, 3844.f, 3721.f, 3600.f, 3481.f, 3364.f, 3249.f, + 3136.f, 3025.f, 2916.f, 2809.f, 2704.f, 2601.f, 2500.f, 2401.f, + 2304.f, 2209.f, 2116.f, 2025.f, 1936.f, 1849.f, 1764.f, 1681.f, + 1600.f, 1521.f, 1444.f, 1369.f, 1296.f, 1225.f, 1156.f, 1089.f, + 1024.f, 961.f, 900.f, 841.f, 784.f, 729.f, 676.f, 625.f, + 576.f, 529.f, 484.f, 441.f, 400.f, 361.f, 324.f, 289.f, + 256.f, 225.f, 196.f, 169.f, 144.f, 121.f, 100.f, 81.f, + 64.f, 49.f, 36.f, 25.f, 16.f, 9.f, 4.f, 1.f, + 0.f, 1.f, 4.f, 9.f, 16.f, 25.f, 36.f, 49.f, + 64.f, 81.f, 100.f, 121.f, 144.f, 169.f, 196.f, 225.f, + 256.f, 289.f, 324.f, 361.f, 400.f, 441.f, 484.f, 529.f, + 576.f, 625.f, 676.f, 729.f, 784.f, 841.f, 900.f, 961.f, + 1024.f, 1089.f, 1156.f, 1225.f, 1296.f, 1369.f, 1444.f, 1521.f, + 1600.f, 1681.f, 1764.f, 1849.f, 1936.f, 2025.f, 2116.f, 2209.f, + 2304.f, 2401.f, 2500.f, 2601.f, 2704.f, 2809.f, 2916.f, 3025.f, + 3136.f, 3249.f, 3364.f, 3481.f, 3600.f, 3721.f, 3844.f, 3969.f, + 4096.f, 4225.f, 4356.f, 4489.f, 4624.f, 4761.f, 4900.f, 5041.f, + 5184.f, 5329.f, 5476.f, 5625.f, 5776.f, 5929.f, 6084.f, 6241.f, + 6400.f, 6561.f, 6724.f, 6889.f, 7056.f, 7225.f, 7396.f, 7569.f, + 7744.f, 7921.f, 8100.f, 8281.f, 8464.f, 8649.f, 8836.f, 9025.f, + 9216.f, 
9409.f, 9604.f, 9801.f, 10000.f, 10201.f, 10404.f, 10609.f, + 10816.f, 11025.f, 11236.f, 11449.f, 11664.f, 11881.f, 12100.f, 12321.f, + 12544.f, 12769.f, 12996.f, 13225.f, 13456.f, 13689.f, 13924.f, 14161.f, + 14400.f, 14641.f, 14884.f, 15129.f, 15376.f, 15625.f, 15876.f, 16129.f, + 16384.f, 16641.f, 16900.f, 17161.f, 17424.f, 17689.f, 17956.f, 18225.f, + 18496.f, 18769.f, 19044.f, 19321.f, 19600.f, 19881.f, 20164.f, 20449.f, + 20736.f, 21025.f, 21316.f, 21609.f, 21904.f, 22201.f, 22500.f, 22801.f, + 23104.f, 23409.f, 23716.f, 24025.f, 24336.f, 24649.f, 24964.f, 25281.f, + 25600.f, 25921.f, 26244.f, 26569.f, 26896.f, 27225.f, 27556.f, 27889.f, + 28224.f, 28561.f, 28900.f, 29241.f, 29584.f, 29929.f, 30276.f, 30625.f, + 30976.f, 31329.f, 31684.f, 32041.f, 32400.f, 32761.f, 33124.f, 33489.f, + 33856.f, 34225.f, 34596.f, 34969.f, 35344.f, 35721.f, 36100.f, 36481.f, + 36864.f, 37249.f, 37636.f, 38025.f, 38416.f, 38809.f, 39204.f, 39601.f, + 40000.f, 40401.f, 40804.f, 41209.f, 41616.f, 42025.f, 42436.f, 42849.f, + 43264.f, 43681.f, 44100.f, 44521.f, 44944.f, 45369.f, 45796.f, 46225.f, + 46656.f, 47089.f, 47524.f, 47961.f, 48400.f, 48841.f, 49284.f, 49729.f, + 50176.f, 50625.f, 51076.f, 51529.f, 51984.f, 52441.f, 52900.f, 53361.f, + 53824.f, 54289.f, 54756.f, 55225.f, 55696.f, 56169.f, 56644.f, 57121.f, + 57600.f, 58081.f, 58564.f, 59049.f, 59536.f, 60025.f, 60516.f, 61009.f, + 61504.f, 62001.f, 62500.f, 63001.f, 63504.f, 64009.f, 64516.f, 65025.f +}; + +const uchar icvSaturate8u_cv[] = +{ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, + 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, + 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, + 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, + 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, + 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, + 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, + 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, + 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255 +}; + +/* End of file. */ diff --git a/opencv/imgproc/templmatch.cpp b/opencv/imgproc/templmatch.cpp new file mode 100644 index 0000000..fdf08e0 --- /dev/null +++ b/opencv/imgproc/templmatch.cpp @@ -0,0 +1,381 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#include "precomp.hpp" + +namespace cv +{ + +void crossCorr( const Mat& img, const Mat& templ, Mat& corr, + Size corrsize, int ctype, + Point anchor, double delta, int borderType ) +{ + const double blockScale = 4.5; + const int minBlockSize = 256; + std::vector buf; + + int depth = img.depth(), cn = img.channels(); + int tdepth = templ.depth(), tcn = templ.channels(); + int cdepth = CV_MAT_DEPTH(ctype), ccn = CV_MAT_CN(ctype); + + CV_Assert( img.dims <= 2 && templ.dims <= 2 && corr.dims <= 2 ); + CV_Assert( depth == CV_8U || depth == CV_16U || depth == CV_32F || depth == CV_64F ); + CV_Assert( depth == tdepth || tdepth == CV_32F ); + + CV_Assert( corrsize.height <= img.rows + templ.rows - 1 && + corrsize.width <= img.cols + templ.cols - 1 ); + + CV_Assert( ccn == 1 || delta == 0 ); + + corr.create(corrsize, ctype); + + int maxDepth = depth > CV_8U ? CV_64F : std::max(std::max(CV_32F, tdepth), cdepth); + Size blocksize, dftsize; + + blocksize.width = cvRound(templ.cols*blockScale); + blocksize.width = std::max( blocksize.width, minBlockSize - templ.cols + 1 ); + blocksize.width = std::min( blocksize.width, corr.cols ); + blocksize.height = cvRound(templ.rows*blockScale); + blocksize.height = std::max( blocksize.height, minBlockSize - templ.rows + 1 ); + blocksize.height = std::min( blocksize.height, corr.rows ); + + dftsize.width = std::max(getOptimalDFTSize(blocksize.width + templ.cols - 1), 2); + dftsize.height = getOptimalDFTSize(blocksize.height + templ.rows - 1); + if( dftsize.width <= 0 || dftsize.height <= 0 ) + CV_Error( CV_StsOutOfRange, "the input arrays are too big" ); + + // recompute block size + blocksize.width = dftsize.width - templ.cols + 1; + blocksize.width = MIN( blocksize.width, corr.cols ); + blocksize.height = dftsize.height - templ.rows + 1; + blocksize.height = MIN( blocksize.height, corr.rows ); + + Mat dftTempl( dftsize.height*tcn, dftsize.width, maxDepth ); + Mat dftImg( dftsize, maxDepth ); + + int i, k, bufSize = 0; + if( 
tcn > 1 && tdepth != maxDepth ) + bufSize = templ.cols*templ.rows*CV_ELEM_SIZE(tdepth); + + if( cn > 1 && depth != maxDepth ) + bufSize = std::max( bufSize, (blocksize.width + templ.cols - 1)* + (blocksize.height + templ.rows - 1)*CV_ELEM_SIZE(depth)); + + if( (ccn > 1 || cn > 1) && cdepth != maxDepth ) + bufSize = std::max( bufSize, blocksize.width*blocksize.height*CV_ELEM_SIZE(cdepth)); + + buf.resize(bufSize); + + // compute DFT of each template plane + for( k = 0; k < tcn; k++ ) + { + int yofs = k*dftsize.height; + Mat src = templ; + Mat dst(dftTempl, Rect(0, yofs, dftsize.width, dftsize.height)); + Mat dst1(dftTempl, Rect(0, yofs, templ.cols, templ.rows)); + + if( tcn > 1 ) + { + src = tdepth == maxDepth ? dst1 : Mat(templ.size(), tdepth, &buf[0]); + int pairs[] = {k, 0}; + mixChannels(&templ, 1, &src, 1, pairs, 1); + } + + if( dst1.data != src.data ) + src.convertTo(dst1, dst1.depth()); + + if( dst.cols > templ.cols ) + { + Mat part(dst, Range(0, templ.rows), Range(templ.cols, dst.cols)); + part = Scalar::all(0); + } + dft(dst, dst, 0, templ.rows); + } + + int tileCountX = (corr.cols + blocksize.width - 1)/blocksize.width; + int tileCountY = (corr.rows + blocksize.height - 1)/blocksize.height; + int tileCount = tileCountX * tileCountY; + + Size wholeSize = img.size(); + Point roiofs(0,0); + Mat img0 = img; + + if( !(borderType & BORDER_ISOLATED) ) + { + img.locateROI(wholeSize, roiofs); + img0.adjustROI(roiofs.y, wholeSize.height-img.rows-roiofs.y, + roiofs.x, wholeSize.width-img.cols-roiofs.x); + } + + // calculate correlation by blocks + for( i = 0; i < tileCount; i++ ) + { + int x = (i%tileCountX)*blocksize.width; + int y = (i/tileCountX)*blocksize.height; + + Size bsz(std::min(blocksize.width, corr.cols - x), + std::min(blocksize.height, corr.rows - y)); + Size dsz(bsz.width + templ.cols - 1, bsz.height + templ.rows - 1); + int x0 = x - anchor.x + roiofs.x, y0 = y - anchor.y + roiofs.y; + int x1 = std::max(0, x0), y1 = std::max(0, y0); + int x2 = 
std::min(img0.cols, x0 + dsz.width); + int y2 = std::min(img0.rows, y0 + dsz.height); + Mat src0(img0, Range(y1, y2), Range(x1, x2)); + Mat dst(dftImg, Rect(0, 0, dsz.width, dsz.height)); + Mat dst1(dftImg, Rect(x1-x0, y1-y0, x2-x1, y2-y1)); + Mat cdst(corr, Rect(x, y, bsz.width, bsz.height)); + + for( k = 0; k < cn; k++ ) + { + Mat src = src0; + dftImg = Scalar::all(0); + + if( cn > 1 ) + { + src = depth == maxDepth ? dst1 : Mat(y2-y1, x2-x1, depth, &buf[0]); + int pairs[] = {k, 0}; + mixChannels(&src0, 1, &src, 1, pairs, 1); + } + + if( dst1.data != src.data ) + src.convertTo(dst1, dst1.depth()); + + if( x2 - x1 < dsz.width || y2 - y1 < dsz.height ) + copyMakeBorder(dst1, dst, y1-y0, dst.rows-dst1.rows-(y1-y0), + x1-x0, dst.cols-dst1.cols-(x1-x0), borderType); + + dft( dftImg, dftImg, 0, dsz.height ); + Mat dftTempl1(dftTempl, Rect(0, tcn > 1 ? k*dftsize.height : 0, + dftsize.width, dftsize.height)); + mulSpectrums(dftImg, dftTempl1, dftImg, 0, true); + dft( dftImg, dftImg, DFT_INVERSE + DFT_SCALE, bsz.height ); + + src = dftImg(Rect(0, 0, bsz.width, bsz.height)); + + if( ccn > 1 ) + { + if( cdepth != maxDepth ) + { + Mat plane(bsz, cdepth, &buf[0]); + src.convertTo(plane, cdepth, 1, delta); + src = plane; + } + int pairs[] = {0, k}; + mixChannels(&src, 1, &cdst, 1, pairs, 1); + } + else + { + if( k == 0 ) + src.convertTo(cdst, cdepth, 1, delta); + else + { + if( maxDepth != cdepth ) + { + Mat plane(bsz, cdepth, &buf[0]); + src.convertTo(plane, cdepth); + src = plane; + } + add(src, cdst, cdst); + } + } + } + } +} + +/*void +cv::crossCorr( const Mat& img, const Mat& templ, Mat& corr, + Point anchor, double delta, int borderType ) +{ + CvMat _img = img, _templ = templ, _corr = corr; + icvCrossCorr( &_img, &_templ, &_corr, anchor, delta, borderType ); +}*/ + +} + +/*****************************************************************************************/ + +void cv::matchTemplate( InputArray _img, InputArray _templ, OutputArray _result, int method ) +{ + CV_Assert( 
CV_TM_SQDIFF <= method && method <= CV_TM_CCOEFF_NORMED ); + + int numType = method == CV_TM_CCORR || method == CV_TM_CCORR_NORMED ? 0 : + method == CV_TM_CCOEFF || method == CV_TM_CCOEFF_NORMED ? 1 : 2; + bool isNormed = method == CV_TM_CCORR_NORMED || + method == CV_TM_SQDIFF_NORMED || + method == CV_TM_CCOEFF_NORMED; + + Mat img = _img.getMat(), templ = _templ.getMat(); + if( img.rows < templ.rows || img.cols < templ.cols ) + std::swap(img, templ); + + CV_Assert( (img.depth() == CV_8U || img.depth() == CV_32F) && + img.type() == templ.type() ); + + Size corrSize(img.cols - templ.cols + 1, img.rows - templ.rows + 1); + _result.create(corrSize, CV_32F); + Mat result = _result.getMat(); + + int cn = img.channels(); + crossCorr( img, templ, result, result.size(), result.type(), Point(0,0), 0, 0); + + if( method == CV_TM_CCORR ) + return; + + double invArea = 1./((double)templ.rows * templ.cols); + + Mat sum, sqsum; + Scalar templMean, templSdv; + double *q0 = 0, *q1 = 0, *q2 = 0, *q3 = 0; + double templNorm = 0, templSum2 = 0; + + if( method == CV_TM_CCOEFF ) + { + integral(img, sum, CV_64F); + templMean = mean(templ); + } + else + { + integral(img, sum, sqsum, CV_64F); + meanStdDev( templ, templMean, templSdv ); + + templNorm = CV_SQR(templSdv[0]) + CV_SQR(templSdv[1]) + + CV_SQR(templSdv[2]) + CV_SQR(templSdv[3]); + + if( templNorm < DBL_EPSILON && method == CV_TM_CCOEFF_NORMED ) + { + result = Scalar::all(1); + return; + } + + templSum2 = templNorm + + CV_SQR(templMean[0]) + CV_SQR(templMean[1]) + + CV_SQR(templMean[2]) + CV_SQR(templMean[3]); + + if( numType != 1 ) + { + templMean = Scalar::all(0); + templNorm = templSum2; + } + + templSum2 /= invArea; + templNorm = sqrt(templNorm); + templNorm /= sqrt(invArea); // care of accuracy here + + q0 = (double*)sqsum.data; + q1 = q0 + templ.cols*cn; + q2 = (double*)(sqsum.data + templ.rows*sqsum.step); + q3 = q2 + templ.cols*cn; + } + + double* p0 = (double*)sum.data; + double* p1 = p0 + templ.cols*cn; + double* p2 = 
(double*)(sum.data + templ.rows*sum.step); + double* p3 = p2 + templ.cols*cn; + + int sumstep = sum.data ? (int)(sum.step / sizeof(double)) : 0; + int sqstep = sqsum.data ? (int)(sqsum.step / sizeof(double)) : 0; + + int i, j, k; + + for( i = 0; i < result.rows; i++ ) + { + float* rrow = (float*)(result.data + i*result.step); + int idx = i * sumstep; + int idx2 = i * sqstep; + + for( j = 0; j < result.cols; j++, idx += cn, idx2 += cn ) + { + double num = rrow[j], t; + double wndMean2 = 0, wndSum2 = 0; + + if( numType == 1 ) + { + for( k = 0; k < cn; k++ ) + { + t = p0[idx+k] - p1[idx+k] - p2[idx+k] + p3[idx+k]; + wndMean2 += CV_SQR(t); + num -= t*templMean[k]; + } + + wndMean2 *= invArea; + } + + if( isNormed || numType == 2 ) + { + for( k = 0; k < cn; k++ ) + { + t = q0[idx2+k] - q1[idx2+k] - q2[idx2+k] + q3[idx2+k]; + wndSum2 += t; + } + + if( numType == 2 ) + num = wndSum2 - 2*num + templSum2; + } + + if( isNormed ) + { + t = sqrt(MAX(wndSum2 - wndMean2,0))*templNorm; + if( fabs(num) < t ) + num /= t; + else if( fabs(num) < t*1.125 ) + num = num > 0 ? 1 : -1; + else + num = method != CV_TM_SQDIFF_NORMED ? 0 : 1; + } + + rrow[j] = (float)num; + } + } +} + + +CV_IMPL void +cvMatchTemplate( const CvArr* _img, const CvArr* _templ, CvArr* _result, int method ) +{ + cv::Mat img = cv::cvarrToMat(_img), templ = cv::cvarrToMat(_templ), + result = cv::cvarrToMat(_result); + CV_Assert( result.size() == cv::Size(std::abs(img.cols - templ.cols) + 1, + std::abs(img.rows - templ.rows) + 1) && + result.type() == CV_32F ); + matchTemplate(img, templ, result, method); +} + +/* End of file. */ diff --git a/opencv/imgproc/thresh.cpp b/opencv/imgproc/thresh.cpp new file mode 100644 index 0000000..4579c8e --- /dev/null +++ b/opencv/imgproc/thresh.cpp @@ -0,0 +1,636 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#include "precomp.hpp" + +namespace cv +{ + +static void +thresh_8u( const Mat& _src, Mat& _dst, uchar thresh, uchar maxval, int type ) +{ + int i, j, j_scalar = 0; + uchar tab[256]; + Size roi = _src.size(); + roi.width *= _src.channels(); + + if( _src.isContinuous() && _dst.isContinuous() ) + { + roi.width *= roi.height; + roi.height = 1; + } + +#ifdef HAVE_TEGRA_OPTIMIZATION + switch( type ) + { + case THRESH_BINARY: + if(tegra::thresh_8u_binary(_src, _dst, roi.width, roi.height, thresh, maxval)) return; + break; + case THRESH_BINARY_INV: + if(tegra::thresh_8u_binary_inv(_src, _dst, roi.width, roi.height, thresh, maxval)) return; + break; + case THRESH_TRUNC: + if(tegra::thresh_8u_trunc(_src, _dst, roi.width, roi.height, thresh)) return; + break; + case THRESH_TOZERO: + if(tegra::thresh_8u_tozero(_src, _dst, roi.width, roi.height, thresh)) return; + break; + case THRESH_TOZERO_INV: + if(tegra::thresh_8u_tozero_inv(_src, _dst, roi.width, roi.height, thresh)) return; + break; + } +#endif + + switch( type ) + { + case THRESH_BINARY: + for( i = 0; i <= thresh; i++ ) + tab[i] = 0; + for( ; i < 256; i++ ) + tab[i] = maxval; + break; + case THRESH_BINARY_INV: + for( i = 0; i <= thresh; i++ ) + tab[i] = maxval; + for( ; i < 256; i++ ) + tab[i] = 0; + break; + case THRESH_TRUNC: + for( i = 0; i <= thresh; i++ ) + tab[i] = (uchar)i; + for( ; i < 256; i++ ) + tab[i] = thresh; + break; + case THRESH_TOZERO: + for( i = 0; i <= thresh; i++ ) + tab[i] = 0; + for( ; i < 256; i++ ) + tab[i] = (uchar)i; + break; + case THRESH_TOZERO_INV: + for( i = 0; i <= thresh; i++ ) + tab[i] = (uchar)i; + for( ; i < 256; i++ ) + tab[i] = 0; + break; + default: + CV_Error( CV_StsBadArg, "Unknown threshold type" ); + } + +#if CV_SSE2 + if( checkHardwareSupport(CV_CPU_SSE2) ) + { + __m128i _x80 = _mm_set1_epi8('\x80'); + __m128i thresh_u = _mm_set1_epi8(thresh); + __m128i thresh_s = _mm_set1_epi8(thresh ^ 0x80); + __m128i maxval_ = _mm_set1_epi8(maxval); + j_scalar = roi.width & 
-8; + + for( i = 0; i < roi.height; i++ ) + { + const uchar* src = (const uchar*)(_src.data + _src.step*i); + uchar* dst = (uchar*)(_dst.data + _dst.step*i); + + switch( type ) + { + case THRESH_BINARY: + for( j = 0; j <= roi.width - 32; j += 32 ) + { + __m128i v0, v1; + v0 = _mm_loadu_si128( (const __m128i*)(src + j) ); + v1 = _mm_loadu_si128( (const __m128i*)(src + j + 16) ); + v0 = _mm_cmpgt_epi8( _mm_xor_si128(v0, _x80), thresh_s ); + v1 = _mm_cmpgt_epi8( _mm_xor_si128(v1, _x80), thresh_s ); + v0 = _mm_and_si128( v0, maxval_ ); + v1 = _mm_and_si128( v1, maxval_ ); + _mm_storeu_si128( (__m128i*)(dst + j), v0 ); + _mm_storeu_si128( (__m128i*)(dst + j + 16), v1 ); + } + + for( ; j <= roi.width - 8; j += 8 ) + { + __m128i v0 = _mm_loadl_epi64( (const __m128i*)(src + j) ); + v0 = _mm_cmpgt_epi8( _mm_xor_si128(v0, _x80), thresh_s ); + v0 = _mm_and_si128( v0, maxval_ ); + _mm_storel_epi64( (__m128i*)(dst + j), v0 ); + } + break; + + case THRESH_BINARY_INV: + for( j = 0; j <= roi.width - 32; j += 32 ) + { + __m128i v0, v1; + v0 = _mm_loadu_si128( (const __m128i*)(src + j) ); + v1 = _mm_loadu_si128( (const __m128i*)(src + j + 16) ); + v0 = _mm_cmpgt_epi8( _mm_xor_si128(v0, _x80), thresh_s ); + v1 = _mm_cmpgt_epi8( _mm_xor_si128(v1, _x80), thresh_s ); + v0 = _mm_andnot_si128( v0, maxval_ ); + v1 = _mm_andnot_si128( v1, maxval_ ); + _mm_storeu_si128( (__m128i*)(dst + j), v0 ); + _mm_storeu_si128( (__m128i*)(dst + j + 16), v1 ); + } + + for( ; j <= roi.width - 8; j += 8 ) + { + __m128i v0 = _mm_loadl_epi64( (const __m128i*)(src + j) ); + v0 = _mm_cmpgt_epi8( _mm_xor_si128(v0, _x80), thresh_s ); + v0 = _mm_andnot_si128( v0, maxval_ ); + _mm_storel_epi64( (__m128i*)(dst + j), v0 ); + } + break; + + case THRESH_TRUNC: + for( j = 0; j <= roi.width - 32; j += 32 ) + { + __m128i v0, v1; + v0 = _mm_loadu_si128( (const __m128i*)(src + j) ); + v1 = _mm_loadu_si128( (const __m128i*)(src + j + 16) ); + v0 = _mm_subs_epu8( v0, _mm_subs_epu8( v0, thresh_u )); + v1 = _mm_subs_epu8( v1, 
_mm_subs_epu8( v1, thresh_u )); + _mm_storeu_si128( (__m128i*)(dst + j), v0 ); + _mm_storeu_si128( (__m128i*)(dst + j + 16), v1 ); + } + + for( ; j <= roi.width - 8; j += 8 ) + { + __m128i v0 = _mm_loadl_epi64( (const __m128i*)(src + j) ); + v0 = _mm_subs_epu8( v0, _mm_subs_epu8( v0, thresh_u )); + _mm_storel_epi64( (__m128i*)(dst + j), v0 ); + } + break; + + case THRESH_TOZERO: + for( j = 0; j <= roi.width - 32; j += 32 ) + { + __m128i v0, v1; + v0 = _mm_loadu_si128( (const __m128i*)(src + j) ); + v1 = _mm_loadu_si128( (const __m128i*)(src + j + 16) ); + v0 = _mm_and_si128( v0, _mm_cmpgt_epi8(_mm_xor_si128(v0, _x80), thresh_s )); + v1 = _mm_and_si128( v1, _mm_cmpgt_epi8(_mm_xor_si128(v1, _x80), thresh_s )); + _mm_storeu_si128( (__m128i*)(dst + j), v0 ); + _mm_storeu_si128( (__m128i*)(dst + j + 16), v1 ); + } + + for( ; j <= roi.width - 8; j += 8 ) + { + __m128i v0 = _mm_loadl_epi64( (const __m128i*)(src + j) ); + v0 = _mm_and_si128( v0, _mm_cmpgt_epi8(_mm_xor_si128(v0, _x80), thresh_s )); + _mm_storel_epi64( (__m128i*)(dst + j), v0 ); + } + break; + + case THRESH_TOZERO_INV: + for( j = 0; j <= roi.width - 32; j += 32 ) + { + __m128i v0, v1; + v0 = _mm_loadu_si128( (const __m128i*)(src + j) ); + v1 = _mm_loadu_si128( (const __m128i*)(src + j + 16) ); + v0 = _mm_andnot_si128( _mm_cmpgt_epi8(_mm_xor_si128(v0, _x80), thresh_s ), v0 ); + v1 = _mm_andnot_si128( _mm_cmpgt_epi8(_mm_xor_si128(v1, _x80), thresh_s ), v1 ); + _mm_storeu_si128( (__m128i*)(dst + j), v0 ); + _mm_storeu_si128( (__m128i*)(dst + j + 16), v1 ); + } + + for( ; j <= roi.width - 8; j += 8 ) + { + __m128i v0 = _mm_loadl_epi64( (const __m128i*)(src + j) ); + v0 = _mm_andnot_si128( _mm_cmpgt_epi8(_mm_xor_si128(v0, _x80), thresh_s ), v0 ); + _mm_storel_epi64( (__m128i*)(dst + j), v0 ); + } + break; + } + } + } +#endif + + if( j_scalar < roi.width ) + { + for( i = 0; i < roi.height; i++ ) + { + const uchar* src = (const uchar*)(_src.data + _src.step*i); + uchar* dst = (uchar*)(_dst.data + _dst.step*i); + + 
for( j = j_scalar; j <= roi.width - 4; j += 4 ) + { + uchar t0 = tab[src[j]]; + uchar t1 = tab[src[j+1]]; + + dst[j] = t0; + dst[j+1] = t1; + + t0 = tab[src[j+2]]; + t1 = tab[src[j+3]]; + + dst[j+2] = t0; + dst[j+3] = t1; + } + + for( ; j < roi.width; j++ ) + dst[j] = tab[src[j]]; + } + } +} + + +static void +thresh_32f( const Mat& _src, Mat& _dst, float thresh, float maxval, int type ) +{ + int i, j; + Size roi = _src.size(); + roi.width *= _src.channels(); + const float* src = (const float*)_src.data; + float* dst = (float*)_dst.data; + size_t src_step = _src.step/sizeof(src[0]); + size_t dst_step = _dst.step/sizeof(dst[0]); + +#if CV_SSE2 + volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE); +#endif + + if( _src.isContinuous() && _dst.isContinuous() ) + { + roi.width *= roi.height; + roi.height = 1; + } + + switch( type ) + { + case THRESH_BINARY: + for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step ) + { + j = 0; + #if CV_SSE2 + if( useSIMD ) + { + __m128 thresh4 = _mm_set1_ps(thresh), maxval4 = _mm_set1_ps(maxval); + for( ; j <= roi.width - 8; j += 8 ) + { + __m128 v0, v1; + v0 = _mm_loadu_ps( src + j ); + v1 = _mm_loadu_ps( src + j + 4 ); + v0 = _mm_cmpgt_ps( v0, thresh4 ); + v1 = _mm_cmpgt_ps( v1, thresh4 ); + v0 = _mm_and_ps( v0, maxval4 ); + v1 = _mm_and_ps( v1, maxval4 ); + _mm_storeu_ps( dst + j, v0 ); + _mm_storeu_ps( dst + j + 4, v1 ); + } + } + #endif + + for( ; j < roi.width; j++ ) + dst[j] = src[j] > thresh ? 
maxval : 0; + } + break; + + case THRESH_BINARY_INV: + for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step ) + { + j = 0; + #if CV_SSE2 + if( useSIMD ) + { + __m128 thresh4 = _mm_set1_ps(thresh), maxval4 = _mm_set1_ps(maxval); + for( ; j <= roi.width - 8; j += 8 ) + { + __m128 v0, v1; + v0 = _mm_loadu_ps( src + j ); + v1 = _mm_loadu_ps( src + j + 4 ); + v0 = _mm_cmple_ps( v0, thresh4 ); + v1 = _mm_cmple_ps( v1, thresh4 ); + v0 = _mm_and_ps( v0, maxval4 ); + v1 = _mm_and_ps( v1, maxval4 ); + _mm_storeu_ps( dst + j, v0 ); + _mm_storeu_ps( dst + j + 4, v1 ); + } + } + #endif + + for( ; j < roi.width; j++ ) + dst[j] = src[j] <= thresh ? maxval : 0; + } + break; + + case THRESH_TRUNC: + for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step ) + { + j = 0; + #if CV_SSE2 + if( useSIMD ) + { + __m128 thresh4 = _mm_set1_ps(thresh); + for( ; j <= roi.width - 8; j += 8 ) + { + __m128 v0, v1; + v0 = _mm_loadu_ps( src + j ); + v1 = _mm_loadu_ps( src + j + 4 ); + v0 = _mm_min_ps( v0, thresh4 ); + v1 = _mm_min_ps( v1, thresh4 ); + _mm_storeu_ps( dst + j, v0 ); + _mm_storeu_ps( dst + j + 4, v1 ); + } + } + #endif + + for( ; j < roi.width; j++ ) + dst[j] = std::min(src[j], thresh); + } + break; + + case THRESH_TOZERO: + for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step ) + { + j = 0; + #if CV_SSE2 + if( useSIMD ) + { + __m128 thresh4 = _mm_set1_ps(thresh); + for( ; j <= roi.width - 8; j += 8 ) + { + __m128 v0, v1; + v0 = _mm_loadu_ps( src + j ); + v1 = _mm_loadu_ps( src + j + 4 ); + v0 = _mm_and_ps(v0, _mm_cmpgt_ps(v0, thresh4)); + v1 = _mm_and_ps(v1, _mm_cmpgt_ps(v1, thresh4)); + _mm_storeu_ps( dst + j, v0 ); + _mm_storeu_ps( dst + j + 4, v1 ); + } + } + #endif + + for( ; j < roi.width; j++ ) + { + float v = src[j]; + dst[j] = v > thresh ? 
v : 0; + } + } + break; + + case THRESH_TOZERO_INV: + for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step ) + { + j = 0; + #if CV_SSE2 + if( useSIMD ) + { + __m128 thresh4 = _mm_set1_ps(thresh); + for( ; j <= roi.width - 8; j += 8 ) + { + __m128 v0, v1; + v0 = _mm_loadu_ps( src + j ); + v1 = _mm_loadu_ps( src + j + 4 ); + v0 = _mm_and_ps(v0, _mm_cmple_ps(v0, thresh4)); + v1 = _mm_and_ps(v1, _mm_cmple_ps(v1, thresh4)); + _mm_storeu_ps( dst + j, v0 ); + _mm_storeu_ps( dst + j + 4, v1 ); + } + } + #endif + for( ; j < roi.width; j++ ) + { + float v = src[j]; + dst[j] = v <= thresh ? v : 0; + } + } + break; + default: + return CV_Error( CV_StsBadArg, "" ); + } +} + + +static double +getThreshVal_Otsu_8u( const Mat& _src ) +{ + Size size = _src.size(); + if( _src.isContinuous() ) + { + size.width *= size.height; + size.height = 1; + } + const int N = 256; + int i, j, h[N] = {0}; + for( i = 0; i < size.height; i++ ) + { + const uchar* src = _src.data + _src.step*i; + for( j = 0; j <= size.width - 4; j += 4 ) + { + int v0 = src[j], v1 = src[j+1]; + h[v0]++; h[v1]++; + v0 = src[j+2]; v1 = src[j+3]; + h[v0]++; h[v1]++; + } + for( ; j < size.width; j++ ) + h[src[j]]++; + } + + double mu = 0, scale = 1./(size.width*size.height); + for( i = 0; i < N; i++ ) + mu += i*(double)h[i]; + + mu *= scale; + double mu1 = 0, q1 = 0; + double max_sigma = 0, max_val = 0; + + for( i = 0; i < N; i++ ) + { + double p_i, q2, mu2, sigma; + + p_i = h[i]*scale; + mu1 *= q1; + q1 += p_i; + q2 = 1. - q1; + + if( std::min(q1,q2) < FLT_EPSILON || std::max(q1,q2) > 1. 
- FLT_EPSILON ) + continue; + + mu1 = (mu1 + i*p_i)/q1; + mu2 = (mu - q1*mu1)/q2; + sigma = q1*q2*(mu1 - mu2)*(mu1 - mu2); + if( sigma > max_sigma ) + { + max_sigma = sigma; + max_val = i; + } + } + + return max_val; +} + +} + +double cv::threshold( InputArray _src, OutputArray _dst, double thresh, double maxval, int type ) +{ + Mat src = _src.getMat(); + bool use_otsu = (type & THRESH_OTSU) != 0; + type &= THRESH_MASK; + + if( use_otsu ) + { + CV_Assert( src.type() == CV_8UC1 ); + thresh = getThreshVal_Otsu_8u(src); + } + + _dst.create( src.size(), src.type() ); + Mat dst = _dst.getMat(); + + if( src.depth() == CV_8U ) + { + int ithresh = cvFloor(thresh); + thresh = ithresh; + int imaxval = cvRound(maxval); + if( type == THRESH_TRUNC ) + imaxval = ithresh; + imaxval = saturate_cast(imaxval); + + if( ithresh < 0 || ithresh >= 255 ) + { + if( type == THRESH_BINARY || type == THRESH_BINARY_INV || + ((type == THRESH_TRUNC || type == THRESH_TOZERO_INV) && ithresh < 0) || + (type == THRESH_TOZERO && ithresh >= 255) ) + { + int v = type == THRESH_BINARY ? (ithresh >= 255 ? 0 : imaxval) : + type == THRESH_BINARY_INV ? (ithresh >= 255 ? imaxval : 0) : + type == THRESH_TRUNC ? 
imaxval : 0; + dst.setTo(v); + } + else + src.copyTo(dst); + } + else + thresh_8u( src, dst, (uchar)ithresh, (uchar)imaxval, type ); + } + else if( src.depth() == CV_32F ) + thresh_32f( src, dst, (float)thresh, (float)maxval, type ); + else + CV_Error( CV_StsUnsupportedFormat, "" ); + + return thresh; +} + + +void cv::adaptiveThreshold( InputArray _src, OutputArray _dst, double maxValue, + int method, int type, int blockSize, double delta ) +{ + Mat src = _src.getMat(); + CV_Assert( src.type() == CV_8UC1 ); + CV_Assert( blockSize % 2 == 1 && blockSize > 1 ); + Size size = src.size(); + + _dst.create( size, src.type() ); + Mat dst = _dst.getMat(); + + if( maxValue < 0 ) + { + dst = Scalar(0); + return; + } + + Mat mean; + + if( src.data != dst.data ) + mean = dst; + + if( method == ADAPTIVE_THRESH_MEAN_C ) + boxFilter( src, mean, src.type(), Size(blockSize, blockSize), + Point(-1,-1), true, BORDER_REPLICATE ); + else if( method == ADAPTIVE_THRESH_GAUSSIAN_C ) + GaussianBlur( src, mean, Size(blockSize, blockSize), 0, 0, BORDER_REPLICATE ); + else + CV_Error( CV_StsBadFlag, "Unknown/unsupported adaptive threshold method" ); + + int i, j; + uchar imaxval = saturate_cast(maxValue); + int idelta = type == THRESH_BINARY ? cvCeil(delta) : cvFloor(delta); + uchar tab[768]; + + if( type == CV_THRESH_BINARY ) + for( i = 0; i < 768; i++ ) + tab[i] = (uchar)(i - 255 > -idelta ? imaxval : 0); + else if( type == CV_THRESH_BINARY_INV ) + for( i = 0; i < 768; i++ ) + tab[i] = (uchar)(i - 255 <= -idelta ? 
imaxval : 0); + else + CV_Error( CV_StsBadFlag, "Unknown/unsupported threshold type" ); + + if( src.isContinuous() && mean.isContinuous() && dst.isContinuous() ) + { + size.width *= size.height; + size.height = 1; + } + + for( i = 0; i < size.height; i++ ) + { + const uchar* sdata = src.data + src.step*i; + const uchar* mdata = mean.data + mean.step*i; + uchar* ddata = dst.data + dst.step*i; + + for( j = 0; j < size.width; j++ ) + ddata[j] = tab[sdata[j] - mdata[j] + 255]; + } +} + +CV_IMPL double +cvThreshold( const void* srcarr, void* dstarr, double thresh, double maxval, int type ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), dst0 = dst; + + CV_Assert( src.size == dst.size && src.channels() == dst.channels() && + (src.depth() == dst.depth() || dst.depth() == CV_8U)); + + thresh = cv::threshold( src, dst, thresh, maxval, type ); + if( dst0.data != dst.data ) + dst.convertTo( dst0, dst0.depth() ); + return thresh; +} + + +CV_IMPL void +cvAdaptiveThreshold( const void *srcIm, void *dstIm, double maxValue, + int method, int type, int blockSize, double delta ) +{ + cv::Mat src = cv::cvarrToMat(srcIm), dst = cv::cvarrToMat(dstIm); + CV_Assert( src.size == dst.size && src.type() == dst.type() ); + cv::adaptiveThreshold( src, dst, maxValue, method, type, blockSize, delta ); +} + +/* End of file. */ diff --git a/opencv/imgproc/undistort.cpp b/opencv/imgproc/undistort.cpp new file mode 100644 index 0000000..d84cfef --- /dev/null +++ b/opencv/imgproc/undistort.cpp @@ -0,0 +1,572 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#include "precomp.hpp" + +cv::Mat cv::getDefaultNewCameraMatrix( InputArray _cameraMatrix, Size imgsize, + bool centerPrincipalPoint ) +{ + Mat cameraMatrix = _cameraMatrix.getMat(); + if( !centerPrincipalPoint && cameraMatrix.type() == CV_64F ) + return cameraMatrix; + + Mat newCameraMatrix; + cameraMatrix.convertTo(newCameraMatrix, CV_64F); + if( centerPrincipalPoint ) + { + ((double*)newCameraMatrix.data)[2] = (imgsize.width-1)*0.5; + ((double*)newCameraMatrix.data)[5] = (imgsize.height-1)*0.5; + } + return newCameraMatrix; +} + +void cv::initUndistortRectifyMap( InputArray _cameraMatrix, InputArray _distCoeffs, + InputArray _matR, InputArray _newCameraMatrix, + Size size, int m1type, OutputArray _map1, OutputArray _map2 ) +{ + Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat(); + Mat matR = _matR.getMat(), newCameraMatrix = _newCameraMatrix.getMat(); + + if( m1type <= 0 ) + m1type = CV_16SC2; + CV_Assert( m1type == CV_16SC2 || m1type == CV_32FC1 || m1type == CV_32FC2 ); + _map1.create( size, m1type ); + Mat map1 = _map1.getMat(), map2; + if( m1type != CV_32FC2 ) + { + _map2.create( size, m1type == CV_16SC2 ? 
CV_16UC1 : CV_32FC1 ); + map2 = _map2.getMat(); + } + else + _map2.release(); + + Mat_ R = Mat_::eye(3, 3); + Mat_ A = Mat_(cameraMatrix), Ar; + + if( newCameraMatrix.data ) + Ar = Mat_(newCameraMatrix); + else + Ar = getDefaultNewCameraMatrix( A, size, true ); + + if( matR.data ) + R = Mat_(matR); + + if( distCoeffs.data ) + distCoeffs = Mat_(distCoeffs); + else + { + distCoeffs.create(8, 1, CV_64F); + distCoeffs = 0.; + } + + CV_Assert( A.size() == Size(3,3) && A.size() == R.size() ); + CV_Assert( Ar.size() == Size(3,3) || Ar.size() == Size(4, 3)); + Mat_ iR = (Ar.colRange(0,3)*R).inv(DECOMP_LU); + const double* ir = &iR(0,0); + + double u0 = A(0, 2), v0 = A(1, 2); + double fx = A(0, 0), fy = A(1, 1); + + CV_Assert( distCoeffs.size() == Size(1, 4) || distCoeffs.size() == Size(4, 1) || + distCoeffs.size() == Size(1, 5) || distCoeffs.size() == Size(5, 1) || + distCoeffs.size() == Size(1, 8) || distCoeffs.size() == Size(8, 1)); + + if( distCoeffs.rows != 1 && !distCoeffs.isContinuous() ) + distCoeffs = distCoeffs.t(); + + double k1 = ((double*)distCoeffs.data)[0]; + double k2 = ((double*)distCoeffs.data)[1]; + double p1 = ((double*)distCoeffs.data)[2]; + double p2 = ((double*)distCoeffs.data)[3]; + double k3 = distCoeffs.cols + distCoeffs.rows - 1 >= 5 ? ((double*)distCoeffs.data)[4] : 0.; + double k4 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? ((double*)distCoeffs.data)[5] : 0.; + double k5 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? ((double*)distCoeffs.data)[6] : 0.; + double k6 = distCoeffs.cols + distCoeffs.rows - 1 >= 8 ? 
((double*)distCoeffs.data)[7] : 0.; + + for( int i = 0; i < size.height; i++ ) + { + float* m1f = (float*)(map1.data + map1.step*i); + float* m2f = (float*)(map2.data + map2.step*i); + short* m1 = (short*)m1f; + ushort* m2 = (ushort*)m2f; + double _x = i*ir[1] + ir[2], _y = i*ir[4] + ir[5], _w = i*ir[7] + ir[8]; + + for( int j = 0; j < size.width; j++, _x += ir[0], _y += ir[3], _w += ir[6] ) + { + double w = 1./_w, x = _x*w, y = _y*w; + double x2 = x*x, y2 = y*y; + double r2 = x2 + y2, _2xy = 2*x*y; + double kr = (1 + ((k3*r2 + k2)*r2 + k1)*r2)/(1 + ((k6*r2 + k5)*r2 + k4)*r2); + double u = fx*(x*kr + p1*_2xy + p2*(r2 + 2*x2)) + u0; + double v = fy*(y*kr + p1*(r2 + 2*y2) + p2*_2xy) + v0; + if( m1type == CV_16SC2 ) + { + int iu = saturate_cast(u*INTER_TAB_SIZE); + int iv = saturate_cast(v*INTER_TAB_SIZE); + m1[j*2] = (short)(iu >> INTER_BITS); + m1[j*2+1] = (short)(iv >> INTER_BITS); + m2[j] = (ushort)((iv & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE + (iu & (INTER_TAB_SIZE-1))); + } + else if( m1type == CV_32FC1 ) + { + m1f[j] = (float)u; + m2f[j] = (float)v; + } + else + { + m1f[j*2] = (float)u; + m1f[j*2+1] = (float)v; + } + } + } +} + + +void cv::undistort( InputArray _src, OutputArray _dst, InputArray _cameraMatrix, + InputArray _distCoeffs, InputArray _newCameraMatrix ) +{ + Mat src = _src.getMat(), cameraMatrix = _cameraMatrix.getMat(); + Mat distCoeffs = _distCoeffs.getMat(), newCameraMatrix = _newCameraMatrix.getMat(); + + _dst.create( src.size(), src.type() ); + Mat dst = _dst.getMat(); + + CV_Assert( dst.data != src.data ); + + int stripe_size0 = std::min(std::max(1, (1 << 12) / std::max(src.cols, 1)), src.rows); + Mat map1(stripe_size0, src.cols, CV_16SC2), map2(stripe_size0, src.cols, CV_16UC1); + + Mat_ A, Ar, I = Mat_::eye(3,3); + + cameraMatrix.convertTo(A, CV_64F); + if( distCoeffs.data ) + distCoeffs = Mat_(distCoeffs); + else + { + distCoeffs.create(5, 1, CV_64F); + distCoeffs = 0.; + } + + if( newCameraMatrix.data ) + newCameraMatrix.convertTo(Ar, 
CV_64F); + else + A.copyTo(Ar); + + double v0 = Ar(1, 2); + for( int y = 0; y < src.rows; y += stripe_size0 ) + { + int stripe_size = std::min( stripe_size0, src.rows - y ); + Ar(1, 2) = v0 - y; + Mat map1_part = map1.rowRange(0, stripe_size), + map2_part = map2.rowRange(0, stripe_size), + dst_part = dst.rowRange(y, y + stripe_size); + + initUndistortRectifyMap( A, distCoeffs, I, Ar, Size(src.cols, stripe_size), + map1_part.type(), map1_part, map2_part ); + remap( src, dst_part, map1_part, map2_part, INTER_LINEAR, BORDER_CONSTANT ); + } +} + + +CV_IMPL void +cvUndistort2( const CvArr* srcarr, CvArr* dstarr, const CvMat* Aarr, const CvMat* dist_coeffs, const CvMat* newAarr ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), dst0 = dst; + cv::Mat A = cv::cvarrToMat(Aarr), distCoeffs = cv::cvarrToMat(dist_coeffs), newA; + if( newAarr ) + newA = cv::cvarrToMat(newAarr); + + CV_Assert( src.size() == dst.size() && src.type() == dst.type() ); + cv::undistort( src, dst, A, distCoeffs, newA ); +} + + +CV_IMPL void cvInitUndistortMap( const CvMat* Aarr, const CvMat* dist_coeffs, + CvArr* mapxarr, CvArr* mapyarr ) +{ + cv::Mat A = cv::cvarrToMat(Aarr), distCoeffs = cv::cvarrToMat(dist_coeffs); + cv::Mat mapx = cv::cvarrToMat(mapxarr), mapy, mapx0 = mapx, mapy0; + + if( mapyarr ) + mapy0 = mapy = cv::cvarrToMat(mapyarr); + + cv::initUndistortRectifyMap( A, distCoeffs, cv::Mat(), A, + mapx.size(), mapx.type(), mapx, mapy ); + CV_Assert( mapx0.data == mapx.data && mapy0.data == mapy.data ); +} + +void +cvInitUndistortRectifyMap( const CvMat* Aarr, const CvMat* dist_coeffs, + const CvMat *Rarr, const CvMat* ArArr, CvArr* mapxarr, CvArr* mapyarr ) +{ + cv::Mat A = cv::cvarrToMat(Aarr), distCoeffs, R, Ar; + cv::Mat mapx = cv::cvarrToMat(mapxarr), mapy, mapx0 = mapx, mapy0; + + if( mapyarr ) + mapy0 = mapy = cv::cvarrToMat(mapyarr); + + if( dist_coeffs ) + distCoeffs = cv::cvarrToMat(dist_coeffs); + if( Rarr ) + R = cv::cvarrToMat(Rarr); + if( ArArr ) + Ar = 
cv::cvarrToMat(ArArr); + + cv::initUndistortRectifyMap( A, distCoeffs, R, Ar, mapx.size(), mapx.type(), mapx, mapy ); + CV_Assert( mapx0.data == mapx.data && mapy0.data == mapy.data ); +} + + +void cvUndistortPoints( const CvMat* _src, CvMat* _dst, const CvMat* _cameraMatrix, + const CvMat* _distCoeffs, + const CvMat* matR, const CvMat* matP ) +{ + double A[3][3], RR[3][3], k[8]={0,0,0,0,0,0,0,0}, fx, fy, ifx, ify, cx, cy; + CvMat matA=cvMat(3, 3, CV_64F, A), _Dk; + CvMat _RR=cvMat(3, 3, CV_64F, RR); + const CvPoint2D32f* srcf; + const CvPoint2D64f* srcd; + CvPoint2D32f* dstf; + CvPoint2D64f* dstd; + int stype, dtype; + int sstep, dstep; + int i, j, n, iters = 1; + + CV_Assert( CV_IS_MAT(_src) && CV_IS_MAT(_dst) && + (_src->rows == 1 || _src->cols == 1) && + (_dst->rows == 1 || _dst->cols == 1) && + _src->cols + _src->rows - 1 == _dst->rows + _dst->cols - 1 && + (CV_MAT_TYPE(_src->type) == CV_32FC2 || CV_MAT_TYPE(_src->type) == CV_64FC2) && + (CV_MAT_TYPE(_dst->type) == CV_32FC2 || CV_MAT_TYPE(_dst->type) == CV_64FC2)); + + CV_Assert( CV_IS_MAT(_cameraMatrix) && + _cameraMatrix->rows == 3 && _cameraMatrix->cols == 3 ); + + cvConvert( _cameraMatrix, &matA ); + + if( _distCoeffs ) + { + CV_Assert( CV_IS_MAT(_distCoeffs) && + (_distCoeffs->rows == 1 || _distCoeffs->cols == 1) && + (_distCoeffs->rows*_distCoeffs->cols == 4 || + _distCoeffs->rows*_distCoeffs->cols == 5 || + _distCoeffs->rows*_distCoeffs->cols == 8)); + + _Dk = cvMat( _distCoeffs->rows, _distCoeffs->cols, + CV_MAKETYPE(CV_64F,CV_MAT_CN(_distCoeffs->type)), k); + + cvConvert( _distCoeffs, &_Dk ); + iters = 5; + } + + if( matR ) + { + CV_Assert( CV_IS_MAT(matR) && matR->rows == 3 && matR->cols == 3 ); + cvConvert( matR, &_RR ); + } + else + cvSetIdentity(&_RR); + + if( matP ) + { + double PP[3][3]; + CvMat _P3x3, _PP=cvMat(3, 3, CV_64F, PP); + CV_Assert( CV_IS_MAT(matP) && matP->rows == 3 && (matP->cols == 3 || matP->cols == 4)); + cvConvert( cvGetCols(matP, &_P3x3, 0, 3), &_PP ); + cvMatMul( &_PP, &_RR, 
&_RR ); + } + + srcf = (const CvPoint2D32f*)_src->data.ptr; + srcd = (const CvPoint2D64f*)_src->data.ptr; + dstf = (CvPoint2D32f*)_dst->data.ptr; + dstd = (CvPoint2D64f*)_dst->data.ptr; + stype = CV_MAT_TYPE(_src->type); + dtype = CV_MAT_TYPE(_dst->type); + sstep = _src->rows == 1 ? 1 : _src->step/CV_ELEM_SIZE(stype); + dstep = _dst->rows == 1 ? 1 : _dst->step/CV_ELEM_SIZE(dtype); + + n = _src->rows + _src->cols - 1; + + fx = A[0][0]; + fy = A[1][1]; + ifx = 1./fx; + ify = 1./fy; + cx = A[0][2]; + cy = A[1][2]; + + for( i = 0; i < n; i++ ) + { + double x, y, x0, y0; + if( stype == CV_32FC2 ) + { + x = srcf[i*sstep].x; + y = srcf[i*sstep].y; + } + else + { + x = srcd[i*sstep].x; + y = srcd[i*sstep].y; + } + + x0 = x = (x - cx)*ifx; + y0 = y = (y - cy)*ify; + + // compensate distortion iteratively + for( j = 0; j < iters; j++ ) + { + double r2 = x*x + y*y; + double icdist = (1 + ((k[7]*r2 + k[6])*r2 + k[5])*r2)/(1 + ((k[4]*r2 + k[1])*r2 + k[0])*r2); + double deltaX = 2*k[2]*x*y + k[3]*(r2 + 2*x*x); + double deltaY = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y; + x = (x0 - deltaX)*icdist; + y = (y0 - deltaY)*icdist; + } + + double xx = RR[0][0]*x + RR[0][1]*y + RR[0][2]; + double yy = RR[1][0]*x + RR[1][1]*y + RR[1][2]; + double ww = 1./(RR[2][0]*x + RR[2][1]*y + RR[2][2]); + x = xx*ww; + y = yy*ww; + + if( dtype == CV_32FC2 ) + { + dstf[i*dstep].x = (float)x; + dstf[i*dstep].y = (float)y; + } + else + { + dstd[i*dstep].x = x; + dstd[i*dstep].y = y; + } + } +} + + +void cv::undistortPoints( InputArray _src, OutputArray _dst, + InputArray _cameraMatrix, + InputArray _distCoeffs, + InputArray _Rmat, + InputArray _Pmat ) +{ + Mat src = _src.getMat(), cameraMatrix = _cameraMatrix.getMat(); + Mat distCoeffs = _distCoeffs.getMat(), R = _Rmat.getMat(), P = _Pmat.getMat(); + + CV_Assert( src.isContinuous() && (src.depth() == CV_32F || src.depth() == CV_64F) && + ((src.rows == 1 && src.channels() == 2) || src.cols*src.channels() == 2)); + + _dst.create(src.size(), src.type(), -1, true); + 
Mat dst = _dst.getMat(); + + CvMat _csrc = src, _cdst = dst, _ccameraMatrix = cameraMatrix; + CvMat matR, matP, _cdistCoeffs, *pR=0, *pP=0, *pD=0; + if( R.data ) + pR = &(matR = R); + if( P.data ) + pP = &(matP = P); + if( distCoeffs.data ) + pD = &(_cdistCoeffs = distCoeffs); + cvUndistortPoints(&_csrc, &_cdst, &_ccameraMatrix, pD, pR, pP); +} + +namespace cv +{ + +static Point2f mapPointSpherical(const Point2f& p, float alpha, Vec4d* J, int projType) +{ + double x = p.x, y = p.y; + double beta = 1 + 2*alpha; + double v = x*x + y*y + 1, iv = 1/v; + double u = sqrt(beta*v + alpha*alpha); + + double k = (u - alpha)*iv; + double kv = (v*beta/u - (u - alpha)*2)*iv*iv; + double kx = kv*x, ky = kv*y; + + if( projType == PROJ_SPHERICAL_ORTHO ) + { + if(J) + *J = Vec4d(kx*x + k, kx*y, ky*x, ky*y + k); + return Point2f((float)(x*k), (float)(y*k)); + } + if( projType == PROJ_SPHERICAL_EQRECT ) + { + // equirectangular + double iR = 1/(alpha + 1); + double x1 = std::max(std::min(x*k*iR, 1.), -1.); + double y1 = std::max(std::min(y*k*iR, 1.), -1.); + + if(J) + { + double fx1 = iR/sqrt(1 - x1*x1); + double fy1 = iR/sqrt(1 - y1*y1); + *J = Vec4d(fx1*(kx*x + k), fx1*ky*x, fy1*kx*y, fy1*(ky*y + k)); + } + return Point2f((float)asin(x1), (float)asin(y1)); + } + CV_Error(CV_StsBadArg, "Unknown projection type"); + return Point2f(); +} + + +static Point2f invMapPointSpherical(Point2f _p, float alpha, int projType) +{ + static int avgiter = 0, avgn = 0; + + double eps = 1e-12; + Vec2d p(_p.x, _p.y), q(_p.x, _p.y), err; + Vec4d J; + int i, maxiter = 5; + + for( i = 0; i < maxiter; i++ ) + { + Point2f p1 = mapPointSpherical(Point2f((float)q[0], (float)q[1]), alpha, &J, projType); + err = Vec2d(p1.x, p1.y) - p; + if( err[0]*err[0] + err[1]*err[1] < eps ) + break; + + Vec4d JtJ(J[0]*J[0] + J[2]*J[2], J[0]*J[1] + J[2]*J[3], + J[0]*J[1] + J[2]*J[3], J[1]*J[1] + J[3]*J[3]); + double d = JtJ[0]*JtJ[3] - JtJ[1]*JtJ[2]; + d = d ? 
1./d : 0; + Vec4d iJtJ(JtJ[3]*d, -JtJ[1]*d, -JtJ[2]*d, JtJ[0]*d); + Vec2d JtErr(J[0]*err[0] + J[2]*err[1], J[1]*err[0] + J[3]*err[1]); + + q -= Vec2d(iJtJ[0]*JtErr[0] + iJtJ[1]*JtErr[1], iJtJ[2]*JtErr[0] + iJtJ[3]*JtErr[1]); + //Matx22d J(kx*x + k, kx*y, ky*x, ky*y + k); + //q -= Vec2d((J.t()*J).inv()*(J.t()*err)); + } + + if( i < maxiter ) + { + avgiter += i; + avgn++; + if( avgn == 1500 ) + printf("avg iters = %g\n", (double)avgiter/avgn); + } + + return i < maxiter ? Point2f((float)q[0], (float)q[1]) : Point2f(-FLT_MAX, -FLT_MAX); +} + +} + +float cv::initWideAngleProjMap( InputArray _cameraMatrix0, InputArray _distCoeffs0, + Size imageSize, int destImageWidth, int m1type, + OutputArray _map1, OutputArray _map2, int projType, double _alpha ) +{ + Mat cameraMatrix0 = _cameraMatrix0.getMat(), distCoeffs0 = _distCoeffs0.getMat(); + double k[8] = {0,0,0,0,0,0,0,0}, M[9]={0,0,0,0,0,0,0,0,0}; + Mat distCoeffs(distCoeffs0.rows, distCoeffs0.cols, CV_MAKETYPE(CV_64F,distCoeffs0.channels()), k); + Mat cameraMatrix(3,3,CV_64F,M); + Point2f scenter((float)cameraMatrix.at(0,2), (float)cameraMatrix.at(1,2)); + Point2f dcenter((destImageWidth-1)*0.5f, 0.f); + float xmin = FLT_MAX, xmax = -FLT_MAX, ymin = FLT_MAX, ymax = -FLT_MAX; + int N = 9; + std::vector u(1), v(1); + Mat _u(u), I = Mat::eye(3,3,CV_64F); + float alpha = (float)_alpha; + + int ndcoeffs = distCoeffs0.cols*distCoeffs0.rows*distCoeffs0.channels(); + CV_Assert((distCoeffs0.cols == 1 || distCoeffs0.rows == 1) && + (ndcoeffs == 4 || ndcoeffs == 5 || ndcoeffs == 8)); + CV_Assert(cameraMatrix0.size() == Size(3,3)); + distCoeffs0.convertTo(distCoeffs,CV_64F); + cameraMatrix0.convertTo(cameraMatrix,CV_64F); + + alpha = std::min(alpha, 0.999f); + + for( int i = 0; i < N; i++ ) + for( int j = 0; j < N; j++ ) + { + Point2f p((float)j*imageSize.width/(N-1), (float)i*imageSize.height/(N-1)); + u[0] = p; + undistortPoints(_u, v, cameraMatrix, distCoeffs, I, I); + Point2f q = mapPointSpherical(v[0], alpha, 0, projType); + if( 
xmin > q.x ) xmin = q.x; + if( xmax < q.x ) xmax = q.x; + if( ymin > q.y ) ymin = q.y; + if( ymax < q.y ) ymax = q.y; + } + + float scale = (float)std::min(dcenter.x/fabs(xmax), dcenter.x/fabs(xmin)); + Size dsize(destImageWidth, cvCeil(std::max(scale*fabs(ymin)*2, scale*fabs(ymax)*2))); + dcenter.y = (dsize.height - 1)*0.5f; + + Mat mapxy(dsize, CV_32FC2); + double k1 = k[0], k2 = k[1], k3 = k[2], p1 = k[3], p2 = k[4], k4 = k[5], k5 = k[6], k6 = k[7]; + double fx = cameraMatrix.at(0,0), fy = cameraMatrix.at(1,1), cx = scenter.x, cy = scenter.y; + + for( int y = 0; y < dsize.height; y++ ) + { + Point2f* mxy = mapxy.ptr(y); + for( int x = 0; x < dsize.width; x++ ) + { + Point2f p = (Point2f((float)x, (float)y) - dcenter)*(1.f/scale); + Point2f q = invMapPointSpherical(p, alpha, projType); + if( q.x <= -FLT_MAX && q.y <= -FLT_MAX ) + { + mxy[x] = Point2f(-1.f, -1.f); + continue; + } + double x2 = q.x*q.x, y2 = q.y*q.y; + double r2 = x2 + y2, _2xy = 2*q.x*q.y; + double kr = 1 + ((k3*r2 + k2)*r2 + k1)*r2/(1 + ((k6*r2 + k5)*r2 + k4)*r2); + double u = fx*(q.x*kr + p1*_2xy + p2*(r2 + 2*x2)) + cx; + double v = fy*(q.y*kr + p1*(r2 + 2*y2) + p2*_2xy) + cy; + + mxy[x] = Point2f((float)u, (float)v); + } + } + + if(m1type == CV_32FC2) + { + _map1.create(mapxy.size(), mapxy.type()); + Mat map1 = _map1.getMat(); + mapxy.copyTo(map1); + _map2.release(); + } + else + convertMaps(mapxy, Mat(), _map1, _map2, m1type, false); + + return scale; +} + +/* End of file */ diff --git a/opencv/imgproc/utils.cpp b/opencv/imgproc/utils.cpp new file mode 100644 index 0000000..8ebcbad --- /dev/null +++ b/opencv/imgproc/utils.cpp @@ -0,0 +1,242 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. 
+// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#include "precomp.hpp" + +CV_IMPL CvSeq* cvPointSeqFromMat( int seq_kind, const CvArr* arr, + CvContour* contour_header, CvSeqBlock* block ) +{ + CV_Assert( arr != 0 && contour_header != 0 && block != 0 ); + + int eltype; + CvMat* mat = (CvMat*)arr; + + if( !CV_IS_MAT( mat )) + CV_Error( CV_StsBadArg, "Input array is not a valid matrix" ); + + eltype = CV_MAT_TYPE( mat->type ); + if( eltype != CV_32SC2 && eltype != CV_32FC2 ) + CV_Error( CV_StsUnsupportedFormat, + "The matrix can not be converted to point sequence because of " + "inappropriate element type" ); + + if( (mat->width != 1 && mat->height != 1) || !CV_IS_MAT_CONT(mat->type)) + CV_Error( CV_StsBadArg, + "The matrix converted to point sequence must be " + "1-dimensional and continuous" ); + + cvMakeSeqHeaderForArray( + (seq_kind & (CV_SEQ_KIND_MASK|CV_SEQ_FLAG_CLOSED)) | eltype, + sizeof(CvContour), CV_ELEM_SIZE(eltype), mat->data.ptr, + mat->width*mat->height, (CvSeq*)contour_header, block ); + + return (CvSeq*)contour_header; +} + +namespace cv +{ + +static void copyMakeBorder_8u( const uchar* src, size_t srcstep, Size srcroi, + uchar* dst, size_t dststep, Size dstroi, + int top, int left, int cn, int borderType ) +{ + const int isz = (int)sizeof(int); + int i, j, k, elemSize = 1; + bool intMode = false; + + if( (cn | srcstep | dststep | (size_t)src | (size_t)dst) % isz == 0 ) + { + cn /= isz; + elemSize = isz; + intMode = true; + } + + AutoBuffer _tab((dstroi.width - srcroi.width)*cn); + int* tab = _tab; + int right = dstroi.width - srcroi.width - left; + int bottom = dstroi.height - srcroi.height - top; + + for( i = 0; i < left; i++ ) + { + j = borderInterpolate(i - left, srcroi.width, borderType)*cn; + for( k = 0; k < cn; k++ ) + tab[i*cn + k] = j + k; + } + + for( i = 0; i < right; i++ ) + { + j = borderInterpolate(srcroi.width + i, srcroi.width, borderType)*cn; + for( k = 0; k < cn; k++ ) + tab[(i+left)*cn + k] = j + k; + } + + srcroi.width *= cn; + dstroi.width *= cn; + left *= cn; + 
right *= cn; + + uchar* dstInner = dst + dststep*top + left*elemSize; + + for( i = 0; i < srcroi.height; i++, dstInner += dststep, src += srcstep ) + { + if( dstInner != src ) + memcpy(dstInner, src, srcroi.width*elemSize); + + if( intMode ) + { + const int* isrc = (int*)src; + int* idstInner = (int*)dstInner; + for( j = 0; j < left; j++ ) + idstInner[j - left] = isrc[tab[j]]; + for( j = 0; j < right; j++ ) + idstInner[j + srcroi.width] = isrc[tab[j + left]]; + } + else + { + for( j = 0; j < left; j++ ) + dstInner[j - left] = src[tab[j]]; + for( j = 0; j < right; j++ ) + dstInner[j + srcroi.width] = src[tab[j + left]]; + } + } + + dstroi.width *= elemSize; + dst += dststep*top; + + for( i = 0; i < top; i++ ) + { + j = borderInterpolate(i - top, srcroi.height, borderType); + memcpy(dst + (i - top)*dststep, dst + j*dststep, dstroi.width); + } + + for( i = 0; i < bottom; i++ ) + { + j = borderInterpolate(i + srcroi.height, srcroi.height, borderType); + memcpy(dst + (i + srcroi.height)*dststep, dst + j*dststep, dstroi.width); + } +} + + +static void copyMakeConstBorder_8u( const uchar* src, size_t srcstep, Size srcroi, + uchar* dst, size_t dststep, Size dstroi, + int top, int left, int cn, const uchar* value ) +{ + int i, j; + AutoBuffer _constBuf(dstroi.width*cn); + uchar* constBuf = _constBuf; + int right = dstroi.width - srcroi.width - left; + int bottom = dstroi.height - srcroi.height - top; + + for( i = 0; i < dstroi.width; i++ ) + { + for( j = 0; j < cn; j++ ) + constBuf[i*cn + j] = value[j]; + } + + srcroi.width *= cn; + dstroi.width *= cn; + left *= cn; + right *= cn; + + uchar* dstInner = dst + dststep*top + left; + + for( i = 0; i < srcroi.height; i++, dstInner += dststep, src += srcstep ) + { + if( dstInner != src ) + memcpy( dstInner, src, srcroi.width ); + memcpy( dstInner - left, constBuf, left ); + memcpy( dstInner + srcroi.width, constBuf, right ); + } + + dst += dststep*top; + + for( i = 0; i < top; i++ ) + memcpy(dst + (i - top)*dststep, constBuf, 
dstroi.width); + + for( i = 0; i < bottom; i++ ) + memcpy(dst + (i + srcroi.height)*dststep, constBuf, dstroi.width); +} + +} + +void cv::copyMakeBorder( InputArray _src, OutputArray _dst, int top, int bottom, + int left, int right, int borderType, const Scalar& value ) +{ + Mat src = _src.getMat(); + CV_Assert( top >= 0 && bottom >= 0 && left >= 0 && right >= 0 ); + + _dst.create( src.rows + top + bottom, src.cols + left + right, src.type() ); + Mat dst = _dst.getMat(); + + if( borderType != BORDER_CONSTANT ) + copyMakeBorder_8u( src.data, src.step, src.size(), + dst.data, dst.step, dst.size(), + top, left, (int)src.elemSize(), borderType ); + else + { + int cn = src.channels(), cn1 = cn; + AutoBuffer buf(cn); + if( cn > 4 ) + { + CV_Assert( value[0] == value[1] && value[0] == value[2] && value[0] == value[3] ); + cn1 = 1; + } + scalarToRawData(value, buf, CV_MAKETYPE(src.depth(), cn1), cn); + copyMakeConstBorder_8u( src.data, src.step, src.size(), + dst.data, dst.step, dst.size(), + top, left, (int)src.elemSize(), (uchar*)(double*)buf ); + } +} + + +CV_IMPL void +cvCopyMakeBorder( const CvArr* srcarr, CvArr* dstarr, CvPoint offset, + int borderType, CvScalar value ) +{ + cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr); + int left = offset.x, right = dst.cols - src.cols - left; + int top = offset.y, bottom = dst.rows - src.rows - top; + + CV_Assert( dst.type() == src.type() ); + cv::copyMakeBorder( src, dst, top, bottom, left, right, borderType, value ); +} + +/* End of file. */ diff --git a/opencv2/core/core.hpp b/opencv2/core/core.hpp new file mode 100644 index 0000000..9e1d855 --- /dev/null +++ b/opencv2/core/core.hpp @@ -0,0 +1,4345 @@ +/*! \file core.hpp + \brief The Core Functionality + */ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_CORE_HPP__ +#define __OPENCV_CORE_HPP__ + +#include "opencv2/core/types_c.h" +#include "opencv2/core/version.hpp" + +#ifdef __cplusplus + +#ifndef SKIP_INCLUDES +#include +#include +#include +#include +#include +#include +#include +#include +#include +#endif // SKIP_INCLUDES + +/*! \namespace cv + Namespace where all the C++ OpenCV functionality resides +*/ +namespace cv { + +#undef abs +#undef min +#undef max +#undef Complex + +using std::vector; +using std::string; +using std::ptrdiff_t; + +template class CV_EXPORTS Size_; +template class CV_EXPORTS Point_; +template class CV_EXPORTS Rect_; +template class CV_EXPORTS Vec; +template class CV_EXPORTS Matx; + +typedef std::string String; +typedef std::basic_string WString; + +class Mat; +class SparseMat; +typedef Mat MatND; + +class CV_EXPORTS MatExpr; +class CV_EXPORTS MatOp_Base; +class CV_EXPORTS MatArg; +class CV_EXPORTS MatConstIterator; + +template class CV_EXPORTS Mat_; +template class CV_EXPORTS MatIterator_; +template class CV_EXPORTS MatConstIterator_; +template class CV_EXPORTS MatCommaInitializer_; + +CV_EXPORTS string fromUtf16(const WString& str); +CV_EXPORTS WString toUtf16(const string& str); + +CV_EXPORTS string format( const char* fmt, ... ); +CV_EXPORTS string tempfile( const char* suffix CV_DEFAULT(0)); + +// matrix decomposition types +enum { DECOMP_LU=0, DECOMP_SVD=1, DECOMP_EIG=2, DECOMP_CHOLESKY=3, DECOMP_QR=4, DECOMP_NORMAL=16 }; +enum { NORM_INF=1, NORM_L1=2, NORM_L2=4, NORM_TYPE_MASK=7, NORM_RELATIVE=8, NORM_MINMAX=32}; +enum { CMP_EQ=0, CMP_GT=1, CMP_GE=2, CMP_LT=3, CMP_LE=4, CMP_NE=5 }; +enum { GEMM_1_T=1, GEMM_2_T=2, GEMM_3_T=4 }; +enum { DFT_INVERSE=1, DFT_SCALE=2, DFT_ROWS=4, DFT_COMPLEX_OUTPUT=16, DFT_REAL_OUTPUT=32, + DCT_INVERSE = DFT_INVERSE, DCT_ROWS=DFT_ROWS }; + + +/*! + The standard OpenCV exception class. + Instances of the class are thrown by various functions and methods in the case of critical errors. 
+ */ +class CV_EXPORTS Exception : public std::exception +{ +public: + /*! + Default constructor + */ + Exception(); + /*! + Full constructor. Normally the constuctor is not called explicitly. + Instead, the macros CV_Error(), CV_Error_() and CV_Assert() are used. + */ + Exception(int _code, const string& _err, const string& _func, const string& _file, int _line); + virtual ~Exception() throw(); + + /*! + \return the error description and the context as a text string. + */ + virtual const char *what() const throw(); + void formatMessage(); + + string msg; ///< the formatted error message + + int code; ///< error code @see CVStatus + string err; ///< error description + string func; ///< function name. Available only when the compiler supports __func__ macro + string file; ///< source file name where the error has occured + int line; ///< line number in the source file where the error has occured +}; + + +//! Signals an error and raises the exception. + +/*! + By default the function prints information about the error to stderr, + then it either stops if setBreakOnError() had been called before or raises the exception. + It is possible to alternate error processing by using redirectError(). + + \param exc the exception raisen. + */ +CV_EXPORTS void error( const Exception& exc ); + +//! Sets/resets the break-on-error mode. + +/*! + When the break-on-error mode is set, the default error handler + issues a hardware exception, which can make debugging more convenient. + + \return the previous state + */ +CV_EXPORTS bool setBreakOnError(bool flag); + +typedef int (CV_CDECL *ErrorCallback)( int status, const char* func_name, + const char* err_msg, const char* file_name, + int line, void* userdata ); + +//! Sets the new error handler and the optional user data. + +/*! + The function sets the new error handler, called from cv::error(). + + \param errCallback the new error handler. If NULL, the default error handler is used. 
+ \param userdata the optional user data pointer, passed to the callback. + \param prevUserdata the optional output parameter where the previous user data pointer is stored + + \return the previous error handler +*/ +CV_EXPORTS ErrorCallback redirectError( ErrorCallback errCallback, + void* userdata=0, void** prevUserdata=0); + +#ifdef __GNUC__ +#define CV_Error( code, msg ) cv::error( cv::Exception(code, msg, __func__, __FILE__, __LINE__) ) +#define CV_Error_( code, args ) cv::error( cv::Exception(code, cv::format args, __func__, __FILE__, __LINE__) ) +#define CV_Assert( expr ) if((expr)) ; else cv::error( cv::Exception(CV_StsAssert, #expr, __func__, __FILE__, __LINE__) ) +#else +#define CV_Error( code, msg ) cv::error( cv::Exception(code, msg, "", __FILE__, __LINE__) ) +#define CV_Error_( code, args ) cv::error( cv::Exception(code, cv::format args, "", __FILE__, __LINE__) ) +#define CV_Assert( expr ) if((expr)) ; else cv::error( cv::Exception(CV_StsAssert, #expr, "", __FILE__, __LINE__) ) +#endif + +#ifdef _DEBUG +#define CV_DbgAssert(expr) CV_Assert(expr) +#else +#define CV_DbgAssert(expr) +#endif + +CV_EXPORTS void setNumThreads(int nthreads); +CV_EXPORTS int getNumThreads(); +CV_EXPORTS int getThreadNum(); + +//! Returns the number of ticks. + +/*! + The function returns the number of ticks since the certain event (e.g. when the machine was turned on). + It can be used to initialize cv::RNG or to measure a function execution time by reading the tick count + before and after the function call. The granularity of ticks depends on the hardware and OS used. Use + cv::getTickFrequency() to convert ticks to seconds. +*/ +CV_EXPORTS_W int64 getTickCount(); + +/*! + Returns the number of ticks per seconds. + + The function returns the number of ticks (as returned by cv::getTickCount()) per second. + The following code computes the execution time in milliseconds: + + \code + double exec_time = (double)getTickCount(); + // do something ... 
+ exec_time = ((double)getTickCount() - exec_time)*1000./getTickFrequency(); + \endcode +*/ +CV_EXPORTS_W double getTickFrequency(); + +/*! + Returns the number of CPU ticks. + + On platforms where the feature is available, the function returns the number of CPU ticks + since the certain event (normally, the system power-on moment). Using this function + one can accurately measure the execution time of very small code fragments, + for which cv::getTickCount() granularity is not enough. +*/ +CV_EXPORTS_W int64 getCPUTickCount(); + +/*! + Returns SSE etc. support status + + The function returns true if certain hardware features are available. + Currently, the following features are recognized: + - CV_CPU_MMX - MMX + - CV_CPU_SSE - SSE + - CV_CPU_SSE2 - SSE 2 + - CV_CPU_SSE3 - SSE 3 + - CV_CPU_SSSE3 - SSSE 3 + - CV_CPU_SSE4_1 - SSE 4.1 + - CV_CPU_SSE4_2 - SSE 4.2 + - CV_CPU_POPCNT - POPCOUNT + - CV_CPU_AVX - AVX + + \note {Note that the function output is not static. Once you called cv::useOptimized(false), + most of the hardware acceleration is disabled and thus the function will returns false, + until you call cv::useOptimized(true)} +*/ +CV_EXPORTS_W bool checkHardwareSupport(int feature); + +//! returns the number of CPUs (including hyper-threading) +CV_EXPORTS_W int getNumberOfCPUs(); + +/*! + Allocates memory buffer + + This is specialized OpenCV memory allocation function that returns properly aligned memory buffers. + The usage is identical to malloc(). The allocated buffers must be freed with cv::fastFree(). + If there is not enough memory, the function calls cv::error(), which raises an exception. + + \param bufSize buffer size in bytes + \return the allocated memory buffer. +*/ +CV_EXPORTS void* fastMalloc(size_t bufSize); + +/*! + Frees the memory allocated with cv::fastMalloc + + This is the corresponding deallocation function for cv::fastMalloc(). + When ptr==NULL, the function has no effect. 
+*/ +CV_EXPORTS void fastFree(void* ptr); + +template static inline _Tp* allocate(size_t n) +{ + return new _Tp[n]; +} + +template static inline void deallocate(_Tp* ptr, size_t) +{ + delete[] ptr; +} + +/*! + Aligns pointer by the certain number of bytes + + This small inline function aligns the pointer by the certian number of bytes by shifting + it forward by 0 or a positive offset. +*/ +template static inline _Tp* alignPtr(_Tp* ptr, int n=(int)sizeof(_Tp)) +{ + return (_Tp*)(((size_t)ptr + n-1) & -n); +} + +/*! + Aligns buffer size by the certain number of bytes + + This small inline function aligns a buffer size by the certian number of bytes by enlarging it. +*/ +static inline size_t alignSize(size_t sz, int n) +{ + return (sz + n-1) & -n; +} + +/*! + Turns on/off available optimization + + The function turns on or off the optimized code in OpenCV. Some optimization can not be enabled + or disabled, but, for example, most of SSE code in OpenCV can be temporarily turned on or off this way. + + \note{Since optimization may imply using special data structures, it may be unsafe + to call this function anywhere in the code. Instead, call it somewhere at the top level.} +*/ +CV_EXPORTS_W void setUseOptimized(bool onoff); + +/*! + Returns the current optimization status + + The function returns the current optimization status, which is controlled by cv::setUseOptimized(). +*/ +CV_EXPORTS_W bool useOptimized(); + +/*! 
+ The STL-compilant memory Allocator based on cv::fastMalloc() and cv::fastFree() +*/ +template class CV_EXPORTS Allocator +{ +public: + typedef _Tp value_type; + typedef value_type* pointer; + typedef const value_type* const_pointer; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef size_t size_type; + typedef ptrdiff_t difference_type; + template class rebind { typedef Allocator other; }; + + explicit Allocator() {} + ~Allocator() {} + explicit Allocator(Allocator const&) {} + template + explicit Allocator(Allocator const&) {} + + // address + pointer address(reference r) { return &r; } + const_pointer address(const_reference r) { return &r; } + + pointer allocate(size_type count, const void* =0) + { return reinterpret_cast(fastMalloc(count * sizeof (_Tp))); } + + void deallocate(pointer p, size_type) {fastFree(p); } + + size_type max_size() const + { return max(static_cast<_Tp>(-1)/sizeof(_Tp), 1); } + + void construct(pointer p, const _Tp& v) { new(static_cast(p)) _Tp(v); } + void destroy(pointer p) { p->~_Tp(); } +}; + +/////////////////////// Vec (used as element of multi-channel images ///////////////////// + +/*! + A helper class for cv::DataType + + The class is specialized for each fundamental numerical data type supported by OpenCV. + It provides DataDepth::value constant. 
+*/ +template class CV_EXPORTS DataDepth {}; + +template<> class DataDepth { public: enum { value = CV_8U, fmt=(int)'u' }; }; +template<> class DataDepth { public: enum { value = CV_8U, fmt=(int)'u' }; }; +template<> class DataDepth { public: enum { value = CV_8S, fmt=(int)'c' }; }; +template<> class DataDepth { public: enum { value = CV_8S, fmt=(int)'c' }; }; +template<> class DataDepth { public: enum { value = CV_16U, fmt=(int)'w' }; }; +template<> class DataDepth { public: enum { value = CV_16S, fmt=(int)'s' }; }; +template<> class DataDepth { public: enum { value = CV_32S, fmt=(int)'i' }; }; +// this is temporary solution to support 32-bit unsigned integers +template<> class DataDepth { public: enum { value = CV_32S, fmt=(int)'i' }; }; +template<> class DataDepth { public: enum { value = CV_32F, fmt=(int)'f' }; }; +template<> class DataDepth { public: enum { value = CV_64F, fmt=(int)'d' }; }; +template class DataDepth<_Tp*> { public: enum { value = CV_USRTYPE1, fmt=(int)'r' }; }; + + +////////////////////////////// Small Matrix /////////////////////////// + +/*! + A short numerical vector. + + This template class represents short numerical vectors (of 1, 2, 3, 4 ... elements) + on which you can perform basic arithmetical operations, access individual elements using [] operator etc. + The vectors are allocated on stack, as opposite to std::valarray, std::vector, cv::Mat etc., + which elements are dynamically allocated in the heap. + + The template takes 2 parameters: + -# _Tp element type + -# cn the number of elements + + In addition to the universal notation like Vec, you can use shorter aliases + for the most popular specialized variants of Vec, e.g. Vec3f ~ Vec. 
+ */ + +struct CV_EXPORTS Matx_AddOp {}; +struct CV_EXPORTS Matx_SubOp {}; +struct CV_EXPORTS Matx_ScaleOp {}; +struct CV_EXPORTS Matx_MulOp {}; +struct CV_EXPORTS Matx_MatMulOp {}; +struct CV_EXPORTS Matx_TOp {}; + +template class CV_EXPORTS Matx +{ +public: + typedef _Tp value_type; + typedef Matx<_Tp, MIN(m, n), 1> diag_type; + typedef Matx<_Tp, m, n> mat_type; + enum { depth = DataDepth<_Tp>::value, rows = m, cols = n, channels = rows*cols, + type = CV_MAKETYPE(depth, channels) }; + + //! default constructor + Matx(); + + Matx(_Tp v0); //!< 1x1 matrix + Matx(_Tp v0, _Tp v1); //!< 1x2 or 2x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2); //!< 1x3 or 3x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3); //!< 1x4, 2x2 or 4x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4); //!< 1x5 or 5x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5); //!< 1x6, 2x3, 3x2 or 6x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6); //!< 1x7 or 7x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7); //!< 1x8, 2x4, 4x2 or 8x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8); //!< 1x9, 3x3 or 9x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9); //!< 1x10, 2x5 or 5x2 or 10x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11); //!< 1x12, 2x6, 3x4, 4x3, 6x2 or 12x1 matrix + Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11, + _Tp v12, _Tp v13, _Tp v14, _Tp v15); //!< 1x16, 4x4 or 16x1 matrix + explicit Matx(const _Tp* vals); //!< initialize from a plain array + + static Matx all(_Tp alpha); + static Matx zeros(); + static Matx ones(); + static Matx eye(); + static Matx diag(const diag_type& d); + static Matx randu(_Tp a, _Tp b); + static Matx randn(_Tp a, _Tp b); + + //! 
dot product computed with the default precision + _Tp dot(const Matx<_Tp, m, n>& v) const; + + //! dot product computed in double-precision arithmetics + double ddot(const Matx<_Tp, m, n>& v) const; + + //! convertion to another data type + template operator Matx() const; + + //! change the matrix shape + template Matx<_Tp, m1, n1> reshape() const; + + //! extract part of the matrix + template Matx<_Tp, m1, n1> get_minor(int i, int j) const; + + //! extract the matrix row + Matx<_Tp, 1, n> row(int i) const; + + //! extract the matrix column + Matx<_Tp, m, 1> col(int i) const; + + //! extract the matrix diagonal + Matx<_Tp, MIN(m,n), 1> diag() const; + + //! transpose the matrix + Matx<_Tp, n, m> t() const; + + //! invert matrix the matrix + Matx<_Tp, n, m> inv(int method=DECOMP_LU) const; + + //! solve linear system + template Matx<_Tp, n, l> solve(const Matx<_Tp, m, l>& rhs, int flags=DECOMP_LU) const; + Matx<_Tp, n, 1> solve(const Matx<_Tp, m, 1>& rhs, int method) const; + + //! multiply two matrices element-wise + Matx<_Tp, m, n> mul(const Matx<_Tp, m, n>& a) const; + + //! element access + const _Tp& operator ()(int i, int j) const; + _Tp& operator ()(int i, int j); + + //! 
1D element access + const _Tp& operator ()(int i) const; + _Tp& operator ()(int i); + + Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_AddOp); + Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_SubOp); + template Matx(const Matx<_Tp, m, n>& a, _T2 alpha, Matx_ScaleOp); + Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_MulOp); + template Matx(const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b, Matx_MatMulOp); + Matx(const Matx<_Tp, n, m>& a, Matx_TOp); + + _Tp val[m*n]; //< matrix elements +}; + + +typedef Matx Matx12f; +typedef Matx Matx12d; +typedef Matx Matx13f; +typedef Matx Matx13d; +typedef Matx Matx14f; +typedef Matx Matx14d; +typedef Matx Matx16f; +typedef Matx Matx16d; + +typedef Matx Matx21f; +typedef Matx Matx21d; +typedef Matx Matx31f; +typedef Matx Matx31d; +typedef Matx Matx41f; +typedef Matx Matx41d; +typedef Matx Matx61f; +typedef Matx Matx61d; + +typedef Matx Matx22f; +typedef Matx Matx22d; +typedef Matx Matx23f; +typedef Matx Matx23d; +typedef Matx Matx32f; +typedef Matx Matx32d; + +typedef Matx Matx33f; +typedef Matx Matx33d; + +typedef Matx Matx34f; +typedef Matx Matx34d; +typedef Matx Matx43f; +typedef Matx Matx43d; + +typedef Matx Matx44f; +typedef Matx Matx44d; +typedef Matx Matx66f; +typedef Matx Matx66d; + + +/*! + A short numerical vector. + + This template class represents short numerical vectors (of 1, 2, 3, 4 ... elements) + on which you can perform basic arithmetical operations, access individual elements using [] operator etc. + The vectors are allocated on stack, as opposite to std::valarray, std::vector, cv::Mat etc., + which elements are dynamically allocated in the heap. + + The template takes 2 parameters: + -# _Tp element type + -# cn the number of elements + + In addition to the universal notation like Vec, you can use shorter aliases + for the most popular specialized variants of Vec, e.g. Vec3f ~ Vec. 
+*/ +template class CV_EXPORTS Vec : public Matx<_Tp, cn, 1> +{ +public: + typedef _Tp value_type; + enum { depth = DataDepth<_Tp>::value, channels = cn, type = CV_MAKETYPE(depth, channels) }; + + //! default constructor + Vec(); + + Vec(_Tp v0); //!< 1-element vector constructor + Vec(_Tp v0, _Tp v1); //!< 2-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2); //!< 3-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3); //!< 4-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4); //!< 5-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5); //!< 6-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6); //!< 7-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7); //!< 8-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8); //!< 9-element vector constructor + Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5, _Tp v6, _Tp v7, _Tp v8, _Tp v9); //!< 10-element vector constructor + explicit Vec(const _Tp* values); + + Vec(const Vec<_Tp, cn>& v); + Vec<_Tp, cn>& operator =(const Matx<_Tp, cn, 1>& m); + + static Vec all(_Tp alpha); + + //! per-element multiplication + Vec mul(const Vec<_Tp, cn>& v) const; + + /*! + cross product of the two 3D vectors. + + For other dimensionalities the exception is raised + */ + Vec cross(const Vec& v) const; + //! convertion to another data type + template operator Vec() const; + //! conversion to 4-element CvScalar. + operator CvScalar() const; + + /*! 
element access */ + const _Tp& operator [](int i) const; + _Tp& operator[](int i); + const _Tp& operator ()(int i) const; + _Tp& operator ()(int i); + + Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_AddOp); + Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_SubOp); + template Vec(const Matx<_Tp, cn, 1>& a, _T2 alpha, Matx_ScaleOp); +}; + + +/* \typedef + + Shorter aliases for the most popular specializations of Vec +*/ +typedef Vec Vec2b; +typedef Vec Vec3b; +typedef Vec Vec4b; + +typedef Vec Vec2s; +typedef Vec Vec3s; +typedef Vec Vec4s; + +typedef Vec Vec2w; +typedef Vec Vec3w; +typedef Vec Vec4w; + +typedef Vec Vec2i; +typedef Vec Vec3i; +typedef Vec Vec4i; +typedef Vec Vec6i; +typedef Vec Vec8i; + +typedef Vec Vec2f; +typedef Vec Vec3f; +typedef Vec Vec4f; +typedef Vec Vec6f; + +typedef Vec Vec2d; +typedef Vec Vec3d; +typedef Vec Vec4d; +typedef Vec Vec6d; + + +//////////////////////////////// Complex ////////////////////////////// + +/*! + A complex number class. + + The template class is similar and compatible with std::complex, however it provides slightly + more convenient access to the real and imaginary parts using through the simple field access, as opposite + to std::complex::real() and std::complex::imag(). +*/ +template class CV_EXPORTS Complex +{ +public: + + //! constructors + Complex(); + Complex( _Tp _re, _Tp _im=0 ); + Complex( const std::complex<_Tp>& c ); + + //! conversion to another data type + template operator Complex() const; + //! conjugation + Complex conj() const; + //! conversion to std::complex + operator std::complex<_Tp>() const; + + _Tp re, im; //< the real and the imaginary parts +}; + + +/*! + \typedef +*/ +typedef Complex Complexf; +typedef Complex Complexd; + + +//////////////////////////////// Point_ //////////////////////////////// + +/*! + template 2D point class. + + The class defines a point in 2D space. Data type of the point coordinates is specified + as a template parameter. 
There are a few shorter aliases available for user convenience. + See cv::Point, cv::Point2i, cv::Point2f and cv::Point2d. +*/ +template class CV_EXPORTS Point_ +{ +public: + typedef _Tp value_type; + + // various constructors + Point_(); + Point_(_Tp _x, _Tp _y); + Point_(const Point_& pt); + Point_(const CvPoint& pt); + Point_(const CvPoint2D32f& pt); + Point_(const Size_<_Tp>& sz); + Point_(const Vec<_Tp, 2>& v); + + Point_& operator = (const Point_& pt); + //! conversion to another data type + template operator Point_<_Tp2>() const; + + //! conversion to the old-style C structures + operator CvPoint() const; + operator CvPoint2D32f() const; + operator Vec<_Tp, 2>() const; + + //! dot product + _Tp dot(const Point_& pt) const; + //! dot product computed in double-precision arithmetics + double ddot(const Point_& pt) const; + //! checks whether the point is inside the specified rectangle + bool inside(const Rect_<_Tp>& r) const; + + _Tp x, y; //< the point coordinates +}; + +/*! + template 3D point class. + + The class defines a point in 3D space. Data type of the point coordinates is specified + as a template parameter. + + \see cv::Point3i, cv::Point3f and cv::Point3d +*/ +template class CV_EXPORTS Point3_ +{ +public: + typedef _Tp value_type; + + // various constructors + Point3_(); + Point3_(_Tp _x, _Tp _y, _Tp _z); + Point3_(const Point3_& pt); + explicit Point3_(const Point_<_Tp>& pt); + Point3_(const CvPoint3D32f& pt); + Point3_(const Vec<_Tp, 3>& v); + + Point3_& operator = (const Point3_& pt); + //! conversion to another data type + template operator Point3_<_Tp2>() const; + //! conversion to the old-style CvPoint... + operator CvPoint3D32f() const; + //! conversion to cv::Vec<> + operator Vec<_Tp, 3>() const; + + //! dot product + _Tp dot(const Point3_& pt) const; + //! dot product computed in double-precision arithmetics + double ddot(const Point3_& pt) const; + //! 
cross product of the 2 3D points + Point3_ cross(const Point3_& pt) const; + + _Tp x, y, z; //< the point coordinates +}; + +//////////////////////////////// Size_ //////////////////////////////// + +/*! + The 2D size class + + The class represents the size of a 2D rectangle, image size, matrix size etc. + Normally, cv::Size ~ cv::Size_ is used. +*/ +template class CV_EXPORTS Size_ +{ +public: + typedef _Tp value_type; + + //! various constructors + Size_(); + Size_(_Tp _width, _Tp _height); + Size_(const Size_& sz); + Size_(const CvSize& sz); + Size_(const CvSize2D32f& sz); + Size_(const Point_<_Tp>& pt); + + Size_& operator = (const Size_& sz); + //! the area (width*height) + _Tp area() const; + + //! conversion of another data type. + template operator Size_<_Tp2>() const; + + //! conversion to the old-style OpenCV types + operator CvSize() const; + operator CvSize2D32f() const; + + _Tp width, height; // the width and the height +}; + +//////////////////////////////// Rect_ //////////////////////////////// + +/*! + The 2D up-right rectangle class + + The class represents a 2D rectangle with coordinates of the specified data type. + Normally, cv::Rect ~ cv::Rect_ is used. +*/ +template class CV_EXPORTS Rect_ +{ +public: + typedef _Tp value_type; + + //! various constructors + Rect_(); + Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height); + Rect_(const Rect_& r); + Rect_(const CvRect& r); + Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz); + Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2); + + Rect_& operator = ( const Rect_& r ); + //! the top-left corner + Point_<_Tp> tl() const; + //! the bottom-right corner + Point_<_Tp> br() const; + + //! size (width, height) of the rectangle + Size_<_Tp> size() const; + //! area (width*height) of the rectangle + _Tp area() const; + + //! conversion to another data type + template operator Rect_<_Tp2>() const; + //! conversion to the old-style CvRect + operator CvRect() const; + + //! 
checks whether the rectangle contains the point + bool contains(const Point_<_Tp>& pt) const; + + _Tp x, y, width, height; //< the top-left corner, as well as width and height of the rectangle +}; + + +/*! + \typedef + + shorter aliases for the most popular cv::Point_<>, cv::Size_<> and cv::Rect_<> specializations +*/ +typedef Point_ Point2i; +typedef Point2i Point; +typedef Size_ Size2i; +typedef Size2i Size; +typedef Rect_ Rect; +typedef Point_ Point2f; +typedef Point_ Point2d; +typedef Size_ Size2f; +typedef Point3_ Point3i; +typedef Point3_ Point3f; +typedef Point3_ Point3d; + + +/*! + The rotated 2D rectangle. + + The class represents rotated (i.e. not up-right) rectangles on a plane. + Each rectangle is described by the center point (mass center), length of each side + (represented by cv::Size2f structure) and the rotation angle in degrees. +*/ +class CV_EXPORTS RotatedRect +{ +public: + //! various constructors + RotatedRect(); + RotatedRect(const Point2f& _center, const Size2f& _size, float _angle); + RotatedRect(const CvBox2D& box); + + //! returns 4 vertices of the rectangle + void points(Point2f pts[]) const; + //! returns the minimal up-right rectangle containing the rotated rectangle + Rect boundingRect() const; + //! conversion to the old-style CvBox2D structure + operator CvBox2D() const; + + Point2f center; //< the rectangle mass center + Size2f size; //< width and height of the rectangle + float angle; //< the rotation angle. When the angle is 0, 90, 180, 270 etc., the rectangle becomes an up-right rectangle. +}; + +//////////////////////////////// Scalar_ /////////////////////////////// + +/*! + The template scalar class. + + This is partially specialized cv::Vec class with the number of elements = 4, i.e. a short vector of four elements. + Normally, cv::Scalar ~ cv::Scalar_ is used. +*/ +template class CV_EXPORTS Scalar_ : public Vec<_Tp, 4> +{ +public: + //! 
various constructors + Scalar_(); + Scalar_(_Tp v0, _Tp v1, _Tp v2=0, _Tp v3=0); + Scalar_(const CvScalar& s); + Scalar_(_Tp v0); + + //! returns a scalar with all elements set to v0 + static Scalar_<_Tp> all(_Tp v0); + //! conversion to the old-style CvScalar + operator CvScalar() const; + + //! conversion to another data type + template operator Scalar_() const; + + //! per-element product + Scalar_<_Tp> mul(const Scalar_<_Tp>& t, double scale=1 ) const; + + // returns (v0, -v1, -v2, -v3) + Scalar_<_Tp> conj() const; + + // returns true iff v1 == v2 == v3 == 0 + bool isReal() const; +}; + +typedef Scalar_ Scalar; + +CV_EXPORTS void scalarToRawData(const Scalar& s, void* buf, int type, int unroll_to=0); + +//////////////////////////////// Range ///////////////////////////////// + +/*! + The 2D range class + + This is the class used to specify a continuous subsequence, i.e. part of a contour, or a column span in a matrix. +*/ +class CV_EXPORTS Range +{ +public: + Range(); + Range(int _start, int _end); + Range(const CvSlice& slice); + int size() const; + bool empty() const; + static Range all(); + operator CvSlice() const; + + int start, end; +}; + +/////////////////////////////// DataType //////////////////////////////// + +/*! + Informative template class for OpenCV "scalars". + + The class is specialized for each primitive numerical type supported by OpenCV (such as unsigned char or float), + as well as for more complex types, like cv::Complex<>, std::complex<>, cv::Vec<> etc. + The common property of all such types (called "scalars", do not confuse it with cv::Scalar_) + is that each of them is basically a tuple of numbers of the same type. Each "scalar" can be represented + by the depth id (CV_8U ... CV_64F) and the number of channels. + OpenCV matrices, 2D or nD, dense or sparse, can store "scalars", + as long as the number of channels does not exceed CV_CN_MAX. 
+*/ +template class DataType +{ +public: + typedef _Tp value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + + enum { generic_type = 1, depth = -1, channels = 1, fmt=0, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef bool value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef uchar value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef schar value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef schar value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef ushort value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef short value_type; + typedef int work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = 
CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef int value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef float value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template<> class DataType +{ +public: + typedef double value_type; + typedef value_type work_type; + typedef value_type channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 1, + fmt=DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template class DataType > +{ +public: + typedef Vec<_Tp, cn> value_type; + typedef Vec::work_type, cn> work_type; + typedef _Tp channel_type; + typedef value_type vec_type; + enum { generic_type = 0, depth = DataDepth::value, channels = cn, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; +}; + +template class DataType > +{ +public: + typedef std::complex<_Tp> value_type; + typedef value_type work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Complex<_Tp> value_type; + typedef value_type work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Point_<_Tp> value_type; + typedef 
Point_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Point3_<_Tp> value_type; + typedef Point3_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 3, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Size_<_Tp> value_type; + typedef Size_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Rect_<_Tp> value_type; + typedef Rect_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 4, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template class DataType > +{ +public: + typedef Scalar_<_Tp> value_type; + typedef Scalar_::work_type> work_type; + typedef _Tp channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 4, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + +template<> class DataType +{ +public: + typedef Range value_type; + typedef value_type work_type; + typedef int channel_type; + enum { generic_type = 0, depth = DataDepth::value, channels = 2, + fmt = ((channels-1)<<8) + DataDepth::fmt, + type = CV_MAKETYPE(depth, channels) }; + typedef Vec vec_type; +}; + + +//////////////////// generic_type ref-counting pointer class for C/C++ objects //////////////////////// + +/*! 
+ Smart pointer to dynamically allocated objects. + + This is template pointer-wrapping class that stores the associated reference counter along with the + object pointer. The class is similar to std::smart_ptr<> from the recent addons to the C++ standard, + but is shorter to write :) and self-contained (i.e. does add any dependency on the compiler or an external library). + + Basically, you can use "Ptr ptr" (or faster "const Ptr& ptr" for read-only access) + everywhere instead of "MyObjectType* ptr", where MyObjectType is some C structure or a C++ class. + To make it all work, you need to specialize Ptr<>::delete_obj(), like: + + \code + template<> void Ptr::delete_obj() { call_destructor_func(obj); } + \endcode + + \note{if MyObjectType is a C++ class with a destructor, you do not need to specialize delete_obj(), + since the default implementation calls "delete obj;"} + + \note{Another good property of the class is that the operations on the reference counter are atomic, + i.e. it is safe to use the class in multi-threaded applications} +*/ +template class CV_EXPORTS Ptr +{ +public: + //! empty constructor + Ptr(); + //! take ownership of the pointer. The associated reference counter is allocated and set to 1 + Ptr(_Tp* _obj); + //! calls release() + ~Ptr(); + //! copy constructor. Copies the members and calls addref() + Ptr(const Ptr& ptr); + //! copy operator. Calls ptr.addref() and release() before copying the members + Ptr& operator = (const Ptr& ptr); + //! increments the reference counter + void addref(); + //! decrements the reference counter. If it reaches 0, delete_obj() is called + void release(); + //! deletes the object. Override if needed + void delete_obj(); + //! returns true iff obj==NULL + bool empty() const; + + + //! helper operators making "Ptr ptr" use very similar to "T* ptr". + _Tp* operator -> (); + const _Tp* operator -> () const; + + operator _Tp* (); + operator const _Tp*() const; + +protected: + _Tp* obj; //< the object pointer. 
+ int* refcount; //< the associated reference counter +}; + + +//////////////////////// Input/Output Array Arguments ///////////////////////////////// + +/*! + Proxy datatype for passing Mat's and vector<>'s as input parameters + */ +class CV_EXPORTS _InputArray +{ +public: + enum { KIND_SHIFT=16, NONE=0< _InputArray(const vector<_Tp>& vec); + template _InputArray(const vector >& vec); + _InputArray(const vector& vec); + template _InputArray(const Matx<_Tp, m, n>& matx); + _InputArray(const Scalar& s); + _InputArray(const double& val); + virtual Mat getMat(int i=-1) const; + virtual void getMatVector(vector& mv) const; + virtual int kind() const; + virtual Size size(int i=-1) const; + virtual size_t total(int i=-1) const; + virtual int type(int i=-1) const; + virtual int depth(int i=-1) const; + virtual int channels(int i=-1) const; + virtual bool empty() const; + + int flags; + void* obj; + Size sz; +}; + + +enum +{ + DEPTH_MASK_8U = 1 << CV_8U, + DEPTH_MASK_8S = 1 << CV_8S, + DEPTH_MASK_16U = 1 << CV_16U, + DEPTH_MASK_16S = 1 << CV_16S, + DEPTH_MASK_32S = 1 << CV_32S, + DEPTH_MASK_32F = 1 << CV_32F, + DEPTH_MASK_64F = 1 << CV_64F, + DEPTH_MASK_ALL = (DEPTH_MASK_64F<<1)-1, + DEPTH_MASK_ALL_BUT_8S = DEPTH_MASK_ALL & ~DEPTH_MASK_8S, + DEPTH_MASK_FLT = DEPTH_MASK_32F + DEPTH_MASK_64F +}; + + +/*! 
+ Proxy datatype for passing Mat's and vector<>'s as input parameters + */ +class CV_EXPORTS _OutputArray : public _InputArray +{ +public: + _OutputArray(); + _OutputArray(Mat& m); + template _OutputArray(vector<_Tp>& vec); + template _OutputArray(vector >& vec); + _OutputArray(vector& vec); + template _OutputArray(Matx<_Tp, m, n>& matx); + virtual bool fixedSize() const; + virtual bool fixedType() const; + virtual bool needed() const; + virtual Mat& getMatRef(int i=-1) const; + virtual void create(Size sz, int type, int i=-1, bool allocateVector=false, int fixedDepthMask=0) const; + virtual void create(int rows, int cols, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const; + virtual void create(int dims, const int* size, int type, int i=-1, bool allowTransposed=false, int fixedDepthMask=0) const; + virtual void release() const; + virtual void clear() const; +}; + +typedef const _InputArray& InputArray; +typedef InputArray InputArrayOfArrays; +typedef const _OutputArray& OutputArray; +typedef OutputArray OutputArrayOfArrays; +typedef OutputArray InputOutputArray; +typedef OutputArray InputOutputArrayOfArrays; + +CV_EXPORTS OutputArray noArray(); + +/////////////////////////////////////// Mat /////////////////////////////////////////// + +enum { MAGIC_MASK=0xFFFF0000, TYPE_MASK=0x00000FFF, DEPTH_MASK=7 }; + +static inline size_t getElemSize(int type) { return CV_ELEM_SIZE(type); } + +/*! + Custom array allocator + +*/ +class CV_EXPORTS MatAllocator +{ +public: + MatAllocator() {} + virtual ~MatAllocator() {} + virtual void allocate(int dims, const int* sizes, int type, int*& refcount, + uchar*& datastart, uchar*& data, size_t* step) = 0; + virtual void deallocate(int* refcount, uchar* datastart, uchar* data) = 0; +}; + +/*! + The n-dimensional matrix class. + + The class represents an n-dimensional dense numerical array that can act as + a matrix, image, optical flow map, 3-focal tensor etc. 
+ It is very similar to CvMat and CvMatND types from earlier versions of OpenCV, + and similarly to those types, the matrix can be multi-channel. It also fully supports ROI mechanism. + + There are many different ways to create cv::Mat object. Here are the some popular ones: +
    +
  • using cv::Mat::create(nrows, ncols, type) method or + the similar constructor cv::Mat::Mat(nrows, ncols, type[, fill_value]) constructor. + A new matrix of the specified size and specifed type will be allocated. + "type" has the same meaning as in cvCreateMat function, + e.g. CV_8UC1 means 8-bit single-channel matrix, CV_32FC2 means 2-channel (i.e. complex) + floating-point matrix etc: + + \code + // make 7x7 complex matrix filled with 1+3j. + cv::Mat M(7,7,CV_32FC2,Scalar(1,3)); + // and now turn M to 100x60 15-channel 8-bit matrix. + // The old content will be deallocated + M.create(100,60,CV_8UC(15)); + \endcode + + As noted in the introduction of this chapter, Mat::create() + will only allocate a new matrix when the current matrix dimensionality + or type are different from the specified. + +
  • by using a copy constructor or assignment operator, where on the right side it can + be a matrix or expression, see below. Again, as noted in the introduction, + matrix assignment is O(1) operation because it only copies the header + and increases the reference counter. cv::Mat::clone() method can be used to get a full + (a.k.a. deep) copy of the matrix when you need it. + +
  • by constructing a header for a part of another matrix. It can be a single row, single column, + several rows, several columns, rectangular region in the matrix (called a minor in algebra) or + a diagonal. Such operations are also O(1), because the new header will reference the same data. + You can actually modify a part of the matrix using this feature, e.g. + + \code + // add 5-th row, multiplied by 3 to the 3rd row + M.row(3) = M.row(3) + M.row(5)*3; + + // now copy 7-th column to the 1-st column + // M.col(1) = M.col(7); // this will not work + Mat M1 = M.col(1); + M.col(7).copyTo(M1); + + // create new 320x240 image + cv::Mat img(Size(320,240),CV_8UC3); + // select a roi + cv::Mat roi(img, Rect(10,10,100,100)); + // fill the ROI with (0,255,0) (which is green in RGB space); + // the original 320x240 image will be modified + roi = Scalar(0,255,0); + \endcode + + Thanks to the additional cv::Mat::datastart and cv::Mat::dataend members, it is possible to + compute the relative sub-matrix position in the main "container" matrix using cv::Mat::locateROI(): + + \code + Mat A = Mat::eye(10, 10, CV_32S); + // extracts A columns, 1 (inclusive) to 3 (exclusive). + Mat B = A(Range::all(), Range(1, 3)); + // extracts B rows, 5 (inclusive) to 9 (exclusive). + // that is, C ~ A(Range(5, 9), Range(1, 3)) + Mat C = B(Range(5, 9), Range::all()); + Size size; Point ofs; + C.locateROI(size, ofs); + // size will be (width=10,height=10) and the ofs will be (x=1, y=5) + \endcode + + As in the case of whole matrices, if you need a deep copy, use cv::Mat::clone() method + of the extracted sub-matrices. + +
  • by making a header for user-allocated-data. It can be useful for +
      +
    1. processing "foreign" data using OpenCV (e.g. when you implement + a DirectShow filter or a processing module for gstreamer etc.), e.g. + + \code + void process_video_frame(const unsigned char* pixels, + int width, int height, int step) + { + cv::Mat img(height, width, CV_8UC3, pixels, step); + cv::GaussianBlur(img, img, cv::Size(7,7), 1.5, 1.5); + } + \endcode + +
    2. for quick initialization of small matrices and/or super-fast element access + + \code + double m[3][3] = {{a, b, c}, {d, e, f}, {g, h, i}}; + cv::Mat M = cv::Mat(3, 3, CV_64F, m).inv(); + \endcode +
    + + partial yet very common cases of this "user-allocated data" case are conversions + from CvMat and IplImage to cv::Mat. For this purpose there are special constructors + taking pointers to CvMat or IplImage and the optional + flag indicating whether to copy the data or not. + + Backward conversion from cv::Mat to CvMat or IplImage is provided via cast operators + cv::Mat::operator CvMat() an cv::Mat::operator IplImage(). + The operators do not copy the data. + + + \code + IplImage* img = cvLoadImage("greatwave.jpg", 1); + Mat mtx(img); // convert IplImage* -> cv::Mat + CvMat oldmat = mtx; // convert cv::Mat -> CvMat + CV_Assert(oldmat.cols == img->width && oldmat.rows == img->height && + oldmat.data.ptr == (uchar*)img->imageData && oldmat.step == img->widthStep); + \endcode + +
  • by using MATLAB-style matrix initializers, cv::Mat::zeros(), cv::Mat::ones(), cv::Mat::eye(), e.g.: + + \code + // create a double-precision identity martix and add it to M. + M += Mat::eye(M.rows, M.cols, CV_64F); + \endcode + +
  • by using comma-separated initializer: + + \code + // create 3x3 double-precision identity matrix + Mat M = (Mat_(3,3) << 1, 0, 0, 0, 1, 0, 0, 0, 1); + \endcode + + here we first call constructor of cv::Mat_ class (that we describe further) with the proper matrix, + and then we just put "<<" operator followed by comma-separated values that can be constants, + variables, expressions etc. Also, note the extra parentheses that are needed to avoid compiler errors. + +
+ + Once matrix is created, it will be automatically managed by using reference-counting mechanism + (unless the matrix header is built on top of user-allocated data, + in which case you should handle the data by yourself). + The matrix data will be deallocated when no one points to it; + if you want to release the data pointed by a matrix header before the matrix destructor is called, + use cv::Mat::release(). + + The next important thing to learn about the matrix class is element access. Here is how the matrix is stored. + The elements are stored in row-major order (row by row). The cv::Mat::data member points to the first element of the first row, + cv::Mat::rows contains the number of matrix rows and cv::Mat::cols - the number of matrix columns. There is yet another member, + cv::Mat::step that is used to actually compute address of a matrix element. cv::Mat::step is needed because the matrix can be + a part of another matrix or because there can some padding space in the end of each row for a proper alignment. + + \image html roi.png + + Given these parameters, address of the matrix element M_{ij} is computed as following: + + addr(M_{ij})=M.data + M.step*i + j*M.elemSize() + + if you know the matrix element type, e.g. it is float, then you can use cv::Mat::at() method: + + addr(M_{ij})=&M.at(i,j) + + (where & is used to convert the reference returned by cv::Mat::at() to a pointer). 
+ if you need to process a whole row of matrix, the most efficient way is to get + the pointer to the row first, and then just use plain C operator []: + + \code + // compute sum of positive matrix elements + // (assuming that M is double-precision matrix) + double sum=0; + for(int i = 0; i < M.rows; i++) + { + const double* Mi = M.ptr(i); + for(int j = 0; j < M.cols; j++) + sum += std::max(Mi[j], 0.); + } + \endcode + + Some operations, like the above one, do not actually depend on the matrix shape, + they just process elements of a matrix one by one (or elements from multiple matrices + that are sitting in the same place, e.g. matrix addition). Such operations are called + element-wise and it makes sense to check whether all the input/output matrices are continuous, + i.e. have no gaps in the end of each row, and if yes, process them as a single long row: + + \code + // compute sum of positive matrix elements, optimized variant + double sum=0; + int cols = M.cols, rows = M.rows; + if(M.isContinuous()) + { + cols *= rows; + rows = 1; + } + for(int i = 0; i < rows; i++) + { + const double* Mi = M.ptr(i); + for(int j = 0; j < cols; j++) + sum += std::max(Mi[j], 0.); + } + \endcode + in the case of continuous matrix the outer loop body will be executed just once, + so the overhead will be smaller, which will be especially noticeable in the case of small matrices. + + Finally, there are STL-style iterators that are smart enough to skip gaps between successive rows: + \code + // compute sum of positive matrix elements, iterator-based variant + double sum=0; + MatConstIterator_ it = M.begin(), it_end = M.end(); + for(; it != it_end; ++it) + sum += std::max(*it, 0.); + \endcode + + The matrix iterators are random-access iterators, so they can be passed + to any STL algorithm, including std::sort(). +*/ +class CV_EXPORTS Mat +{ +public: + //! default constructor + Mat(); + //! 
constructs 2D matrix of the specified size and type + // (_type is CV_8UC1, CV_64FC3, CV_32SC(12) etc.) + Mat(int _rows, int _cols, int _type); + Mat(Size _size, int _type); + //! constucts 2D matrix and fills it with the specified value _s. + Mat(int _rows, int _cols, int _type, const Scalar& _s); + Mat(Size _size, int _type, const Scalar& _s); + + //! constructs n-dimensional matrix + Mat(int _ndims, const int* _sizes, int _type); + Mat(int _ndims, const int* _sizes, int _type, const Scalar& _s); + + //! copy constructor + Mat(const Mat& m); + //! constructor for matrix headers pointing to user-allocated data + Mat(int _rows, int _cols, int _type, void* _data, size_t _step=AUTO_STEP); + Mat(Size _size, int _type, void* _data, size_t _step=AUTO_STEP); + Mat(int _ndims, const int* _sizes, int _type, void* _data, const size_t* _steps=0); + + //! creates a matrix header for a part of the bigger matrix + Mat(const Mat& m, const Range& rowRange, const Range& colRange=Range::all()); + Mat(const Mat& m, const Rect& roi); + Mat(const Mat& m, const Range* ranges); + //! converts old-style CvMat to the new matrix; the data is not copied by default + Mat(const CvMat* m, bool copyData=false); + //! converts old-style CvMatND to the new matrix; the data is not copied by default + Mat(const CvMatND* m, bool copyData=false); + //! converts old-style IplImage to the new matrix; the data is not copied by default + Mat(const IplImage* img, bool copyData=false); + //! builds matrix from std::vector with or without copying the data + template explicit Mat(const vector<_Tp>& vec, bool copyData=false); + //! builds matrix from cv::Vec; the data is copied by default + template explicit Mat(const Vec<_Tp, n>& vec, + bool copyData=true); + //! builds matrix from cv::Matx; the data is copied by default + template explicit Mat(const Matx<_Tp, m, n>& mtx, + bool copyData=true); + //! builds matrix from a 2D point + template explicit Mat(const Point_<_Tp>& pt, bool copyData=true); + //! 
builds matrix from a 3D point + template explicit Mat(const Point3_<_Tp>& pt, bool copyData=true); + //! builds matrix from comma initializer + template explicit Mat(const MatCommaInitializer_<_Tp>& commaInitializer); + //! destructor - calls release() + ~Mat(); + //! assignment operators + Mat& operator = (const Mat& m); + Mat& operator = (const MatExpr& expr); + + //! returns a new matrix header for the specified row + Mat row(int y) const; + //! returns a new matrix header for the specified column + Mat col(int x) const; + //! ... for the specified row span + Mat rowRange(int startrow, int endrow) const; + Mat rowRange(const Range& r) const; + //! ... for the specified column span + Mat colRange(int startcol, int endcol) const; + Mat colRange(const Range& r) const; + //! ... for the specified diagonal + // (d=0 - the main diagonal, + // >0 - a diagonal from the lower half, + // <0 - a diagonal from the upper half) + Mat diag(int d=0) const; + //! constructs a square diagonal matrix which main diagonal is vector "d" + static Mat diag(const Mat& d); + + //! returns deep copy of the matrix, i.e. the data is copied + Mat clone() const; + //! copies the matrix content to "m". + // It calls m.create(this->size(), this->type()). + void copyTo( OutputArray m ) const; + //! copies those matrix elements to "m" that are marked with non-zero mask elements. + void copyTo( OutputArray m, InputArray mask ) const; + //! converts matrix to another datatype with optional scalng. See cvConvertScale. + void convertTo( OutputArray m, int rtype, double alpha=1, double beta=0 ) const; + + void assignTo( Mat& m, int type=-1 ) const; + + //! sets every matrix element to s + Mat& operator = (const Scalar& s); + //! sets some of the matrix elements to s, according to the mask + Mat& setTo(InputArray value, InputArray mask=noArray()); + //! creates alternative matrix header for the same data, with different + // number of channels and/or different number of rows. see cvReshape. 
+ Mat reshape(int _cn, int _rows=0) const; + Mat reshape(int _cn, int _newndims, const int* _newsz) const; + + //! matrix transposition by means of matrix expressions + MatExpr t() const; + //! matrix inversion by means of matrix expressions + MatExpr inv(int method=DECOMP_LU) const; + //! per-element matrix multiplication by means of matrix expressions + MatExpr mul(InputArray m, double scale=1) const; + + //! computes cross-product of 2 3D vectors + Mat cross(InputArray m) const; + //! computes dot-product + double dot(InputArray m) const; + + //! Matlab-style matrix initialization + static MatExpr zeros(int rows, int cols, int type); + static MatExpr zeros(Size size, int type); + static MatExpr zeros(int ndims, const int* sz, int type); + static MatExpr ones(int rows, int cols, int type); + static MatExpr ones(Size size, int type); + static MatExpr ones(int ndims, const int* sz, int type); + static MatExpr eye(int rows, int cols, int type); + static MatExpr eye(Size size, int type); + + //! allocates new matrix data unless the matrix already has specified size and type. + // previous data is unreferenced if needed. + void create(int _rows, int _cols, int _type); + void create(Size _size, int _type); + void create(int _ndims, const int* _sizes, int _type); + + //! increases the reference counter; use with care to avoid memleaks + void addref(); + //! decreases reference counter; + // deallocates the data when reference counter reaches 0. + void release(); + + //! deallocates the matrix data + void deallocate(); + //! internal use function; properly re-allocates _size, _step arrays + void copySize(const Mat& m); + + //! reserves enough space to fit sz hyper-planes + void reserve(size_t sz); + //! resizes matrix to the specified number of hyper-planes + void resize(size_t sz); + //! resizes matrix to the specified number of hyper-planes; initializes the newly added elements + void resize(size_t sz, const Scalar& s); + //! 
internal function + void push_back_(const void* elem); + //! adds element to the end of 1d matrix (or possibly multiple elements when _Tp=Mat) + template void push_back(const _Tp& elem); + template void push_back(const Mat_<_Tp>& elem); + void push_back(const Mat& m); + //! removes several hyper-planes from bottom of the matrix + void pop_back(size_t nelems=1); + + //! locates matrix header within a parent matrix. See below + void locateROI( Size& wholeSize, Point& ofs ) const; + //! moves/resizes the current matrix ROI inside the parent matrix. + Mat& adjustROI( int dtop, int dbottom, int dleft, int dright ); + //! extracts a rectangular sub-matrix + // (this is a generalized form of row, rowRange etc.) + Mat operator()( Range rowRange, Range colRange ) const; + Mat operator()( const Rect& roi ) const; + Mat operator()( const Range* ranges ) const; + + //! converts header to CvMat; no data is copied + operator CvMat() const; + //! converts header to CvMatND; no data is copied + operator CvMatND() const; + //! converts header to IplImage; no data is copied + operator IplImage() const; + + template operator vector<_Tp>() const; + template operator Vec<_Tp, n>() const; + template operator Matx<_Tp, m, n>() const; + + //! returns true iff the matrix data is continuous + // (i.e. when there are no gaps between successive rows). + // similar to CV_IS_MAT_CONT(cvmat->type) + bool isContinuous() const; + + //! returns true if the matrix is a submatrix of another matrix + bool isSubmatrix() const; + + //! returns element size in bytes, + // similar to CV_ELEM_SIZE(cvmat->type) + size_t elemSize() const; + //! returns the size of element channel in bytes. + size_t elemSize1() const; + //! returns element type, similar to CV_MAT_TYPE(cvmat->type) + int type() const; + //! returns element type, similar to CV_MAT_DEPTH(cvmat->type) + int depth() const; + //! returns element type, similar to CV_MAT_CN(cvmat->type) + int channels() const; + //! 
returns step/elemSize1() + size_t step1(int i=0) const; + //! returns true if matrix data is NULL + bool empty() const; + //! returns the total number of matrix elements + size_t total() const; + + //! returns N if the matrix is 1-channel (N x ptdim) or ptdim-channel (1 x N) or (N x 1); negative number otherwise + int checkVector(int elemChannels, int depth=-1, bool requireContinuous=true) const; + + //! returns pointer to i0-th submatrix along the dimension #0 + uchar* ptr(int i0=0); + const uchar* ptr(int i0=0) const; + + //! returns pointer to (i0,i1) submatrix along the dimensions #0 and #1 + uchar* ptr(int i0, int i1); + const uchar* ptr(int i0, int i1) const; + + //! returns pointer to (i0,i1,i3) submatrix along the dimensions #0, #1, #2 + uchar* ptr(int i0, int i1, int i2); + const uchar* ptr(int i0, int i1, int i2) const; + + //! returns pointer to the matrix element + uchar* ptr(const int* idx); + //! returns read-only pointer to the matrix element + const uchar* ptr(const int* idx) const; + + template uchar* ptr(const Vec& idx); + template const uchar* ptr(const Vec& idx) const; + + //! template version of the above method + template _Tp* ptr(int i0=0); + template const _Tp* ptr(int i0=0) const; + + template _Tp* ptr(int i0, int i1); + template const _Tp* ptr(int i0, int i1) const; + + template _Tp* ptr(int i0, int i1, int i2); + template const _Tp* ptr(int i0, int i1, int i2) const; + + template _Tp* ptr(const int* idx); + template const _Tp* ptr(const int* idx) const; + + template _Tp* ptr(const Vec& idx); + template const _Tp* ptr(const Vec& idx) const; + + //! 
the same as above, with the pointer dereferencing + template _Tp& at(int i0=0); + template const _Tp& at(int i0=0) const; + + template _Tp& at(int i0, int i1); + template const _Tp& at(int i0, int i1) const; + + template _Tp& at(int i0, int i1, int i2); + template const _Tp& at(int i0, int i1, int i2) const; + + template _Tp& at(const int* idx); + template const _Tp& at(const int* idx) const; + + template _Tp& at(const Vec& idx); + template const _Tp& at(const Vec& idx) const; + + //! special versions for 2D arrays (especially convenient for referencing image pixels) + template _Tp& at(Point pt); + template const _Tp& at(Point pt) const; + + //! template methods for iteration over matrix elements. + // the iterators take care of skipping gaps in the end of rows (if any) + template MatIterator_<_Tp> begin(); + template MatIterator_<_Tp> end(); + template MatConstIterator_<_Tp> begin() const; + template MatConstIterator_<_Tp> end() const; + + enum { MAGIC_VAL=0x42FF0000, AUTO_STEP=0, CONTINUOUS_FLAG=CV_MAT_CONT_FLAG, SUBMATRIX_FLAG=CV_SUBMAT_FLAG }; + + /*! includes several bit-fields: + - the magic signature + - continuity flag + - depth + - number of channels + */ + int flags; + //! the matrix dimensionality, >= 2 + int dims; + //! the number of rows and columns or (-1, -1) when the matrix has more than 2 dimensions + int rows, cols; + //! pointer to the data + uchar* data; + + //! pointer to the reference counter; + // when matrix points to user-allocated data, the pointer is NULL + int* refcount; + + //! helper fields used in locateROI and adjustROI + uchar* datastart; + uchar* dataend; + uchar* datalimit; + + //! 
custom allocator + MatAllocator* allocator; + + struct CV_EXPORTS MSize + { + MSize(int* _p); + Size operator()() const; + const int& operator[](int i) const; + int& operator[](int i); + operator const int*() const; + bool operator == (const MSize& sz) const; + bool operator != (const MSize& sz) const; + + int* p; + }; + + struct CV_EXPORTS MStep + { + MStep(); + MStep(size_t s); + const size_t& operator[](int i) const; + size_t& operator[](int i); + operator size_t() const; + MStep& operator = (size_t s); + + size_t* p; + size_t buf[2]; + protected: + MStep& operator = (const MStep&); + }; + + MSize size; + MStep step; +}; + + +/*! + Random Number Generator + + The class implements RNG using Multiply-with-Carry algorithm +*/ +class CV_EXPORTS RNG +{ +public: + enum { UNIFORM=0, NORMAL=1 }; + + RNG(); + RNG(uint64 _state); + //! updates the state and returns the next 32-bit unsigned integer random number + unsigned next(); + + operator uchar(); + operator schar(); + operator ushort(); + operator short(); + operator unsigned(); + //! returns a random integer sampled uniformly from [0, N). + unsigned operator()(unsigned N); + unsigned operator ()(); + operator int(); + operator float(); + operator double(); + //! returns uniformly distributed integer random number from [a,b) range + int uniform(int a, int b); + //! returns uniformly distributed floating-point random number from [a,b) range + float uniform(float a, float b); + //! returns uniformly distributed double-precision floating-point random number from [a,b) range + double uniform(double a, double b); + void fill( InputOutputArray mat, int distType, InputArray a, InputArray b ); + //! returns Gaussian random variate with mean zero. + double gaussian(double sigma); + + uint64 state; +}; + + +/*! 
+ Termination criteria in iterative algorithms + */ +class CV_EXPORTS TermCriteria +{ +public: + enum + { + COUNT=1, //!< the maximum number of iterations or elements to compute + MAX_ITER=COUNT, //!< ditto + EPS=2 //!< the desired accuracy or change in parameters at which the iterative algorithm stops + }; + + //! default constructor + TermCriteria(); + //! full constructor + TermCriteria(int _type, int _maxCount, double _epsilon); + //! conversion from CvTermCriteria + TermCriteria(const CvTermCriteria& criteria); + //! conversion from CvTermCriteria + operator CvTermCriteria() const; + + int type; //!< the type of termination criteria: COUNT, EPS or COUNT + EPS + int maxCount; // the maximum number of iterations/elements + double epsilon; // the desired accuracy +}; + + +//! swaps two matrices +CV_EXPORTS void swap(Mat& a, Mat& b); + +//! converts array (CvMat or IplImage) to cv::Mat +CV_EXPORTS Mat cvarrToMat(const CvArr* arr, bool copyData=false, + bool allowND=true, int coiMode=0); +//! extracts Channel of Interest from CvMat or IplImage and makes cv::Mat out of it. +CV_EXPORTS void extractImageCOI(const CvArr* arr, OutputArray coiimg, int coi=-1); +//! inserts single-channel cv::Mat into a multi-channel CvMat or IplImage +CV_EXPORTS void insertImageCOI(InputArray coiimg, CvArr* arr, int coi=-1); + +//! adds one matrix to another (dst = src1 + src2) +CV_EXPORTS_W void add(InputArray src1, InputArray src2, OutputArray dst, + InputArray mask=noArray(), int dtype=-1); +//! subtracts one matrix from another (dst = src1 - src2) +CV_EXPORTS_W void subtract(InputArray src1, InputArray src2, OutputArray dst, + InputArray mask=noArray(), int dtype=-1); + +//! computes element-wise weighted product of the two arrays (dst = scale*src1*src2) +CV_EXPORTS_W void multiply(InputArray src1, InputArray src2, + OutputArray dst, double scale=1, int dtype=-1); + +//! 
computes element-wise weighted quotient of the two arrays (dst = scale*src1/src2) +CV_EXPORTS_W void divide(InputArray src1, InputArray src2, OutputArray dst, + double scale=1, int dtype=-1); + +//! computes element-wise weighted reciprocal of an array (dst = scale/src2) +CV_EXPORTS_W void divide(double scale, InputArray src2, + OutputArray dst, int dtype=-1); + +//! adds scaled array to another one (dst = alpha*src1 + src2) +CV_EXPORTS_W void scaleAdd(InputArray src1, double alpha, InputArray src2, OutputArray dst); + +//! computes weighted sum of two arrays (dst = alpha*src1 + beta*src2 + gamma) +CV_EXPORTS_W void addWeighted(InputArray src1, double alpha, InputArray src2, + double beta, double gamma, OutputArray dst, int dtype=-1); + +//! scales array elements, computes absolute values and converts the results to 8-bit unsigned integers: dst(i)=saturate_castabs(src(i)*alpha+beta) +CV_EXPORTS_W void convertScaleAbs(InputArray src, OutputArray dst, + double alpha=1, double beta=0); +//! transforms array of numbers using a lookup table: dst(i)=lut(src(i)) +CV_EXPORTS_W void LUT(InputArray src, InputArray lut, OutputArray dst, + int interpolation=0); + +//! computes sum of array elements +CV_EXPORTS_AS(sumElems) Scalar sum(InputArray src); +//! computes the number of nonzero array elements +CV_EXPORTS_W int countNonZero( InputArray src ); +//! computes mean value of selected array elements +CV_EXPORTS_W Scalar mean(InputArray src, InputArray mask=noArray()); +//! computes mean value and standard deviation of all or selected array elements +CV_EXPORTS_W void meanStdDev(InputArray src, OutputArray mean, OutputArray stddev, + InputArray mask=noArray()); +//! computes norm of the selected array part +CV_EXPORTS_W double norm(InputArray src1, int normType=NORM_L2, InputArray mask=noArray()); +//! 
computes norm of selected part of the difference between two arrays +CV_EXPORTS_W double norm(InputArray src1, InputArray src2, + int normType=NORM_L2, InputArray mask=noArray()); +//! scales and shifts array elements so that either the specified norm (alpha) or the minimum (alpha) and maximum (beta) array values get the specified values +CV_EXPORTS_W void normalize( InputArray src, OutputArray dst, double alpha=1, double beta=0, + int norm_type=NORM_L2, int dtype=-1, InputArray mask=noArray()); + +//! finds global minimum and maximum array elements and returns their values and their locations +CV_EXPORTS_W void minMaxLoc(InputArray src, CV_OUT double* minVal, + CV_OUT double* maxVal=0, CV_OUT Point* minLoc=0, + CV_OUT Point* maxLoc=0, InputArray mask=noArray()); +CV_EXPORTS void minMaxIdx(InputArray src, double* minVal, double* maxVal, + int* minIdx=0, int* maxIdx=0, InputArray mask=noArray()); + +//! transforms 2D matrix to 1D row or column vector by taking sum, minimum, maximum or mean value over all the rows +CV_EXPORTS_W void reduce(InputArray src, OutputArray dst, int dim, int rtype, int dtype=-1); + +//! makes multi-channel array out of several single-channel arrays +CV_EXPORTS void merge(const Mat* mv, size_t count, OutputArray dst); +//! makes multi-channel array out of several single-channel arrays +CV_EXPORTS_W void merge(const vector& mv, OutputArray dst); + +//! copies each plane of a multi-channel array to a dedicated array +CV_EXPORTS void split(const Mat& src, Mat* mvbegin); +//! copies each plane of a multi-channel array to a dedicated array +CV_EXPORTS_W void split(const Mat& m, CV_OUT vector& mv); + +//! 
copies selected channels from the input arrays to the selected channels of the output arrays +CV_EXPORTS void mixChannels(const Mat* src, size_t nsrcs, Mat* dst, size_t ndsts, + const int* fromTo, size_t npairs); +CV_EXPORTS void mixChannels(const vector& src, vector& dst, + const int* fromTo, size_t npairs); +CV_EXPORTS_W void mixChannels(InputArrayOfArrays src, InputArrayOfArrays dst, + const vector& fromTo); + +//! extracts a single channel from src (coi is 0-based index) +CV_EXPORTS_W void extractChannel(InputArray src, OutputArray dst, int coi); + +//! inserts a single channel to dst (coi is 0-based index) +CV_EXPORTS_W void insertChannel(InputArray src, InputOutputArray dst, int coi); + +//! reverses the order of the rows, columns or both in a matrix +CV_EXPORTS_W void flip(InputArray src, OutputArray dst, int flipCode); + +//! replicates the input matrix the specified number of times in the horizontal and/or vertical direction +CV_EXPORTS_W void repeat(InputArray src, int ny, int nx, OutputArray dst); +CV_EXPORTS Mat repeat(const Mat& src, int ny, int nx); + +CV_EXPORTS void hconcat(const Mat* src, size_t nsrc, OutputArray dst); +CV_EXPORTS void hconcat(InputArray src1, InputArray src2, OutputArray dst); +CV_EXPORTS_W void hconcat(InputArray src, OutputArray dst); + +CV_EXPORTS void vconcat(const Mat* src, size_t nsrc, OutputArray dst); +CV_EXPORTS void vconcat(InputArray src1, InputArray src2, OutputArray dst); +CV_EXPORTS_W void vconcat(InputArray src, OutputArray dst); + +//! computes bitwise conjunction of the two arrays (dst = src1 & src2) +CV_EXPORTS_W void bitwise_and(InputArray src1, InputArray src2, + OutputArray dst, InputArray mask=noArray()); +//! computes bitwise disjunction of the two arrays (dst = src1 | src2) +CV_EXPORTS_W void bitwise_or(InputArray src1, InputArray src2, + OutputArray dst, InputArray mask=noArray()); +//! 
computes bitwise exclusive-or of the two arrays (dst = src1 ^ src2) +CV_EXPORTS_W void bitwise_xor(InputArray src1, InputArray src2, + OutputArray dst, InputArray mask=noArray()); +//! inverts each bit of array (dst = ~src) +CV_EXPORTS_W void bitwise_not(InputArray src, OutputArray dst, + InputArray mask=noArray()); +//! computes element-wise absolute difference of two arrays (dst = abs(src1 - src2)) +CV_EXPORTS_W void absdiff(InputArray src1, InputArray src2, OutputArray dst); +//! set mask elements for those array elements which are within the element-specific bounding box (dst = lowerb <= src && src < upperb) +CV_EXPORTS_W void inRange(InputArray src, InputArray lowerb, + InputArray upperb, OutputArray dst); +//! compares elements of two arrays (dst = src1 src2) +CV_EXPORTS_W void compare(InputArray src1, InputArray src2, OutputArray dst, int cmpop); +//! computes per-element minimum of two arrays (dst = min(src1, src2)) +CV_EXPORTS_W void min(InputArray src1, InputArray src2, OutputArray dst); +//! computes per-element maximum of two arrays (dst = max(src1, src2)) +CV_EXPORTS_W void max(InputArray src1, InputArray src2, OutputArray dst); + +//! computes per-element minimum of two arrays (dst = min(src1, src2)) +CV_EXPORTS void min(const Mat& src1, const Mat& src2, Mat& dst); +//! computes per-element minimum of array and scalar (dst = min(src1, src2)) +CV_EXPORTS void min(const Mat& src1, double src2, Mat& dst); +//! computes per-element maximum of two arrays (dst = max(src1, src2)) +CV_EXPORTS void max(const Mat& src1, const Mat& src2, Mat& dst); +//! computes per-element maximum of array and scalar (dst = max(src1, src2)) +CV_EXPORTS void max(const Mat& src1, double src2, Mat& dst); + +//! computes square root of each matrix element (dst = src**0.5) +CV_EXPORTS_W void sqrt(InputArray src, OutputArray dst); +//! raises the input matrix elements to the specified power (b = a**power) +CV_EXPORTS_W void pow(InputArray src, double power, OutputArray dst); +//! 
computes exponent of each matrix element (dst = e**src) +CV_EXPORTS_W void exp(InputArray src, OutputArray dst); +//! computes natural logarithm of absolute value of each matrix element: dst = log(abs(src)) +CV_EXPORTS_W void log(InputArray src, OutputArray dst); +//! computes cube root of the argument +CV_EXPORTS_W float cubeRoot(float val); +//! computes the angle in degrees (0..360) of the vector (x,y) +CV_EXPORTS_W float fastAtan2(float y, float x); +//! converts polar coordinates to Cartesian +CV_EXPORTS_W void polarToCart(InputArray magnitude, InputArray angle, + OutputArray x, OutputArray y, bool angleInDegrees=false); +//! converts Cartesian coordinates to polar +CV_EXPORTS_W void cartToPolar(InputArray x, InputArray y, + OutputArray magnitude, OutputArray angle, + bool angleInDegrees=false); +//! computes angle (angle(i)) of each (x(i), y(i)) vector +CV_EXPORTS_W void phase(InputArray x, InputArray y, OutputArray angle, + bool angleInDegrees=false); +//! computes magnitude (magnitude(i)) of each (x(i), y(i)) vector +CV_EXPORTS_W void magnitude(InputArray x, InputArray y, OutputArray magnitude); +//! checks that each matrix element is within the specified range. +CV_EXPORTS_W bool checkRange(InputArray a, bool quiet=true, CV_OUT Point* pt=0, + double minVal=-DBL_MAX, double maxVal=DBL_MAX); +//! implements generalized matrix product algorithm GEMM from BLAS +CV_EXPORTS_W void gemm(InputArray src1, InputArray src2, double alpha, + InputArray src3, double gamma, OutputArray dst, int flags=0); +//! multiplies matrix by its transposition from the left or from the right +CV_EXPORTS_W void mulTransposed( InputArray src, OutputArray dst, bool aTa, + InputArray delta=noArray(), + double scale=1, int dtype=-1 ); +//! transposes the matrix +CV_EXPORTS_W void transpose(InputArray src, OutputArray dst); +//! performs affine transformation of each element of multi-channel input matrix +CV_EXPORTS_W void transform(InputArray src, OutputArray dst, InputArray m ); +//! 
performs perspective transformation of each element of multi-channel input matrix +CV_EXPORTS_W void perspectiveTransform(InputArray src, OutputArray dst, InputArray m ); + +//! extends the symmetrical matrix from the lower half or from the upper half +CV_EXPORTS_W void completeSymm(InputOutputArray mtx, bool lowerToUpper=false); +//! initializes scaled identity matrix +CV_EXPORTS_W void setIdentity(InputOutputArray mtx, const Scalar& s=Scalar(1)); +//! computes determinant of a square matrix +CV_EXPORTS_W double determinant(InputArray mtx); +//! computes trace of a matrix +CV_EXPORTS_W Scalar trace(InputArray mtx); +//! computes inverse or pseudo-inverse matrix +CV_EXPORTS_W double invert(InputArray src, OutputArray dst, int flags=DECOMP_LU); +//! solves linear system or a least-square problem +CV_EXPORTS_W bool solve(InputArray src1, InputArray src2, + OutputArray dst, int flags=DECOMP_LU); + +enum +{ + SORT_EVERY_ROW=0, + SORT_EVERY_COLUMN=1, + SORT_ASCENDING=0, + SORT_DESCENDING=16 +}; + +//! sorts independently each matrix row or each matrix column +CV_EXPORTS_W void sort(InputArray src, OutputArray dst, int flags); +//! sorts independently each matrix row or each matrix column +CV_EXPORTS_W void sortIdx(InputArray src, OutputArray dst, int flags); +//! finds real roots of a cubic polynomial +CV_EXPORTS_W int solveCubic(InputArray coeffs, OutputArray roots); +//! finds real and complex roots of a polynomial +CV_EXPORTS_W double solvePoly(InputArray coeffs, OutputArray roots, int maxIters=300); +//! finds eigenvalues of a symmetric matrix +CV_EXPORTS bool eigen(InputArray src, OutputArray eigenvalues, int lowindex=-1, + int highindex=-1); +//! 
finds eigenvalues and eigenvectors of a symmetric matrix +CV_EXPORTS bool eigen(InputArray src, OutputArray eigenvalues, + OutputArray eigenvectors, + int lowindex=-1, int highindex=-1); +CV_EXPORTS_W bool eigen(InputArray src, bool computeEigenvectors, + OutputArray eigenvalues, OutputArray eigenvectors); + +enum +{ + COVAR_SCRAMBLED=0, + COVAR_NORMAL=1, + COVAR_USE_AVG=2, + COVAR_SCALE=4, + COVAR_ROWS=8, + COVAR_COLS=16 +}; + +//! computes covariation matrix of a set of samples +CV_EXPORTS void calcCovarMatrix( const Mat* samples, int nsamples, Mat& covar, Mat& mean, + int flags, int ctype=CV_64F); +//! computes covariation matrix of a set of samples +CV_EXPORTS_W void calcCovarMatrix( InputArray samples, OutputArray covar, + OutputArray mean, int flags, int ctype=CV_64F); + +/*! + Principal Component Analysis + + The class PCA is used to compute the special basis for a set of vectors. + The basis will consist of eigenvectors of the covariance matrix computed + from the input set of vectors. After PCA is performed, vectors can be transformed from + the original high-dimensional space to the subspace formed by a few most + prominent eigenvectors (called the principal components), + corresponding to the largest eigenvalues of the covariation matrix. + Thus the dimensionality of the vector and the correlation between the coordinates is reduced. + + The following sample is the function that takes two matrices. The first one stores the set + of vectors (a row per vector) that is used to compute PCA, the second one stores another + "test" set of vectors (a row per vector) that are first compressed with PCA, + then reconstructed back and then the reconstruction error norm is computed and printed for each vector. 
+ + \code + using namespace cv; + + PCA compressPCA(const Mat& pcaset, int maxComponents, + const Mat& testset, Mat& compressed) + { + PCA pca(pcaset, // pass the data + Mat(), // we do not have a pre-computed mean vector, + // so let the PCA engine to compute it + CV_PCA_DATA_AS_ROW, // indicate that the vectors + // are stored as matrix rows + // (use CV_PCA_DATA_AS_COL if the vectors are + // the matrix columns) + maxComponents // specify, how many principal components to retain + ); + // if there is no test data, just return the computed basis, ready-to-use + if( !testset.data ) + return pca; + CV_Assert( testset.cols == pcaset.cols ); + + compressed.create(testset.rows, maxComponents, testset.type()); + + Mat reconstructed; + for( int i = 0; i < testset.rows; i++ ) + { + Mat vec = testset.row(i), coeffs = compressed.row(i), reconstructed; + // compress the vector, the result will be stored + // in the i-th row of the output matrix + pca.project(vec, coeffs); + // and then reconstruct it + pca.backProject(coeffs, reconstructed); + // and measure the error + printf("%d. diff = %g\n", i, norm(vec, reconstructed, NORM_L2)); + } + return pca; + } + \endcode +*/ +class CV_EXPORTS PCA +{ +public: + //! default constructor + PCA(); + //! the constructor that performs PCA + PCA(InputArray data, InputArray mean, int flags, int maxComponents=0); + //! operator that performs PCA. The previously stored data, if any, is released + PCA& operator()(InputArray data, InputArray mean, int flags, int maxComponents=0); + //! projects vector from the original space to the principal components subspace + Mat project(InputArray vec) const; + //! projects vector from the original space to the principal components subspace + void project(InputArray vec, OutputArray result) const; + //! reconstructs the original vector from the projection + Mat backProject(InputArray vec) const; + //! 
reconstructs the original vector from the projection + void backProject(InputArray vec, OutputArray result) const; + + Mat eigenvectors; //!< eigenvectors of the covariation matrix + Mat eigenvalues; //!< eigenvalues of the covariation matrix + Mat mean; //!< mean value subtracted before the projection and added after the back projection +}; + +CV_EXPORTS_W void PCACompute(InputArray data, CV_OUT InputOutputArray mean, + OutputArray eigenvectors, int maxComponents=0); + +CV_EXPORTS_W void PCAProject(InputArray data, InputArray mean, + InputArray eigenvectors, OutputArray result); + +CV_EXPORTS_W void PCABackProject(InputArray data, InputArray mean, + InputArray eigenvectors, OutputArray result); + + +/*! + Singular Value Decomposition class + + The class is used to compute Singular Value Decomposition of a floating-point matrix and then + use it to solve least-square problems, under-determined linear systems, invert matrices, + compute condition numbers etc. + + For a bit faster operation you can pass flags=SVD::MODIFY_A|... to modify the decomposed matrix + when it is not necessarily to preserve it. If you want to compute condition number of a matrix + or absolute value of its determinant - you do not need SVD::u or SVD::vt, + so you can pass flags=SVD::NO_UV|... . Another flag SVD::FULL_UV indicates that the full-size SVD::u and SVD::vt + must be computed, which is not necessary most of the time. +*/ +class CV_EXPORTS SVD +{ +public: + enum { MODIFY_A=1, NO_UV=2, FULL_UV=4 }; + //! the default constructor + SVD(); + //! the constructor that performs SVD + SVD( InputArray src, int flags=0 ); + //! the operator that performs SVD. The previously allocated SVD::u, SVD::w are SVD::vt are released. + SVD& operator ()( InputArray src, int flags=0 ); + + //! decomposes matrix and stores the results to user-provided matrices + static void compute( InputArray src, OutputArray w, + OutputArray u, OutputArray vt, int flags=0 ); + //! 
computes singular values of a matrix + static void compute( InputArray src, OutputArray w, int flags=0 ); + //! performs back substitution + static void backSubst( InputArray w, InputArray u, + InputArray vt, InputArray rhs, + OutputArray dst ); + + template static void compute( const Matx<_Tp, m, n>& a, + Matx<_Tp, nm, 1>& w, Matx<_Tp, m, nm>& u, Matx<_Tp, n, nm>& vt ); + template static void compute( const Matx<_Tp, m, n>& a, + Matx<_Tp, nm, 1>& w ); + template static void backSubst( const Matx<_Tp, nm, 1>& w, + const Matx<_Tp, m, nm>& u, const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs, Matx<_Tp, n, nb>& dst ); + + //! finds dst = arg min_{|dst|=1} |m*dst| + static void solveZ( InputArray src, OutputArray dst ); + //! performs back substitution, so that dst is the solution or pseudo-solution of m*dst = rhs, where m is the decomposed matrix + void backSubst( InputArray rhs, OutputArray dst ) const; + + Mat u, w, vt; +}; + +//! computes SVD of src +CV_EXPORTS_W void SVDecomp( InputArray src, CV_OUT OutputArray w, + CV_OUT OutputArray u, CV_OUT OutputArray vt, int flags=0 ); + +//! performs back substitution for the previously computed SVD +CV_EXPORTS_W void SVBackSubst( InputArray w, InputArray u, InputArray vt, + InputArray rhs, CV_OUT OutputArray dst ); + +//! computes Mahalanobis distance between two vectors: sqrt((v1-v2)'*icovar*(v1-v2)), where icovar is the inverse covariation matrix +CV_EXPORTS_W double Mahalanobis(InputArray v1, InputArray v2, InputArray icovar); +//! a synonym for Mahalanobis +CV_EXPORTS double Mahalonobis(InputArray v1, InputArray v2, InputArray icovar); + +//! performs forward or inverse 1D or 2D Discrete Fourier Transformation +CV_EXPORTS_W void dft(InputArray src, OutputArray dst, int flags=0, int nonzeroRows=0); +//! performs inverse 1D or 2D Discrete Fourier Transformation +CV_EXPORTS_W void idft(InputArray src, OutputArray dst, int flags=0, int nonzeroRows=0); +//! 
performs forward or inverse 1D or 2D Discrete Cosine Transformation +CV_EXPORTS_W void dct(InputArray src, OutputArray dst, int flags=0); +//! performs inverse 1D or 2D Discrete Cosine Transformation +CV_EXPORTS_W void idct(InputArray src, OutputArray dst, int flags=0); +//! computes element-wise product of the two Fourier spectrums. The second spectrum can optionally be conjugated before the multiplication +CV_EXPORTS_W void mulSpectrums(InputArray a, InputArray b, OutputArray c, + int flags, bool conjB=false); +//! computes the minimal vector size vecsize1 >= vecsize so that the dft() of the vector of length vecsize1 can be computed efficiently +CV_EXPORTS_W int getOptimalDFTSize(int vecsize); + +/*! + Various k-Means flags +*/ +enum +{ + KMEANS_RANDOM_CENTERS=0, // Chooses random centers for k-Means initialization + KMEANS_PP_CENTERS=2, // Uses k-Means++ algorithm for initialization + KMEANS_USE_INITIAL_LABELS=1 // Uses the user-provided labels for K-Means initialization +}; +//! clusters the input data using k-Means algorithm +CV_EXPORTS_W double kmeans( InputArray data, int K, CV_OUT InputOutputArray bestLabels, + TermCriteria criteria, int attempts, + int flags, OutputArray centers=noArray() ); + +//! returns the thread-local Random number generator +CV_EXPORTS RNG& theRNG(); + +//! returns the next unifomly-distributed random number of the specified type +template static inline _Tp randu() { return (_Tp)theRNG(); } + +//! fills array with uniformly-distributed random numbers from the range [low, high) +CV_EXPORTS_W void randu(InputOutputArray dst, InputArray low, InputArray high); + +//! fills array with normally-distributed random numbers with the specified mean and the standard deviation +CV_EXPORTS_W void randn(InputOutputArray dst, InputArray mean, InputArray stddev); + +//! 
shuffles the input array elements +CV_EXPORTS void randShuffle(InputOutputArray dst, double iterFactor=1., RNG* rng=0); +CV_EXPORTS_AS(randShuffle) void randShuffle_(InputOutputArray dst, double iterFactor=1.); + +//! draws the line segment (pt1, pt2) in the image +CV_EXPORTS_W void line(Mat& img, Point pt1, Point pt2, const Scalar& color, + int thickness=1, int lineType=8, int shift=0); + +//! draws the rectangle outline or a solid rectangle with the opposite corners pt1 and pt2 in the image +CV_EXPORTS_W void rectangle(Mat& img, Point pt1, Point pt2, + const Scalar& color, int thickness=1, + int lineType=8, int shift=0); + +//! draws the rectangle outline or a solid rectangle covering rec in the image +CV_EXPORTS void rectangle(Mat& img, Rect rec, + const Scalar& color, int thickness=1, + int lineType=8, int shift=0); + +//! draws the circle outline or a solid circle in the image +CV_EXPORTS_W void circle(Mat& img, Point center, int radius, + const Scalar& color, int thickness=1, + int lineType=8, int shift=0); + +//! draws an elliptic arc, ellipse sector or a rotated ellipse in the image +CV_EXPORTS_W void ellipse(Mat& img, Point center, Size axes, + double angle, double startAngle, double endAngle, + const Scalar& color, int thickness=1, + int lineType=8, int shift=0); + +//! draws a rotated ellipse in the image +CV_EXPORTS_W void ellipse(Mat& img, const RotatedRect& box, const Scalar& color, + int thickness=1, int lineType=8); + +//! draws a filled convex polygon in the image +CV_EXPORTS void fillConvexPoly(Mat& img, const Point* pts, int npts, + const Scalar& color, int lineType=8, + int shift=0); +CV_EXPORTS_W void fillConvexPoly(InputOutputArray img, InputArray points, + const Scalar& color, int lineType=8, + int shift=0); + +//! 
fills an area bounded by one or more polygons +CV_EXPORTS void fillPoly(Mat& img, const Point** pts, + const int* npts, int ncontours, + const Scalar& color, int lineType=8, int shift=0, + Point offset=Point() ); + +CV_EXPORTS_W void fillPoly(InputOutputArray img, InputArrayOfArrays pts, + const Scalar& color, int lineType=8, int shift=0, + Point offset=Point() ); + +//! draws one or more polygonal curves +CV_EXPORTS void polylines(Mat& img, const Point** pts, const int* npts, + int ncontours, bool isClosed, const Scalar& color, + int thickness=1, int lineType=8, int shift=0 ); + +CV_EXPORTS_W void polylines(InputOutputArray img, InputArrayOfArrays pts, + bool isClosed, const Scalar& color, + int thickness=1, int lineType=8, int shift=0 ); + +//! clips the line segment by the rectangle Rect(0, 0, imgSize.width, imgSize.height) +CV_EXPORTS bool clipLine(Size imgSize, CV_IN_OUT Point& pt1, CV_IN_OUT Point& pt2); + +//! clips the line segment by the rectangle imgRect +CV_EXPORTS_W bool clipLine(Rect imgRect, CV_OUT CV_IN_OUT Point& pt1, CV_OUT CV_IN_OUT Point& pt2); + +/*! + Line iterator class + + The class is used to iterate over all the pixels on the raster line + segment connecting two specified points. +*/ +class CV_EXPORTS LineIterator +{ +public: + //! intializes the iterator + LineIterator( const Mat& img, Point pt1, Point pt2, + int connectivity=8, bool leftToRight=false ); + //! returns pointer to the current pixel + uchar* operator *(); + //! prefix increment operator (++it). shifts iterator to the next pixel + LineIterator& operator ++(); + //! postfix increment operator (it++). shifts iterator to the next pixel + LineIterator operator ++(int); + //! returns coordinates of the current pixel + Point pos() const; + + uchar* ptr; + const uchar* ptr0; + int step, elemSize; + int err, count; + int minusDelta, plusDelta; + int minusStep, plusStep; +}; + +//! 
converts elliptic arc to a polygonal curve +CV_EXPORTS_W void ellipse2Poly( Point center, Size axes, int angle, + int arcStart, int arcEnd, int delta, + CV_OUT vector& pts ); + +enum +{ + FONT_HERSHEY_SIMPLEX = 0, + FONT_HERSHEY_PLAIN = 1, + FONT_HERSHEY_DUPLEX = 2, + FONT_HERSHEY_COMPLEX = 3, + FONT_HERSHEY_TRIPLEX = 4, + FONT_HERSHEY_COMPLEX_SMALL = 5, + FONT_HERSHEY_SCRIPT_SIMPLEX = 6, + FONT_HERSHEY_SCRIPT_COMPLEX = 7, + FONT_ITALIC = 16 +}; + +//! renders text string in the image +CV_EXPORTS_W void putText( Mat& img, const string& text, Point org, + int fontFace, double fontScale, Scalar color, + int thickness=1, int linetype=8, + bool bottomLeftOrigin=false ); + +//! returns bounding box of the text string +CV_EXPORTS_W Size getTextSize(const string& text, int fontFace, + double fontScale, int thickness, + CV_OUT int* baseLine); + +///////////////////////////////// Mat_<_Tp> //////////////////////////////////// + +/*! + Template matrix class derived from Mat + + The class Mat_ is a "thin" template wrapper on top of cv::Mat. It does not have any extra data fields, + nor it or cv::Mat have any virtual methods and thus references or pointers to these two classes + can be safely converted one to another. But do it with care, for example: + + \code + // create 100x100 8-bit matrix + Mat M(100,100,CV_8U); + // this will compile fine. no any data conversion will be done. + Mat_& M1 = (Mat_&)M; + // the program will likely crash at the statement below + M1(99,99) = 1.f; + \endcode + + While cv::Mat is sufficient in most cases, cv::Mat_ can be more convenient if you use a lot of element + access operations and if you know matrix type at compile time. 
+ Note that cv::Mat::at<_Tp>(int y, int x) and cv::Mat_<_Tp>::operator ()(int y, int x) do absolutely the + same thing and run at the same speed, but the latter is certainly shorter: + + \code + Mat_ M(20,20); + for(int i = 0; i < M.rows; i++) + for(int j = 0; j < M.cols; j++) + M(i,j) = 1./(i+j+1); + Mat E, V; + eigen(M,E,V); + cout << E.at(0,0)/E.at(M.rows-1,0); + \endcode + + It is easy to use Mat_ for multi-channel images/matrices - just pass cv::Vec as cv::Mat_ template parameter: + + \code + // allocate 320x240 color image and fill it with green (in RGB space) + Mat_ img(240, 320, Vec3b(0,255,0)); + // now draw a diagonal white line + for(int i = 0; i < 100; i++) + img(i,i)=Vec3b(255,255,255); + // and now modify the 2nd (red) channel of each pixel + for(int i = 0; i < img.rows; i++) + for(int j = 0; j < img.cols; j++) + img(i,j)[2] ^= (uchar)(i ^ j); // img(y,x)[c] accesses c-th channel of the pixel (x,y) + \endcode +*/ +template class CV_EXPORTS Mat_ : public Mat +{ +public: + typedef _Tp value_type; + typedef typename DataType<_Tp>::channel_type channel_type; + typedef MatIterator_<_Tp> iterator; + typedef MatConstIterator_<_Tp> const_iterator; + + //! default constructor + Mat_(); + //! equivalent to Mat(_rows, _cols, DataType<_Tp>::type) + Mat_(int _rows, int _cols); + //! constructor that sets each matrix element to specified value + Mat_(int _rows, int _cols, const _Tp& value); + //! equivalent to Mat(_size, DataType<_Tp>::type) + explicit Mat_(Size _size); + //! constructor that sets each matrix element to specified value + Mat_(Size _size, const _Tp& value); + //! n-dim array constructor + Mat_(int _ndims, const int* _sizes); + //! n-dim array constructor that sets each matrix element to specified value + Mat_(int _ndims, const int* _sizes, const _Tp& value); + //! copy/conversion contructor. If m is of different type, it's converted + Mat_(const Mat& m); + //! copy constructor + Mat_(const Mat_& m); + //! 
constructs a matrix on top of user-allocated data. step is in bytes(!!!), regardless of the type + Mat_(int _rows, int _cols, _Tp* _data, size_t _step=AUTO_STEP); + //! constructs n-dim matrix on top of user-allocated data. steps are in bytes(!!!), regardless of the type + Mat_(int _ndims, const int* _sizes, _Tp* _data, const size_t* _steps=0); + //! selects a submatrix + Mat_(const Mat_& m, const Range& rowRange, const Range& colRange=Range::all()); + //! selects a submatrix + Mat_(const Mat_& m, const Rect& roi); + //! selects a submatrix, n-dim version + Mat_(const Mat_& m, const Range* ranges); + //! from a matrix expression + explicit Mat_(const MatExpr& e); + //! makes a matrix out of Vec, std::vector, Point_ or Point3_. The matrix will have a single column + explicit Mat_(const vector<_Tp>& vec, bool copyData=false); + template explicit Mat_(const Vec::channel_type, n>& vec, bool copyData=true); + template explicit Mat_(const Matx::channel_type, m, n>& mtx, bool copyData=true); + explicit Mat_(const Point_::channel_type>& pt, bool copyData=true); + explicit Mat_(const Point3_::channel_type>& pt, bool copyData=true); + explicit Mat_(const MatCommaInitializer_<_Tp>& commaInitializer); + + Mat_& operator = (const Mat& m); + Mat_& operator = (const Mat_& m); + //! set all the elements to s. + Mat_& operator = (const _Tp& s); + //! assign a matrix expression + Mat_& operator = (const MatExpr& e); + + //! iterators; they are smart enough to skip gaps in the end of rows + iterator begin(); + iterator end(); + const_iterator begin() const; + const_iterator end() const; + + //! equivalent to Mat::create(_rows, _cols, DataType<_Tp>::type) + void create(int _rows, int _cols); + //! equivalent to Mat::create(_size, DataType<_Tp>::type) + void create(Size _size); + //! equivalent to Mat::create(_ndims, _sizes, DatType<_Tp>::type) + void create(int _ndims, const int* _sizes); + //! cross-product + Mat_ cross(const Mat_& m) const; + //! 
data type conversion + template operator Mat_() const; + //! overridden forms of Mat::row() etc. + Mat_ row(int y) const; + Mat_ col(int x) const; + Mat_ diag(int d=0) const; + Mat_ clone() const; + + //! overridden forms of Mat::elemSize() etc. + size_t elemSize() const; + size_t elemSize1() const; + int type() const; + int depth() const; + int channels() const; + size_t step1(int i=0) const; + //! returns step()/sizeof(_Tp) + size_t stepT(int i=0) const; + + //! overridden forms of Mat::zeros() etc. Data type is omitted, of course + static MatExpr zeros(int rows, int cols); + static MatExpr zeros(Size size); + static MatExpr zeros(int _ndims, const int* _sizes); + static MatExpr ones(int rows, int cols); + static MatExpr ones(Size size); + static MatExpr ones(int _ndims, const int* _sizes); + static MatExpr eye(int rows, int cols); + static MatExpr eye(Size size); + + //! some more overriden methods + Mat_ reshape(int _rows) const; + Mat_& adjustROI( int dtop, int dbottom, int dleft, int dright ); + Mat_ operator()( const Range& rowRange, const Range& colRange ) const; + Mat_ operator()( const Rect& roi ) const; + Mat_ operator()( const Range* ranges ) const; + + //! more convenient forms of row and element access operators + _Tp* operator [](int y); + const _Tp* operator [](int y) const; + + //! returns reference to the specified element + _Tp& operator ()(const int* idx); + //! returns read-only reference to the specified element + const _Tp& operator ()(const int* idx) const; + + //! returns reference to the specified element + template _Tp& operator ()(const Vec& idx); + //! returns read-only reference to the specified element + template const _Tp& operator ()(const Vec& idx) const; + + //! returns reference to the specified element (1D case) + _Tp& operator ()(int idx0); + //! returns read-only reference to the specified element (1D case) + const _Tp& operator ()(int idx0) const; + //! 
returns reference to the specified element (2D case) + _Tp& operator ()(int idx0, int idx1); + //! returns read-only reference to the specified element (2D case) + const _Tp& operator ()(int idx0, int idx1) const; + //! returns reference to the specified element (3D case) + _Tp& operator ()(int idx0, int idx1, int idx2); + //! returns read-only reference to the specified element (3D case) + const _Tp& operator ()(int idx0, int idx1, int idx2) const; + + _Tp& operator ()(Point pt); + const _Tp& operator ()(Point pt) const; + + //! conversion to vector. + operator vector<_Tp>() const; + //! conversion to Vec + template operator Vec::channel_type, n>() const; + //! conversion to Matx + template operator Matx::channel_type, m, n>() const; +}; + +typedef Mat_ Mat1b; +typedef Mat_ Mat2b; +typedef Mat_ Mat3b; +typedef Mat_ Mat4b; + +typedef Mat_ Mat1s; +typedef Mat_ Mat2s; +typedef Mat_ Mat3s; +typedef Mat_ Mat4s; + +typedef Mat_ Mat1w; +typedef Mat_ Mat2w; +typedef Mat_ Mat3w; +typedef Mat_ Mat4w; + +typedef Mat_ Mat1i; +typedef Mat_ Mat2i; +typedef Mat_ Mat3i; +typedef Mat_ Mat4i; + +typedef Mat_ Mat1f; +typedef Mat_ Mat2f; +typedef Mat_ Mat3f; +typedef Mat_ Mat4f; + +typedef Mat_ Mat1d; +typedef Mat_ Mat2d; +typedef Mat_ Mat3d; +typedef Mat_ Mat4d; + +//////////// Iterators & Comma initializers ////////////////// + +class CV_EXPORTS MatConstIterator +{ +public: + typedef uchar* value_type; + typedef ptrdiff_t difference_type; + typedef const uchar** pointer; + typedef uchar* reference; + typedef std::random_access_iterator_tag iterator_category; + + //! default constructor + MatConstIterator(); + //! constructor that sets the iterator to the beginning of the matrix + MatConstIterator(const Mat* _m); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator(const Mat* _m, int _row, int _col=0); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator(const Mat* _m, Point _pt); + //! 
constructor that sets the iterator to the specified element of the matrix + MatConstIterator(const Mat* _m, const int* _idx); + //! copy constructor + MatConstIterator(const MatConstIterator& it); + + //! copy operator + MatConstIterator& operator = (const MatConstIterator& it); + //! returns the current matrix element + uchar* operator *() const; + //! returns the i-th matrix element, relative to the current + uchar* operator [](ptrdiff_t i) const; + + //! shifts the iterator forward by the specified number of elements + MatConstIterator& operator += (ptrdiff_t ofs); + //! shifts the iterator backward by the specified number of elements + MatConstIterator& operator -= (ptrdiff_t ofs); + //! decrements the iterator + MatConstIterator& operator --(); + //! decrements the iterator + MatConstIterator operator --(int); + //! increments the iterator + MatConstIterator& operator ++(); + //! increments the iterator + MatConstIterator operator ++(int); + //! returns the current iterator position + Point pos() const; + //! returns the current iterator position + void pos(int* _idx) const; + ptrdiff_t lpos() const; + void seek(ptrdiff_t ofs, bool relative=false); + void seek(const int* _idx, bool relative=false); + + const Mat* m; + size_t elemSize; + uchar* ptr; + uchar* sliceStart; + uchar* sliceEnd; +}; + +/*! + Matrix read-only iterator + + */ +template +class CV_EXPORTS MatConstIterator_ : public MatConstIterator +{ +public: + typedef _Tp value_type; + typedef ptrdiff_t difference_type; + typedef const _Tp* pointer; + typedef const _Tp& reference; + typedef std::random_access_iterator_tag iterator_category; + + //! default constructor + MatConstIterator_(); + //! constructor that sets the iterator to the beginning of the matrix + MatConstIterator_(const Mat_<_Tp>* _m); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator_(const Mat_<_Tp>* _m, int _row, int _col=0); + //! 
constructor that sets the iterator to the specified element of the matrix + MatConstIterator_(const Mat_<_Tp>* _m, Point _pt); + //! constructor that sets the iterator to the specified element of the matrix + MatConstIterator_(const Mat_<_Tp>* _m, const int* _idx); + //! copy constructor + MatConstIterator_(const MatConstIterator_& it); + + //! copy operator + MatConstIterator_& operator = (const MatConstIterator_& it); + //! returns the current matrix element + _Tp operator *() const; + //! returns the i-th matrix element, relative to the current + _Tp operator [](ptrdiff_t i) const; + + //! shifts the iterator forward by the specified number of elements + MatConstIterator_& operator += (ptrdiff_t ofs); + //! shifts the iterator backward by the specified number of elements + MatConstIterator_& operator -= (ptrdiff_t ofs); + //! decrements the iterator + MatConstIterator_& operator --(); + //! decrements the iterator + MatConstIterator_ operator --(int); + //! increments the iterator + MatConstIterator_& operator ++(); + //! increments the iterator + MatConstIterator_ operator ++(int); + //! returns the current iterator position + Point pos() const; +}; + + +/*! + Matrix read-write iterator + +*/ +template +class CV_EXPORTS MatIterator_ : public MatConstIterator_<_Tp> +{ +public: + typedef _Tp* pointer; + typedef _Tp& reference; + typedef std::random_access_iterator_tag iterator_category; + + //! the default constructor + MatIterator_(); + //! constructor that sets the iterator to the beginning of the matrix + MatIterator_(Mat_<_Tp>* _m); + //! constructor that sets the iterator to the specified element of the matrix + MatIterator_(Mat_<_Tp>* _m, int _row, int _col=0); + //! constructor that sets the iterator to the specified element of the matrix + MatIterator_(const Mat_<_Tp>* _m, Point _pt); + //! constructor that sets the iterator to the specified element of the matrix + MatIterator_(const Mat_<_Tp>* _m, const int* _idx); + //! 
copy constructor + MatIterator_(const MatIterator_& it); + //! copy operator + MatIterator_& operator = (const MatIterator_<_Tp>& it ); + + //! returns the current matrix element + _Tp& operator *() const; + //! returns the i-th matrix element, relative to the current + _Tp& operator [](ptrdiff_t i) const; + + //! shifts the iterator forward by the specified number of elements + MatIterator_& operator += (ptrdiff_t ofs); + //! shifts the iterator backward by the specified number of elements + MatIterator_& operator -= (ptrdiff_t ofs); + //! decrements the iterator + MatIterator_& operator --(); + //! decrements the iterator + MatIterator_ operator --(int); + //! increments the iterator + MatIterator_& operator ++(); + //! increments the iterator + MatIterator_ operator ++(int); +}; + +template class CV_EXPORTS MatOp_Iter_; + +/*! + Comma-separated Matrix Initializer + + The class instances are usually not created explicitly. + Instead, they are created on "matrix << firstValue" operator. + + The sample below initializes 2x2 rotation matrix: + + \code + double angle = 30, a = cos(angle*CV_PI/180), b = sin(angle*CV_PI/180); + Mat R = (Mat_(2,2) << a, -b, b, a); + \endcode +*/ +template class CV_EXPORTS MatCommaInitializer_ +{ +public: + //! the constructor, created by "matrix << firstValue" operator, where matrix is cv::Mat + MatCommaInitializer_(Mat_<_Tp>* _m); + //! the operator that takes the next value and put it to the matrix + template MatCommaInitializer_<_Tp>& operator , (T2 v); + //! 
another form of conversion operator + Mat_<_Tp> operator *() const; + operator Mat_<_Tp>() const; +protected: + MatIterator_<_Tp> it; +}; + + +template class CV_EXPORTS MatxCommaInitializer +{ +public: + MatxCommaInitializer(Matx<_Tp, m, n>* _mtx); + template MatxCommaInitializer<_Tp, m, n>& operator , (T2 val); + Matx<_Tp, m, n> operator *() const; + + Matx<_Tp, m, n>* dst; + int idx; +}; + +template class CV_EXPORTS VecCommaInitializer : public MatxCommaInitializer<_Tp, m, 1> +{ +public: + VecCommaInitializer(Vec<_Tp, m>* _vec); + template VecCommaInitializer<_Tp, m>& operator , (T2 val); + Vec<_Tp, m> operator *() const; +}; + +/*! + Automatically Allocated Buffer Class + + The class is used for temporary buffers in functions and methods. + If a temporary buffer is usually small (a few K's of memory), + but its size depends on the parameters, it makes sense to create a small + fixed-size array on stack and use it if it's large enough. If the required buffer size + is larger than the fixed size, another buffer of sufficient size is allocated dynamically + and released after the processing. Therefore, in typical cases, when the buffer size is small, + there is no overhead associated with malloc()/free(). + At the same time, there is no limit on the size of processed data. + + This is what AutoBuffer does. The template takes 2 parameters - type of the buffer elements and + the number of stack-allocated elements. Here is how the class is used: + + \code + void my_func(const cv::Mat& m) + { + cv::AutoBuffer buf; // create automatic buffer containing 1000 floats + + buf.allocate(m.rows); // if m.rows <= 1000, the pre-allocated buffer is used, + // otherwise the buffer of "m.rows" floats will be allocated + // dynamically and deallocated in cv::AutoBuffer destructor + ... + } + \endcode +*/ +template class CV_EXPORTS AutoBuffer +{ +public: + typedef _Tp value_type; + enum { buffer_padding = (int)((16 + sizeof(_Tp) - 1)/sizeof(_Tp)) }; + + //! 
the default contructor + AutoBuffer(); + //! constructor taking the real buffer size + AutoBuffer(size_t _size); + //! destructor. calls deallocate() + ~AutoBuffer(); + + //! allocates the new buffer of size _size. if the _size is small enough, stack-allocated buffer is used + void allocate(size_t _size); + //! deallocates the buffer if it was dynamically allocated + void deallocate(); + //! returns pointer to the real buffer, stack-allocated or head-allocated + operator _Tp* (); + //! returns read-only pointer to the real buffer, stack-allocated or head-allocated + operator const _Tp* () const; + +protected: + //! pointer to the real buffer, can point to buf if the buffer is small enough + _Tp* ptr; + //! size of the real buffer + size_t size; + //! pre-allocated buffer + _Tp buf[fixed_size+buffer_padding]; +}; + +/////////////////////////// multi-dimensional dense matrix ////////////////////////// + +/*! + n-Dimensional Dense Matrix Iterator Class. + + The class cv::NAryMatIterator is used for iterating over one or more n-dimensional dense arrays (cv::Mat's). + + The iterator is completely different from cv::Mat_ and cv::SparseMat_ iterators. + It iterates through the slices (or planes), not the elements, where "slice" is a continuous part of the arrays. + + Here is the example on how the iterator can be used to normalize 3D histogram: + + \code + void normalizeColorHist(Mat& hist) + { + #if 1 + // intialize iterator (the style is different from STL). + // after initialization the iterator will contain + // the number of slices or planes + // the iterator will go through + Mat* arrays[] = { &hist, 0 }; + Mat planes[1]; + NAryMatIterator it(arrays, planes); + double s = 0; + // iterate through the matrix. on each iteration + // it.planes[i] (of type Mat) will be set to the current plane of + // i-th n-dim matrix passed to the iterator constructor. 
+ for(int p = 0; p < it.nplanes; p++, ++it) + s += sum(it.planes[0])[0]; + it = NAryMatIterator(hist); + s = 1./s; + for(int p = 0; p < it.nplanes; p++, ++it) + it.planes[0] *= s; + #elif 1 + // this is a shorter implementation of the above + // using built-in operations on Mat + double s = sum(hist)[0]; + hist.convertTo(hist, hist.type(), 1./s, 0); + #else + // and this is even shorter one + // (assuming that the histogram elements are non-negative) + normalize(hist, hist, 1, 0, NORM_L1); + #endif + } + \endcode + + You can iterate through several matrices simultaneously as long as they have the same geometry + (dimensionality and all the dimension sizes are the same), which is useful for binary + and n-ary operations on such matrices. Just pass those matrices to cv::MatNDIterator. + Then, during the iteration it.planes[0], it.planes[1], ... will + be the slices of the corresponding matrices +*/ +class CV_EXPORTS NAryMatIterator +{ +public: + //! the default constructor + NAryMatIterator(); + //! the full constructor taking arbitrary number of n-dim matrices + NAryMatIterator(const Mat** arrays, uchar** ptrs, int narrays=-1); + //! the full constructor taking arbitrary number of n-dim matrices + NAryMatIterator(const Mat** arrays, Mat* planes, int narrays=-1); + //! the separate iterator initialization method + void init(const Mat** arrays, Mat* planes, uchar** ptrs, int narrays=-1); + + //! proceeds to the next plane of every iterated matrix + NAryMatIterator& operator ++(); + //! proceeds to the next plane of every iterated matrix (postfix increment operator) + NAryMatIterator operator ++(int); + + //! the iterated arrays + const Mat** arrays; + //! the current planes + Mat* planes; + //! data pointers + uchar** ptrs; + //! the number of arrays + int narrays; + //! the number of hyper-planes that the iterator steps through + size_t nplanes; + //! 
the size of each segment (in elements) + size_t size; +protected: + int iterdepth; + size_t idx; +}; + +//typedef NAryMatIterator NAryMatNDIterator; + +typedef void (*ConvertData)(const void* from, void* to, int cn); +typedef void (*ConvertScaleData)(const void* from, void* to, int cn, double alpha, double beta); + +//! returns the function for converting pixels from one data type to another +CV_EXPORTS ConvertData getConvertElem(int fromType, int toType); +//! returns the function for converting pixels from one data type to another with the optional scaling +CV_EXPORTS ConvertScaleData getConvertScaleElem(int fromType, int toType); + + +/////////////////////////// multi-dimensional sparse matrix ////////////////////////// + +class SparseMatIterator; +class SparseMatConstIterator; +template class SparseMatIterator_; +template class SparseMatConstIterator_; + +/*! + Sparse matrix class. + + The class represents multi-dimensional sparse numerical arrays. Such a sparse array can store elements + of any type that cv::Mat is able to store. "Sparse" means that only non-zero elements + are stored (though, as a result of some operations on a sparse matrix, some of its stored elements + can actually become 0. It's user responsibility to detect such elements and delete them using cv::SparseMat::erase(). + The non-zero elements are stored in a hash table that grows when it's filled enough, + so that the search time remains O(1) in average. Elements can be accessed using the following methods: + +
    +
  1. Query operations: cv::SparseMat::ptr() and the higher-level cv::SparseMat::ref(), + cv::SparseMat::value() and cv::SparseMat::find, for example: + \code + const int dims = 5; + int size[] = {10, 10, 10, 10, 10}; + SparseMat sparse_mat(dims, size, CV_32F); + for(int i = 0; i < 1000; i++) + { + int idx[dims]; + for(int k = 0; k < dims; k++) + idx[k] = rand()%sparse_mat.size(k); + sparse_mat.ref(idx) += 1.f; + } + \endcode + +
  2. Sparse matrix iterators. Like cv::Mat iterators and unlike cv::Mat iterators, the sparse matrix iterators are STL-style, + that is, the iteration is done as following: + \code + // prints elements of a sparse floating-point matrix and the sum of elements. + SparseMatConstIterator_ + it = sparse_mat.begin(), + it_end = sparse_mat.end(); + double s = 0; + int dims = sparse_mat.dims(); + for(; it != it_end; ++it) + { + // print element indices and the element value + const Node* n = it.node(); + printf("(") + for(int i = 0; i < dims; i++) + printf("%3d%c", n->idx[i], i < dims-1 ? ',' : ')'); + printf(": %f\n", *it); + s += *it; + } + printf("Element sum is %g\n", s); + \endcode + If you run this loop, you will notice that elements are enumerated + in no any logical order (lexicographical etc.), + they come in the same order as they stored in the hash table, i.e. semi-randomly. + + You may collect pointers to the nodes and sort them to get the proper ordering. + Note, however, that pointers to the nodes may become invalid when you add more + elements to the matrix; this is because of possible buffer reallocation. + +
  3. A combination of the above 2 methods when you need to process 2 or more sparse + matrices simultaneously, e.g. this is how you can compute unnormalized + cross-correlation of the 2 floating-point sparse matrices: + \code + double crossCorr(const SparseMat& a, const SparseMat& b) + { + const SparseMat *_a = &a, *_b = &b; + // if b contains less elements than a, + // it's faster to iterate through b + if(_a->nzcount() > _b->nzcount()) + std::swap(_a, _b); + SparseMatConstIterator_ it = _a->begin(), + it_end = _a->end(); + double ccorr = 0; + for(; it != it_end; ++it) + { + // take the next element from the first matrix + float avalue = *it; + const Node* anode = it.node(); + // and try to find element with the same index in the second matrix. + // since the hash value depends only on the element index, + // we reuse hashvalue stored in the node + float bvalue = _b->value(anode->idx,&anode->hashval); + ccorr += avalue*bvalue; + } + return ccorr; + } + \endcode +
+*/ +class CV_EXPORTS SparseMat +{ +public: + typedef SparseMatIterator iterator; + typedef SparseMatConstIterator const_iterator; + + //! the sparse matrix header + struct CV_EXPORTS Hdr + { + Hdr(int _dims, const int* _sizes, int _type); + void clear(); + int refcount; + int dims; + int valueOffset; + size_t nodeSize; + size_t nodeCount; + size_t freeList; + vector pool; + vector hashtab; + int size[CV_MAX_DIM]; + }; + + //! sparse matrix node - element of a hash table + struct CV_EXPORTS Node + { + //! hash value + size_t hashval; + //! index of the next node in the same hash table entry + size_t next; + //! index of the matrix element + int idx[CV_MAX_DIM]; + }; + + //! default constructor + SparseMat(); + //! creates matrix of the specified size and type + SparseMat(int dims, const int* _sizes, int _type); + //! copy constructor + SparseMat(const SparseMat& m); + //! converts dense 2d matrix to the sparse form + /*! + \param m the input matrix + \param try1d if true and m is a single-column matrix (Nx1), + then the sparse matrix will be 1-dimensional. + */ + explicit SparseMat(const Mat& m); + //! converts old-style sparse matrix to the new-style. All the data is copied + SparseMat(const CvSparseMat* m); + //! the destructor + ~SparseMat(); + + //! assignment operator. This is O(1) operation, i.e. no data is copied + SparseMat& operator = (const SparseMat& m); + //! equivalent to the corresponding constructor + SparseMat& operator = (const Mat& m); + + //! creates full copy of the matrix + SparseMat clone() const; + + //! copies all the data to the destination matrix. All the previous content of m is erased + void copyTo( SparseMat& m ) const; + //! converts sparse matrix to dense matrix. + void copyTo( Mat& m ) const; + //! multiplies all the matrix elements by the specified scale factor alpha and converts the results to the specified data type + void convertTo( SparseMat& m, int rtype, double alpha=1 ) const; + //! 
converts sparse matrix to dense n-dim matrix with optional type conversion and scaling. + /*! + \param rtype The output matrix data type. When it is =-1, the output array will have the same data type as (*this) + \param alpha The scale factor + \param beta The optional delta added to the scaled values before the conversion + */ + void convertTo( Mat& m, int rtype, double alpha=1, double beta=0 ) const; + + // not used now + void assignTo( SparseMat& m, int type=-1 ) const; + + //! reallocates sparse matrix. + /*! + If the matrix already had the proper size and type, + it is simply cleared with clear(), otherwise, + the old matrix is released (using release()) and the new one is allocated. + */ + void create(int dims, const int* _sizes, int _type); + //! sets all the sparse matrix elements to 0, which means clearing the hash table. + void clear(); + //! manually increments the reference counter to the header. + void addref(); + // decrements the header reference counter. When the counter reaches 0, the header and all the underlying data are deallocated. + void release(); + + //! converts sparse matrix to the old-style representation; all the elements are copied. + operator CvSparseMat*() const; + //! returns the size of each element in bytes (not including the overhead - the space occupied by SparseMat::Node elements) + size_t elemSize() const; + //! returns elemSize()/channels() + size_t elemSize1() const; + + //! returns type of sparse matrix elements + int type() const; + //! returns the depth of sparse matrix elements + int depth() const; + //! returns the number of channels + int channels() const; + + //! returns the array of sizes, or NULL if the matrix is not allocated + const int* size() const; + //! returns the size of i-th matrix dimension (or 0) + int size(int i) const; + //! returns the matrix dimensionality + int dims() const; + //! returns the number of non-zero elements (=the number of hash table nodes) + size_t nzcount() const; + + //! 
computes the element hash value (1D case) + size_t hash(int i0) const; + //! computes the element hash value (2D case) + size_t hash(int i0, int i1) const; + //! computes the element hash value (3D case) + size_t hash(int i0, int i1, int i2) const; + //! computes the element hash value (nD case) + size_t hash(const int* idx) const; + + //@{ + /*! + specialized variants for 1D, 2D, 3D cases and the generic_type one for n-D case. + + return pointer to the matrix element. +
    +
  • if the element is there (it's non-zero), the pointer to it is returned +
  • if it's not there and createMissing=false, NULL pointer is returned +
  • if it's not there and createMissing=true, then the new element + is created and initialized with 0. Pointer to it is returned +
  • if the optional hashval pointer is not NULL, the element hash value is + not computed, but *hashval is taken instead. +
+ */ + //! returns pointer to the specified element (1D case) + uchar* ptr(int i0, bool createMissing, size_t* hashval=0); + //! returns pointer to the specified element (2D case) + uchar* ptr(int i0, int i1, bool createMissing, size_t* hashval=0); + //! returns pointer to the specified element (3D case) + uchar* ptr(int i0, int i1, int i2, bool createMissing, size_t* hashval=0); + //! returns pointer to the specified element (nD case) + uchar* ptr(const int* idx, bool createMissing, size_t* hashval=0); + //@} + + //@{ + /*! + return read-write reference to the specified sparse matrix element. + + ref<_Tp>(i0,...[,hashval]) is equivalent to *(_Tp*)ptr(i0,...,true[,hashval]). + The methods always return a valid reference. + If the element did not exist, it is created and initialiazed with 0. + */ + //! returns reference to the specified element (1D case) + template _Tp& ref(int i0, size_t* hashval=0); + //! returns reference to the specified element (2D case) + template _Tp& ref(int i0, int i1, size_t* hashval=0); + //! returns reference to the specified element (3D case) + template _Tp& ref(int i0, int i1, int i2, size_t* hashval=0); + //! returns reference to the specified element (nD case) + template _Tp& ref(const int* idx, size_t* hashval=0); + //@} + + //@{ + /*! + return value of the specified sparse matrix element. + + value<_Tp>(i0,...[,hashval]) is equivalent + + \code + { const _Tp* p = find<_Tp>(i0,...[,hashval]); return p ? *p : _Tp(); } + \endcode + + That is, if the element did not exist, the methods return 0. + */ + //! returns value of the specified element (1D case) + template _Tp value(int i0, size_t* hashval=0) const; + //! returns value of the specified element (2D case) + template _Tp value(int i0, int i1, size_t* hashval=0) const; + //! returns value of the specified element (3D case) + template _Tp value(int i0, int i1, int i2, size_t* hashval=0) const; + //! 
returns value of the specified element (nD case) + template _Tp value(const int* idx, size_t* hashval=0) const; + //@} + + //@{ + /*! + Return pointer to the specified sparse matrix element if it exists + + find<_Tp>(i0,...[,hashval]) is equivalent to (_const Tp*)ptr(i0,...false[,hashval]). + + If the specified element does not exist, the methods return NULL. + */ + //! returns pointer to the specified element (1D case) + template const _Tp* find(int i0, size_t* hashval=0) const; + //! returns pointer to the specified element (2D case) + template const _Tp* find(int i0, int i1, size_t* hashval=0) const; + //! returns pointer to the specified element (3D case) + template const _Tp* find(int i0, int i1, int i2, size_t* hashval=0) const; + //! returns pointer to the specified element (nD case) + template const _Tp* find(const int* idx, size_t* hashval=0) const; + + //! erases the specified element (2D case) + void erase(int i0, int i1, size_t* hashval=0); + //! erases the specified element (3D case) + void erase(int i0, int i1, int i2, size_t* hashval=0); + //! erases the specified element (nD case) + void erase(const int* idx, size_t* hashval=0); + + //@{ + /*! + return the sparse matrix iterator pointing to the first sparse matrix element + */ + //! returns the sparse matrix iterator at the matrix beginning + SparseMatIterator begin(); + //! returns the sparse matrix iterator at the matrix beginning + template SparseMatIterator_<_Tp> begin(); + //! returns the read-only sparse matrix iterator at the matrix beginning + SparseMatConstIterator begin() const; + //! returns the read-only sparse matrix iterator at the matrix beginning + template SparseMatConstIterator_<_Tp> begin() const; + //@} + /*! + return the sparse matrix iterator pointing to the element following the last sparse matrix element + */ + //! returns the sparse matrix iterator at the matrix end + SparseMatIterator end(); + //! 
returns the read-only sparse matrix iterator at the matrix end + SparseMatConstIterator end() const; + //! returns the typed sparse matrix iterator at the matrix end + template SparseMatIterator_<_Tp> end(); + //! returns the typed read-only sparse matrix iterator at the matrix end + template SparseMatConstIterator_<_Tp> end() const; + + //! returns the value stored in the sparse martix node + template _Tp& value(Node* n); + //! returns the value stored in the sparse martix node + template const _Tp& value(const Node* n) const; + + ////////////// some internal-use methods /////////////// + Node* node(size_t nidx); + const Node* node(size_t nidx) const; + + uchar* newNode(const int* idx, size_t hashval); + void removeNode(size_t hidx, size_t nidx, size_t previdx); + void resizeHashTab(size_t newsize); + + enum { MAGIC_VAL=0x42FD0000, MAX_DIM=CV_MAX_DIM, HASH_SCALE=0x5bd1e995, HASH_BIT=0x80000000 }; + + int flags; + Hdr* hdr; +}; + +//! finds global minimum and maximum sparse array elements and returns their values and their locations +CV_EXPORTS void minMaxLoc(const SparseMat& a, double* minVal, + double* maxVal, int* minIdx=0, int* maxIdx=0); +//! computes norm of a sparse matrix +CV_EXPORTS double norm( const SparseMat& src, int normType ); +//! scales and shifts array elements so that either the specified norm (alpha) or the minimum (alpha) and maximum (beta) array values get the specified values +CV_EXPORTS void normalize( const SparseMat& src, SparseMat& dst, double alpha, int normType ); + +/*! + Read-Only Sparse Matrix Iterator. + Here is how to use the iterator to compute the sum of floating-point sparse matrix elements: + + \code + SparseMatConstIterator it = m.begin(), it_end = m.end(); + double s = 0; + CV_Assert( m.type() == CV_32F ); + for( ; it != it_end; ++it ) + s += it.value(); + \endcode +*/ +class CV_EXPORTS SparseMatConstIterator +{ +public: + //! the default constructor + SparseMatConstIterator(); + //! 
the full constructor setting the iterator to the first sparse matrix element + SparseMatConstIterator(const SparseMat* _m); + //! the copy constructor + SparseMatConstIterator(const SparseMatConstIterator& it); + + //! the assignment operator + SparseMatConstIterator& operator = (const SparseMatConstIterator& it); + + //! template method returning the current matrix element + template const _Tp& value() const; + //! returns the current node of the sparse matrix. it.node->idx is the current element index + const SparseMat::Node* node() const; + + //! moves iterator to the previous element + SparseMatConstIterator& operator --(); + //! moves iterator to the previous element + SparseMatConstIterator operator --(int); + //! moves iterator to the next element + SparseMatConstIterator& operator ++(); + //! moves iterator to the next element + SparseMatConstIterator operator ++(int); + + //! moves iterator to the element after the last element + void seekEnd(); + + const SparseMat* m; + size_t hashidx; + uchar* ptr; +}; + +/*! + Read-write Sparse Matrix Iterator + + The class is similar to cv::SparseMatConstIterator, + but can be used for in-place modification of the matrix elements. +*/ +class CV_EXPORTS SparseMatIterator : public SparseMatConstIterator +{ +public: + //! the default constructor + SparseMatIterator(); + //! the full constructor setting the iterator to the first sparse matrix element + SparseMatIterator(SparseMat* _m); + //! the full constructor setting the iterator to the specified sparse matrix element + SparseMatIterator(SparseMat* _m, const int* idx); + //! the copy constructor + SparseMatIterator(const SparseMatIterator& it); + + //! the assignment operator + SparseMatIterator& operator = (const SparseMatIterator& it); + //! returns read-write reference to the current sparse matrix element + template _Tp& value() const; + //! returns pointer to the current sparse matrix node. it.node->idx is the index of the current element (do not modify it!) 
+ SparseMat::Node* node() const; + + //! moves iterator to the next element + SparseMatIterator& operator ++(); + //! moves iterator to the next element + SparseMatIterator operator ++(int); +}; + +/*! + The Template Sparse Matrix class derived from cv::SparseMat + + The class provides slightly more convenient operations for accessing elements. + + \code + SparseMat m; + ... + SparseMat_ m_ = (SparseMat_&)m; + m_.ref(1)++; // equivalent to m.ref(1)++; + m_.ref(2) += m_(3); // equivalent to m.ref(2) += m.value(3); + \endcode +*/ +template class CV_EXPORTS SparseMat_ : public SparseMat +{ +public: + typedef SparseMatIterator_<_Tp> iterator; + typedef SparseMatConstIterator_<_Tp> const_iterator; + + //! the default constructor + SparseMat_(); + //! the full constructor equivelent to SparseMat(dims, _sizes, DataType<_Tp>::type) + SparseMat_(int dims, const int* _sizes); + //! the copy constructor. If DataType<_Tp>.type != m.type(), the m elements are converted + SparseMat_(const SparseMat& m); + //! the copy constructor. This is O(1) operation - no data is copied + SparseMat_(const SparseMat_& m); + //! converts dense matrix to the sparse form + SparseMat_(const Mat& m); + //! converts the old-style sparse matrix to the C++ class. All the elements are copied + SparseMat_(const CvSparseMat* m); + //! the assignment operator. If DataType<_Tp>.type != m.type(), the m elements are converted + SparseMat_& operator = (const SparseMat& m); + //! the assignment operator. This is O(1) operation - no data is copied + SparseMat_& operator = (const SparseMat_& m); + //! converts dense matrix to the sparse form + SparseMat_& operator = (const Mat& m); + + //! makes full copy of the matrix. All the elements are duplicated + SparseMat_ clone() const; + //! equivalent to cv::SparseMat::create(dims, _sizes, DataType<_Tp>::type) + void create(int dims, const int* _sizes); + //! converts sparse matrix to the old-style CvSparseMat. 
All the elements are copied + operator CvSparseMat*() const; + + //! returns type of the matrix elements + int type() const; + //! returns depth of the matrix elements + int depth() const; + //! returns the number of channels in each matrix element + int channels() const; + + //! equivalent to SparseMat::ref<_Tp>(i0, hashval) + _Tp& ref(int i0, size_t* hashval=0); + //! equivalent to SparseMat::ref<_Tp>(i0, i1, hashval) + _Tp& ref(int i0, int i1, size_t* hashval=0); + //! equivalent to SparseMat::ref<_Tp>(i0, i1, i2, hashval) + _Tp& ref(int i0, int i1, int i2, size_t* hashval=0); + //! equivalent to SparseMat::ref<_Tp>(idx, hashval) + _Tp& ref(const int* idx, size_t* hashval=0); + + //! equivalent to SparseMat::value<_Tp>(i0, hashval) + _Tp operator()(int i0, size_t* hashval=0) const; + //! equivalent to SparseMat::value<_Tp>(i0, i1, hashval) + _Tp operator()(int i0, int i1, size_t* hashval=0) const; + //! equivalent to SparseMat::value<_Tp>(i0, i1, i2, hashval) + _Tp operator()(int i0, int i1, int i2, size_t* hashval=0) const; + //! equivalent to SparseMat::value<_Tp>(idx, hashval) + _Tp operator()(const int* idx, size_t* hashval=0) const; + + //! returns sparse matrix iterator pointing to the first sparse matrix element + SparseMatIterator_<_Tp> begin(); + //! returns read-only sparse matrix iterator pointing to the first sparse matrix element + SparseMatConstIterator_<_Tp> begin() const; + //! returns sparse matrix iterator pointing to the element following the last sparse matrix element + SparseMatIterator_<_Tp> end(); + //! returns read-only sparse matrix iterator pointing to the element following the last sparse matrix element + SparseMatConstIterator_<_Tp> end() const; +}; + + +/*! + Template Read-Only Sparse Matrix Iterator Class. + + This is the derived from SparseMatConstIterator class that + introduces more convenient operator *() for accessing the current element. 
+*/ +template class CV_EXPORTS SparseMatConstIterator_ : public SparseMatConstIterator +{ +public: + typedef std::forward_iterator_tag iterator_category; + + //! the default constructor + SparseMatConstIterator_(); + //! the full constructor setting the iterator to the first sparse matrix element + SparseMatConstIterator_(const SparseMat_<_Tp>* _m); + //! the copy constructor + SparseMatConstIterator_(const SparseMatConstIterator_& it); + + //! the assignment operator + SparseMatConstIterator_& operator = (const SparseMatConstIterator_& it); + //! the element access operator + const _Tp& operator *() const; + + //! moves iterator to the next element + SparseMatConstIterator_& operator ++(); + //! moves iterator to the next element + SparseMatConstIterator_ operator ++(int); +}; + +/*! + Template Read-Write Sparse Matrix Iterator Class. + + This is the derived from cv::SparseMatConstIterator_ class that + introduces more convenient operator *() for accessing the current element. +*/ +template class CV_EXPORTS SparseMatIterator_ : public SparseMatConstIterator_<_Tp> +{ +public: + typedef std::forward_iterator_tag iterator_category; + + //! the default constructor + SparseMatIterator_(); + //! the full constructor setting the iterator to the first sparse matrix element + SparseMatIterator_(SparseMat_<_Tp>* _m); + //! the copy constructor + SparseMatIterator_(const SparseMatIterator_& it); + + //! the assignment operator + SparseMatIterator_& operator = (const SparseMatIterator_& it); + //! returns the reference to the current element + _Tp& operator *() const; + + //! moves the iterator to the next element + SparseMatIterator_& operator ++(); + //! moves the iterator to the next element + SparseMatIterator_ operator ++(int); +}; + +//////////////////// Fast Nearest-Neighbor Search Structure //////////////////// + +/*! + Fast Nearest Neighbor Search Class. + + The class implements D. 
Lowe BBF (Best-Bin-First) algorithm for the last + approximate (or accurate) nearest neighbor search in multi-dimensional spaces. + + First, a set of vectors is passed to KDTree::KDTree() constructor + or KDTree::build() method, where it is reordered. + + Then arbitrary vectors can be passed to KDTree::findNearest() methods, which + find the K nearest neighbors among the vectors from the initial set. + The user can balance between the speed and accuracy of the search by varying Emax + parameter, which is the number of leaves that the algorithm checks. + The larger parameter values yield more accurate results at the expense of lower processing speed. + + \code + KDTree T(points, false); + const int K = 3, Emax = INT_MAX; + int idx[K]; + float dist[K]; + T.findNearest(query_vec, K, Emax, idx, 0, dist); + CV_Assert(dist[0] <= dist[1] && dist[1] <= dist[2]); + \endcode +*/ +class CV_EXPORTS_W KDTree +{ +public: + /*! + The node of the search tree. + */ + struct Node + { + Node() : idx(-1), left(-1), right(-1), boundary(0.f) {} + Node(int _idx, int _left, int _right, float _boundary) + : idx(_idx), left(_left), right(_right), boundary(_boundary) {} + //! split dimension; >=0 for nodes (dim), < 0 for leaves (index of the point) + int idx; + //! node indices of the left and the right branches + int left, right; + //! go to the left if query_vec[node.idx]<=node.boundary, otherwise go to the right + float boundary; + }; + + //! the default constructor + CV_WRAP KDTree(); + //! the full constructor that builds the search tree + CV_WRAP KDTree(InputArray points, bool copyAndReorderPoints=false); + //! the full constructor that builds the search tree + CV_WRAP KDTree(InputArray points, InputArray _labels, + bool copyAndReorderPoints=false); + //! builds the search tree + CV_WRAP void build(InputArray points, bool copyAndReorderPoints=false); + //! builds the search tree + CV_WRAP void build(InputArray points, InputArray labels, + bool copyAndReorderPoints=false); + //! 
finds the K nearest neighbors of "vec" while looking at Emax (at most) leaves + CV_WRAP int findNearest(InputArray vec, int K, int Emax, + OutputArray neighborsIdx, + OutputArray neighbors=noArray(), + OutputArray dist=noArray(), + OutputArray labels=noArray()) const; + //! finds all the points from the initial set that belong to the specified box + CV_WRAP void findOrthoRange(InputArray minBounds, + InputArray maxBounds, + OutputArray neighborsIdx, + OutputArray neighbors=noArray(), + OutputArray labels=noArray()) const; + //! returns vectors with the specified indices + CV_WRAP void getPoints(InputArray idx, OutputArray pts, + OutputArray labels=noArray()) const; + //! return a vector with the specified index + const float* getPoint(int ptidx, int* label=0) const; + //! returns the search space dimensionality + CV_WRAP int dims() const; + + vector nodes; //!< all the tree nodes + CV_PROP Mat points; //!< all the points. It can be a reordered copy of the input vector set or the original vector set. + CV_PROP vector labels; //!< the parallel array of labels. + CV_PROP int maxDepth; //!< maximum depth of the search tree. Do not modify it + CV_PROP_RW int normType; //!< type of the distance (cv::NORM_L1 or cv::NORM_L2) used for search. Initially set to cv::NORM_L2, but you can modify it +}; + +//////////////////////////////////////// XML & YAML I/O //////////////////////////////////// + +class CV_EXPORTS FileNode; + +/*! + XML/YAML File Storage Class. + + The class describes an object associated with XML or YAML file. + It can be used to store data to such a file or read and decode the data. + + The storage is organized as a tree of nested sequences (or lists) and mappings. + Sequence is a heterogenious array, which elements are accessed by indices or sequentially using an iterator. + Mapping is analogue of std::map or C structure, which elements are accessed by names. + The most top level structure is a mapping. 
+ Leaves of the file storage tree are integers, floating-point numbers and text strings. + + For example, the following code: + + \code + // open file storage for writing. Type of the file is determined from the extension + FileStorage fs("test.yml", FileStorage::WRITE); + fs << "test_int" << 5 << "test_real" << 3.1 << "test_string" << "ABCDEFGH"; + fs << "test_mat" << Mat::eye(3,3,CV_32F); + + fs << "test_list" << "[" << 0.0000000000001 << 2 << CV_PI << -3435345 << "2-502 2-029 3egegeg" << + "{:" << "month" << 12 << "day" << 31 << "year" << 1969 << "}" << "]"; + fs << "test_map" << "{" << "x" << 1 << "y" << 2 << "width" << 100 << "height" << 200 << "lbp" << "[:"; + + const uchar arr[] = {0, 1, 1, 0, 1, 1, 0, 1}; + fs.writeRaw("u", arr, (int)(sizeof(arr)/sizeof(arr[0]))); + + fs << "]" << "}"; + \endcode + + will produce the following file: + + \verbatim + %YAML:1.0 + test_int: 5 + test_real: 3.1000000000000001e+00 + test_string: ABCDEFGH + test_mat: !!opencv-matrix + rows: 3 + cols: 3 + dt: f + data: [ 1., 0., 0., 0., 1., 0., 0., 0., 1. ] + test_list: + - 1.0000000000000000e-13 + - 2 + - 3.1415926535897931e+00 + - -3435345 + - "2-502 2-029 3egegeg" + - { month:12, day:31, year:1969 } + test_map: + x: 1 + y: 2 + width: 100 + height: 200 + lbp: [ 0, 1, 1, 0, 1, 1, 0, 1 ] + \endverbatim + + and to read the file above, the following code can be used: + + \code + // open file storage for reading. 
+ // Type of the file is determined from the content, not the extension + FileStorage fs("test.yml", FileStorage::READ); + int test_int = (int)fs["test_int"]; + double test_real = (double)fs["test_real"]; + string test_string = (string)fs["test_string"]; + + Mat M; + fs["test_mat"] >> M; + + FileNode tl = fs["test_list"]; + CV_Assert(tl.type() == FileNode::SEQ && tl.size() == 6); + double tl0 = (double)tl[0]; + int tl1 = (int)tl[1]; + double tl2 = (double)tl[2]; + int tl3 = (int)tl[3]; + string tl4 = (string)tl[4]; + CV_Assert(tl[5].type() == FileNode::MAP && tl[5].size() == 3); + + int month = (int)tl[5]["month"]; + int day = (int)tl[5]["day"]; + int year = (int)tl[5]["year"]; + + FileNode tm = fs["test_map"]; + + int x = (int)tm["x"]; + int y = (int)tm["y"]; + int width = (int)tm["width"]; + int height = (int)tm["height"]; + + int lbp_val = 0; + FileNodeIterator it = tm["lbp"].begin(); + + for(int k = 0; k < 8; k++, ++it) + lbp_val |= ((int)*it) << k; + \endcode +*/ +class CV_EXPORTS_W FileStorage +{ +public: + //! file storage mode + enum + { + READ=0, //! read mode + WRITE=1, //! write mode + APPEND=2 //! append mode + }; + enum + { + UNDEFINED=0, + VALUE_EXPECTED=1, + NAME_EXPECTED=2, + INSIDE_MAP=4 + }; + //! the default constructor + CV_WRAP FileStorage(); + //! the full constructor that opens file storage for reading or writing + CV_WRAP FileStorage(const string& filename, int flags, const string& encoding=string()); + //! the constructor that takes pointer to the C FileStorage structure + FileStorage(CvFileStorage* fs); + //! the destructor. calls release() + virtual ~FileStorage(); + + //! opens file storage for reading or writing. The previous storage is closed with release() + CV_WRAP virtual bool open(const string& filename, int flags, const string& encoding=string()); + //! returns true if the object is associated with currently opened file. + CV_WRAP virtual bool isOpened() const; + //! 
closes the file and releases all the memory buffers + CV_WRAP virtual void release(); + + //! returns the first element of the top-level mapping + CV_WRAP FileNode getFirstTopLevelNode() const; + //! returns the top-level mapping. YAML supports multiple streams + CV_WRAP FileNode root(int streamidx=0) const; + //! returns the specified element of the top-level mapping + FileNode operator[](const string& nodename) const; + //! returns the specified element of the top-level mapping + CV_WRAP FileNode operator[](const char* nodename) const; + + //! returns pointer to the underlying C FileStorage structure + CvFileStorage* operator *() { return fs; } + //! returns pointer to the underlying C FileStorage structure + const CvFileStorage* operator *() const { return fs; } + //! writes one or more numbers of the specified format to the currently written structure + void writeRaw( const string& fmt, const uchar* vec, size_t len ); + //! writes the registered C structure (CvMat, CvMatND, CvSeq). See cvWrite() + void writeObj( const string& name, const void* obj ); + + //! returns the normalized object name for the specified file name + static string getDefaultObjectName(const string& filename); + + Ptr fs; //!< the underlying C FileStorage structure + string elname; //!< the currently written element + vector structs; //!< the stack of written structures + int state; //!< the writer state +}; + +class CV_EXPORTS FileNodeIterator; + +/*! + File Storage Node class + + The node is used to store each and every element of the file storage opened for reading - + from the primitive objects, such as numbers and text strings, to the complex nodes: + sequences, mappings and the registered objects. + + Note that file nodes are only used for navigating file storages opened for reading. + When a file storage is opened for writing, no data is stored in memory after it is written. +*/ +class CV_EXPORTS_W_SIMPLE FileNode +{ +public: + //! 
type of the file storage node + enum + { + NONE=0, //!< empty node + INT=1, //!< an integer + REAL=2, //!< floating-point number + FLOAT=REAL, //!< synonym or REAL + STR=3, //!< text string in UTF-8 encoding + STRING=STR, //!< synonym for STR + REF=4, //!< integer of size size_t. Typically used for storing complex dynamic structures where some elements reference the others + SEQ=5, //!< sequence + MAP=6, //!< mapping + TYPE_MASK=7, + FLOW=8, //!< compact representation of a sequence or mapping. Used only by YAML writer + USER=16, //!< a registered object (e.g. a matrix) + EMPTY=32, //!< empty structure (sequence or mapping) + NAMED=64 //!< the node has a name (i.e. it is element of a mapping) + }; + //! the default constructor + CV_WRAP FileNode(); + //! the full constructor wrapping CvFileNode structure. + FileNode(const CvFileStorage* fs, const CvFileNode* node); + //! the copy constructor + FileNode(const FileNode& node); + //! returns element of a mapping node + FileNode operator[](const string& nodename) const; + //! returns element of a mapping node + CV_WRAP FileNode operator[](const char* nodename) const; + //! returns element of a sequence node + CV_WRAP FileNode operator[](int i) const; + //! returns type of the node + CV_WRAP int type() const; + + //! returns true if the node is empty + CV_WRAP bool empty() const; + //! returns true if the node is a "none" object + CV_WRAP bool isNone() const; + //! returns true if the node is a sequence + CV_WRAP bool isSeq() const; + //! returns true if the node is a mapping + CV_WRAP bool isMap() const; + //! returns true if the node is an integer + CV_WRAP bool isInt() const; + //! returns true if the node is a floating-point number + CV_WRAP bool isReal() const; + //! returns true if the node is a text string + CV_WRAP bool isString() const; + //! returns true if the node has a name + CV_WRAP bool isNamed() const; + //! 
returns the node name or an empty string if the node is nameless + CV_WRAP string name() const; + //! returns the number of elements in the node, if it is a sequence or mapping, or 1 otherwise. + CV_WRAP size_t size() const; + //! returns the node content as an integer. If the node stores floating-point number, it is rounded. + operator int() const; + //! returns the node content as float + operator float() const; + //! returns the node content as double + operator double() const; + //! returns the node content as text string + operator string() const; + + //! returns pointer to the underlying file node + CvFileNode* operator *(); + //! returns pointer to the underlying file node + const CvFileNode* operator* () const; + + //! returns iterator pointing to the first node element + FileNodeIterator begin() const; + //! returns iterator pointing to the element following the last node element + FileNodeIterator end() const; + + //! reads node elements to the buffer with the specified format + void readRaw( const string& fmt, uchar* vec, size_t len ) const; + //! reads the registered object and returns pointer to it + void* readObj() const; + + // do not use wrapper pointer classes for better efficiency + const CvFileStorage* fs; + const CvFileNode* node; +}; + + +/*! + File Node Iterator + + The class is used for iterating sequences (usually) and mappings. + */ +class CV_EXPORTS FileNodeIterator +{ +public: + //! the default constructor + FileNodeIterator(); + //! the full constructor set to the ofs-th element of the node + FileNodeIterator(const CvFileStorage* fs, const CvFileNode* node, size_t ofs=0); + //! the copy constructor + FileNodeIterator(const FileNodeIterator& it); + //! returns the currently observed element + FileNode operator *() const; + //! accesses the currently observed element methods + FileNode operator ->() const; + + //! moves iterator to the next node + FileNodeIterator& operator ++ (); + //! 
moves iterator to the next node + FileNodeIterator operator ++ (int); + //! moves iterator to the previous node + FileNodeIterator& operator -- (); + //! moves iterator to the previous node + FileNodeIterator operator -- (int); + //! moves iterator forward by the specified offset (possibly negative) + FileNodeIterator& operator += (int); + //! moves iterator backward by the specified offset (possibly negative) + FileNodeIterator& operator -= (int); + + //! reads the next maxCount elements (or less, if the sequence/mapping last element occurs earlier) to the buffer with the specified format + FileNodeIterator& readRaw( const string& fmt, uchar* vec, + size_t maxCount=(size_t)INT_MAX ); + + const CvFileStorage* fs; + const CvFileNode* container; + CvSeqReader reader; + size_t remaining; +}; + +////////////// convenient wrappers for operating old-style dynamic structures ////////////// + +template class SeqIterator; + +typedef Ptr MemStorage; + +/*! + Template Sequence Class derived from CvSeq + + The class provides more convenient access to sequence elements, + STL-style operations and iterators. + + \note The class is targeted for simple data types, + i.e. no constructors or destructors + are called for the sequence elements. +*/ +template class CV_EXPORTS Seq +{ +public: + typedef SeqIterator<_Tp> iterator; + typedef SeqIterator<_Tp> const_iterator; + + //! the default constructor + Seq(); + //! the constructor for wrapping CvSeq structure. The real element type in CvSeq should match _Tp. + Seq(const CvSeq* seq); + //! creates the empty sequence that resides in the specified storage + Seq(MemStorage& storage, int headerSize = sizeof(CvSeq)); + //! returns read-write reference to the specified element + _Tp& operator [](int idx); + //! returns read-only reference to the specified element + const _Tp& operator[](int idx) const; + //! returns iterator pointing to the beginning of the sequence + SeqIterator<_Tp> begin() const; + //! 
returns iterator pointing to the element following the last sequence element + SeqIterator<_Tp> end() const; + //! returns the number of elements in the sequence + size_t size() const; + //! returns the type of sequence elements (CV_8UC1 ... CV_64FC(CV_CN_MAX) ...) + int type() const; + //! returns the depth of sequence elements (CV_8U ... CV_64F) + int depth() const; + //! returns the number of channels in each sequence element + int channels() const; + //! returns the size of each sequence element + size_t elemSize() const; + //! returns index of the specified sequence element + size_t index(const _Tp& elem) const; + //! appends the specified element to the end of the sequence + void push_back(const _Tp& elem); + //! appends the specified element to the front of the sequence + void push_front(const _Tp& elem); + //! appends zero or more elements to the end of the sequence + void push_back(const _Tp* elems, size_t count); + //! appends zero or more elements to the front of the sequence + void push_front(const _Tp* elems, size_t count); + //! inserts the specified element to the specified position + void insert(int idx, const _Tp& elem); + //! inserts zero or more elements to the specified position + void insert(int idx, const _Tp* elems, size_t count); + //! removes element at the specified position + void remove(int idx); + //! removes the specified subsequence + void remove(const Range& r); + + //! returns reference to the first sequence element + _Tp& front(); + //! returns read-only reference to the first sequence element + const _Tp& front() const; + //! returns reference to the last sequence element + _Tp& back(); + //! returns read-only reference to the last sequence element + const _Tp& back() const; + //! returns true iff the sequence contains no elements + bool empty() const; + + //! removes all the elements from the sequence + void clear(); + //! removes the first element from the sequence + void pop_front(); + //! 
removes the last element from the sequence + void pop_back(); + //! removes zero or more elements from the beginning of the sequence + void pop_front(_Tp* elems, size_t count); + //! removes zero or more elements from the end of the sequence + void pop_back(_Tp* elems, size_t count); + + //! copies the whole sequence or the sequence slice to the specified vector + void copyTo(vector<_Tp>& vec, const Range& range=Range::all()) const; + //! returns the vector containing all the sequence elements + operator vector<_Tp>() const; + + CvSeq* seq; +}; + + +/*! + STL-style Sequence Iterator inherited from the CvSeqReader structure +*/ +template class CV_EXPORTS SeqIterator : public CvSeqReader +{ +public: + //! the default constructor + SeqIterator(); + //! the constructor setting the iterator to the beginning or to the end of the sequence + SeqIterator(const Seq<_Tp>& seq, bool seekEnd=false); + //! positions the iterator within the sequence + void seek(size_t pos); + //! reports the current iterator position + size_t tell() const; + //! returns reference to the current sequence element + _Tp& operator *(); + //! returns read-only reference to the current sequence element + const _Tp& operator *() const; + //! moves iterator to the next sequence element + SeqIterator& operator ++(); + //! moves iterator to the next sequence element + SeqIterator operator ++(int) const; + //! moves iterator to the previous sequence element + SeqIterator& operator --(); + //! moves iterator to the previous sequence element + SeqIterator operator --(int) const; + + //! moves iterator forward by the specified offset (possibly negative) + SeqIterator& operator +=(int); + //! moves iterator backward by the specified offset (possibly negative) + SeqIterator& operator -=(int); + + // this is index of the current element module seq->total*2 + // (to distinguish between 0 and seq->total) + int index; +}; + + +#if 0 +class CV_EXPORTS AlgorithmImpl; + +/*! 
+ Base class for high-level OpenCV algorithms +*/ +class CV_EXPORTS Algorithm +{ +public: + virtual ~Algorithm(); + virtual string name() const; + + template _Tp get(int paramId) const; + template bool set(int paramId, const _Tp& value); + string paramName(int paramId) const; + string paramHelp(int paramId) const; + int paramType(int paramId) const; + int findParam(const string& name) const; + template _Tp paramDefaultValue(int paramId) const; + template bool paramRange(int paramId, _Tp& minVal, _Tp& maxVal) const; + + virtual void getParams(vector& ids) const; + virtual void write(vector& buf) const; + virtual bool read(const vector& buf); + + typedef Algorithm* (*Constructor)(void); + static void add(const string& name, Constructor create); + static void getList(vector& algorithms); + static Ptr create(const string& name); + +protected: + template void addParam(int propId, _Tp& value, bool readOnly, const string& name, + const string& help=string(), const _Tp& defaultValue=_Tp(), + _Tp (Algorithm::*getter)()=0, bool (Algorithm::*setter)(const _Tp&)=0); + template void setParamRange(int propId, const _Tp& minVal, const _Tp& maxVal); + + bool set_(int paramId, int argType, const void* value); + void get_(int paramId, int argType, void* value); + void paramDefaultValue_(int paramId, int argType, void* value); + void paramRange_(int paramId, int argType, void* minval, void* maxval); + void addParam_(int propId, int argType, void* value, bool readOnly, const string& name, + const string& help, const void* defaultValue, void* getter, void* setter); + void setParamRange_(int propId, int argType, const void* minVal, const void* maxVal); + + Ptr impl; +}; +#endif + +/*! 
+"\nThe CommandLineParser class is designed for command line arguments parsing\n" + "Keys map: \n" + "Before you start to work with CommandLineParser you have to create a map for keys.\n" + " It will look like this\n" + " const char* keys =\n" + " {\n" + " { s| string| 123asd |string parameter}\n" + " { d| digit | 100 |digit parameter }\n" + " { c|noCamera|false |without camera }\n" + " { 1| |some text|help }\n" + " { 2| |333 |another help }\n" + " };\n" + "Usage syntax: \n" + " \"{\" - start of parameter string.\n" + " \"}\" - end of parameter string\n" + " \"|\" - separator between short name, full name, default value and help\n" + "Supported syntax: \n" + " --key1=arg1 \n" + " -key2=arg2 \n" + "Usage: \n" + " Imagine that the input parameters are next:\n" + " -s=string_value --digit=250 --noCamera lena.jpg 10000\n" + " CommandLineParser parser(argc, argv, keys) - create a parser object\n" + " parser.get(\"s\" or \"string\") will return you first parameter value\n" + " parser.get(\"s\", false or \"string\", false) will return you first parameter value\n" + " without spaces in end and begin\n" + " parser.get(\"d\" or \"digit\") will return you second parameter value.\n" + " It also works with 'unsigned int', 'double', and 'float' types>\n" + " parser.get(\"c\" or \"noCamera\") will return you true .\n" + " If you enter this key in commandline>\n" + " It return you false otherwise.\n" + " parser.get(\"1\") will return you the first argument without parameter (lena.jpg) \n" + " parser.get(\"2\") will return you the second argument without parameter (10000)\n" + " It also works with 'unsigned int', 'double', and 'float' types \n" +*/ +class CV_EXPORTS CommandLineParser +{ + public: + + //! the default constructor + CommandLineParser(int argc, const char* argv[], const char* key_map); + + //! 
get parameter, you can choose: delete spaces in end and begin or not + template + _Tp get(const std::string& name, bool space_delete=true) + { + if (!has(name)) + { + return _Tp(); + } + std::string str = getString(name); + return analizeValue<_Tp>(str, space_delete); + } + + //! print short name, full name, current value and help for all params + void printParams(); + + protected: + std::map > data; + std::string getString(const std::string& name); + + bool has(const std::string& keys); + + template + _Tp analizeValue(const std::string& str, bool space_delete=false); + + template + static _Tp getData(const std::string& str) + { + _Tp res; + std::stringstream s1(str); + s1 >> res; + return res; + } + + template + _Tp fromStringNumber(const std::string& str);//the default conversion function for numbers + + }; + +template<> CV_EXPORTS +bool CommandLineParser::get(const std::string& name, bool space_delete); + +template<> CV_EXPORTS +std::string CommandLineParser::analizeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +int CommandLineParser::analizeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +unsigned CommandLineParser::analizeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +float CommandLineParser::analizeValue(const std::string& str, bool space_delete); + +template<> CV_EXPORTS +double CommandLineParser::analizeValue(const std::string& str, bool space_delete); + +} + +#endif // __cplusplus + +#include "opencv2/core/operations.hpp" +#include "opencv2/core/mat.hpp" + +#endif /*__OPENCV_CORE_HPP__*/ diff --git a/opencv2/core/core_c.h b/opencv2/core/core_c.h new file mode 100644 index 0000000..05d8c72 --- /dev/null +++ b/opencv2/core/core_c.h @@ -0,0 +1,1885 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
+// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + + +#ifndef __OPENCV_CORE_C_H__ +#define __OPENCV_CORE_C_H__ + +#include "opencv2/core/types_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/****************************************************************************************\ +* Array allocation, deallocation, initialization and access to elements * +\****************************************************************************************/ + +/* wrapper. + If there is no enough memory, the function + (as well as other OpenCV functions that call cvAlloc) + raises an error. */ +CVAPI(void*) cvAlloc( size_t size ); + +/* wrapper. + Here and further all the memory releasing functions + (that all call cvFree) take double pointer in order to + to clear pointer to the data after releasing it. + Passing pointer to NULL pointer is Ok: nothing happens in this case +*/ +CVAPI(void) cvFree_( void* ptr ); +#define cvFree(ptr) (cvFree_(*(ptr)), *(ptr)=0) + +/* Allocates and initializes IplImage header */ +CVAPI(IplImage*) cvCreateImageHeader( CvSize size, int depth, int channels ); + +/* Inializes IplImage header */ +CVAPI(IplImage*) cvInitImageHeader( IplImage* image, CvSize size, int depth, + int channels, int origin CV_DEFAULT(0), + int align CV_DEFAULT(4)); + +/* Creates IPL image (header and data) */ +CVAPI(IplImage*) cvCreateImage( CvSize size, int depth, int channels ); + +/* Releases (i.e. 
deallocates) IPL image header */ +CVAPI(void) cvReleaseImageHeader( IplImage** image ); + +/* Releases IPL image header and data */ +CVAPI(void) cvReleaseImage( IplImage** image ); + +/* Creates a copy of IPL image (widthStep may differ) */ +CVAPI(IplImage*) cvCloneImage( const IplImage* image ); + +/* Sets a Channel Of Interest (only a few functions support COI) - + use cvCopy to extract the selected channel and/or put it back */ +CVAPI(void) cvSetImageCOI( IplImage* image, int coi ); + +/* Retrieves image Channel Of Interest */ +CVAPI(int) cvGetImageCOI( const IplImage* image ); + +/* Sets image ROI (region of interest) (COI is not changed) */ +CVAPI(void) cvSetImageROI( IplImage* image, CvRect rect ); + +/* Resets image ROI and COI */ +CVAPI(void) cvResetImageROI( IplImage* image ); + +/* Retrieves image ROI */ +CVAPI(CvRect) cvGetImageROI( const IplImage* image ); + +/* Allocates and initalizes CvMat header */ +CVAPI(CvMat*) cvCreateMatHeader( int rows, int cols, int type ); + +#define CV_AUTOSTEP 0x7fffffff + +/* Initializes CvMat header */ +CVAPI(CvMat*) cvInitMatHeader( CvMat* mat, int rows, int cols, + int type, void* data CV_DEFAULT(NULL), + int step CV_DEFAULT(CV_AUTOSTEP) ); + +/* Allocates and initializes CvMat header and allocates data */ +CVAPI(CvMat*) cvCreateMat( int rows, int cols, int type ); + +/* Releases CvMat header and deallocates matrix data + (reference counting is used for data) */ +CVAPI(void) cvReleaseMat( CvMat** mat ); + +/* Decrements CvMat data reference counter and deallocates the data if + it reaches 0 */ +CV_INLINE void cvDecRefData( CvArr* arr ) +{ + if( CV_IS_MAT( arr )) + { + CvMat* mat = (CvMat*)arr; + mat->data.ptr = NULL; + if( mat->refcount != NULL && --*mat->refcount == 0 ) + cvFree( &mat->refcount ); + mat->refcount = NULL; + } + else if( CV_IS_MATND( arr )) + { + CvMatND* mat = (CvMatND*)arr; + mat->data.ptr = NULL; + if( mat->refcount != NULL && --*mat->refcount == 0 ) + cvFree( &mat->refcount ); + mat->refcount = NULL; 
+ } +} + +/* Increments CvMat data reference counter */ +CV_INLINE int cvIncRefData( CvArr* arr ) +{ + int refcount = 0; + if( CV_IS_MAT( arr )) + { + CvMat* mat = (CvMat*)arr; + if( mat->refcount != NULL ) + refcount = ++*mat->refcount; + } + else if( CV_IS_MATND( arr )) + { + CvMatND* mat = (CvMatND*)arr; + if( mat->refcount != NULL ) + refcount = ++*mat->refcount; + } + return refcount; +} + + +/* Creates an exact copy of the input matrix (except, may be, step value) */ +CVAPI(CvMat*) cvCloneMat( const CvMat* mat ); + + +/* Makes a new matrix from subrectangle of input array. + No data is copied */ +CVAPI(CvMat*) cvGetSubRect( const CvArr* arr, CvMat* submat, CvRect rect ); +#define cvGetSubArr cvGetSubRect + +/* Selects row span of the input array: arr(start_row:delta_row:end_row,:) + (end_row is not included into the span). */ +CVAPI(CvMat*) cvGetRows( const CvArr* arr, CvMat* submat, + int start_row, int end_row, + int delta_row CV_DEFAULT(1)); + +CV_INLINE CvMat* cvGetRow( const CvArr* arr, CvMat* submat, int row ) +{ + return cvGetRows( arr, submat, row, row + 1, 1 ); +} + + +/* Selects column span of the input array: arr(:,start_col:end_col) + (end_col is not included into the span) */ +CVAPI(CvMat*) cvGetCols( const CvArr* arr, CvMat* submat, + int start_col, int end_col ); + +CV_INLINE CvMat* cvGetCol( const CvArr* arr, CvMat* submat, int col ) +{ + return cvGetCols( arr, submat, col, col + 1 ); +} + +/* Select a diagonal of the input array. + (diag = 0 means the main diagonal, >0 means a diagonal above the main one, + <0 - below the main one). + The diagonal will be represented as a column (nx1 matrix). 
*/ +CVAPI(CvMat*) cvGetDiag( const CvArr* arr, CvMat* submat, + int diag CV_DEFAULT(0)); + +/* low-level scalar <-> raw data conversion functions */ +CVAPI(void) cvScalarToRawData( const CvScalar* scalar, void* data, int type, + int extend_to_12 CV_DEFAULT(0) ); + +CVAPI(void) cvRawDataToScalar( const void* data, int type, CvScalar* scalar ); + +/* Allocates and initializes CvMatND header */ +CVAPI(CvMatND*) cvCreateMatNDHeader( int dims, const int* sizes, int type ); + +/* Allocates and initializes CvMatND header and allocates data */ +CVAPI(CvMatND*) cvCreateMatND( int dims, const int* sizes, int type ); + +/* Initializes preallocated CvMatND header */ +CVAPI(CvMatND*) cvInitMatNDHeader( CvMatND* mat, int dims, const int* sizes, + int type, void* data CV_DEFAULT(NULL) ); + +/* Releases CvMatND */ +CV_INLINE void cvReleaseMatND( CvMatND** mat ) +{ + cvReleaseMat( (CvMat**)mat ); +} + +/* Creates a copy of CvMatND (except, may be, steps) */ +CVAPI(CvMatND*) cvCloneMatND( const CvMatND* mat ); + +/* Allocates and initializes CvSparseMat header and allocates data */ +CVAPI(CvSparseMat*) cvCreateSparseMat( int dims, const int* sizes, int type ); + +/* Releases CvSparseMat */ +CVAPI(void) cvReleaseSparseMat( CvSparseMat** mat ); + +/* Creates a copy of CvSparseMat (except, may be, zero items) */ +CVAPI(CvSparseMat*) cvCloneSparseMat( const CvSparseMat* mat ); + +/* Initializes sparse array iterator + (returns the first node or NULL if the array is empty) */ +CVAPI(CvSparseNode*) cvInitSparseMatIterator( const CvSparseMat* mat, + CvSparseMatIterator* mat_iterator ); + +// returns next sparse array node (or NULL if there is no more nodes) +CV_INLINE CvSparseNode* cvGetNextSparseNode( CvSparseMatIterator* mat_iterator ) +{ + if( mat_iterator->node->next ) + return mat_iterator->node = mat_iterator->node->next; + else + { + int idx; + for( idx = ++mat_iterator->curidx; idx < mat_iterator->mat->hashsize; idx++ ) + { + CvSparseNode* node = 
(CvSparseNode*)mat_iterator->mat->hashtable[idx]; + if( node ) + { + mat_iterator->curidx = idx; + return mat_iterator->node = node; + } + } + return NULL; + } +} + +/**************** matrix iterator: used for n-ary operations on dense arrays *********/ + +#define CV_MAX_ARR 10 + +typedef struct CvNArrayIterator +{ + int count; /* number of arrays */ + int dims; /* number of dimensions to iterate */ + CvSize size; /* maximal common linear size: { width = size, height = 1 } */ + uchar* ptr[CV_MAX_ARR]; /* pointers to the array slices */ + int stack[CV_MAX_DIM]; /* for internal use */ + CvMatND* hdr[CV_MAX_ARR]; /* pointers to the headers of the + matrices that are processed */ +} +CvNArrayIterator; + +#define CV_NO_DEPTH_CHECK 1 +#define CV_NO_CN_CHECK 2 +#define CV_NO_SIZE_CHECK 4 + +/* initializes iterator that traverses through several arrays simulteneously + (the function together with cvNextArraySlice is used for + N-ari element-wise operations) */ +CVAPI(int) cvInitNArrayIterator( int count, CvArr** arrs, + const CvArr* mask, CvMatND* stubs, + CvNArrayIterator* array_iterator, + int flags CV_DEFAULT(0) ); + +/* returns zero value if iteration is finished, non-zero (slice length) otherwise */ +CVAPI(int) cvNextNArraySlice( CvNArrayIterator* array_iterator ); + + +/* Returns type of array elements: + CV_8UC1 ... CV_64FC4 ... */ +CVAPI(int) cvGetElemType( const CvArr* arr ); + +/* Retrieves number of an array dimensions and + optionally sizes of the dimensions */ +CVAPI(int) cvGetDims( const CvArr* arr, int* sizes CV_DEFAULT(NULL) ); + + +/* Retrieves size of a particular array dimension. + For 2d arrays cvGetDimSize(arr,0) returns number of rows (image height) + and cvGetDimSize(arr,1) returns number of columns (image width) */ +CVAPI(int) cvGetDimSize( const CvArr* arr, int index ); + + +/* ptr = &arr(idx0,idx1,...). All indexes are zero-based, + the major dimensions go first (e.g. 
(y,x) for 2D, (z,y,x) for 3D */ +CVAPI(uchar*) cvPtr1D( const CvArr* arr, int idx0, int* type CV_DEFAULT(NULL)); +CVAPI(uchar*) cvPtr2D( const CvArr* arr, int idx0, int idx1, int* type CV_DEFAULT(NULL) ); +CVAPI(uchar*) cvPtr3D( const CvArr* arr, int idx0, int idx1, int idx2, + int* type CV_DEFAULT(NULL)); + +/* For CvMat or IplImage number of indices should be 2 + (row index (y) goes first, column index (x) goes next). + For CvMatND or CvSparseMat number of infices should match number of and + indices order should match the array dimension order. */ +CVAPI(uchar*) cvPtrND( const CvArr* arr, const int* idx, int* type CV_DEFAULT(NULL), + int create_node CV_DEFAULT(1), + unsigned* precalc_hashval CV_DEFAULT(NULL)); + +/* value = arr(idx0,idx1,...) */ +CVAPI(CvScalar) cvGet1D( const CvArr* arr, int idx0 ); +CVAPI(CvScalar) cvGet2D( const CvArr* arr, int idx0, int idx1 ); +CVAPI(CvScalar) cvGet3D( const CvArr* arr, int idx0, int idx1, int idx2 ); +CVAPI(CvScalar) cvGetND( const CvArr* arr, const int* idx ); + +/* for 1-channel arrays */ +CVAPI(double) cvGetReal1D( const CvArr* arr, int idx0 ); +CVAPI(double) cvGetReal2D( const CvArr* arr, int idx0, int idx1 ); +CVAPI(double) cvGetReal3D( const CvArr* arr, int idx0, int idx1, int idx2 ); +CVAPI(double) cvGetRealND( const CvArr* arr, const int* idx ); + +/* arr(idx0,idx1,...) 
= value */ +CVAPI(void) cvSet1D( CvArr* arr, int idx0, CvScalar value ); +CVAPI(void) cvSet2D( CvArr* arr, int idx0, int idx1, CvScalar value ); +CVAPI(void) cvSet3D( CvArr* arr, int idx0, int idx1, int idx2, CvScalar value ); +CVAPI(void) cvSetND( CvArr* arr, const int* idx, CvScalar value ); + +/* for 1-channel arrays */ +CVAPI(void) cvSetReal1D( CvArr* arr, int idx0, double value ); +CVAPI(void) cvSetReal2D( CvArr* arr, int idx0, int idx1, double value ); +CVAPI(void) cvSetReal3D( CvArr* arr, int idx0, + int idx1, int idx2, double value ); +CVAPI(void) cvSetRealND( CvArr* arr, const int* idx, double value ); + +/* clears element of ND dense array, + in case of sparse arrays it deletes the specified node */ +CVAPI(void) cvClearND( CvArr* arr, const int* idx ); + +/* Converts CvArr (IplImage or CvMat,...) to CvMat. + If the last parameter is non-zero, function can + convert multi(>2)-dimensional array to CvMat as long as + the last array's dimension is continous. The resultant + matrix will be have appropriate (a huge) number of rows */ +CVAPI(CvMat*) cvGetMat( const CvArr* arr, CvMat* header, + int* coi CV_DEFAULT(NULL), + int allowND CV_DEFAULT(0)); + +/* Converts CvArr (IplImage or CvMat) to IplImage */ +CVAPI(IplImage*) cvGetImage( const CvArr* arr, IplImage* image_header ); + + +/* Changes a shape of multi-dimensional array. + new_cn == 0 means that number of channels remains unchanged. + new_dims == 0 means that number and sizes of dimensions remain the same + (unless they need to be changed to set the new number of channels) + if new_dims == 1, there is no need to specify new dimension sizes + The resultant configuration should be achievable w/o data copying. 
+ If the resultant array is sparse, CvSparseMat header should be passed + to the function else if the result is 1 or 2 dimensional, + CvMat header should be passed to the function + else CvMatND header should be passed */ +CVAPI(CvArr*) cvReshapeMatND( const CvArr* arr, + int sizeof_header, CvArr* header, + int new_cn, int new_dims, int* new_sizes ); + +#define cvReshapeND( arr, header, new_cn, new_dims, new_sizes ) \ + cvReshapeMatND( (arr), sizeof(*(header)), (header), \ + (new_cn), (new_dims), (new_sizes)) + +CVAPI(CvMat*) cvReshape( const CvArr* arr, CvMat* header, + int new_cn, int new_rows CV_DEFAULT(0) ); + +/* Repeats source 2d array several times in both horizontal and + vertical direction to fill destination array */ +CVAPI(void) cvRepeat( const CvArr* src, CvArr* dst ); + +/* Allocates array data */ +CVAPI(void) cvCreateData( CvArr* arr ); + +/* Releases array data */ +CVAPI(void) cvReleaseData( CvArr* arr ); + +/* Attaches user data to the array header. The step is reffered to + the pre-last dimension. That is, all the planes of the array + must be joint (w/o gaps) */ +CVAPI(void) cvSetData( CvArr* arr, void* data, int step ); + +/* Retrieves raw data of CvMat, IplImage or CvMatND. 
+ In the latter case the function raises an error if + the array can not be represented as a matrix */ +CVAPI(void) cvGetRawData( const CvArr* arr, uchar** data, + int* step CV_DEFAULT(NULL), + CvSize* roi_size CV_DEFAULT(NULL)); + +/* Returns width and height of array in elements */ +CVAPI(CvSize) cvGetSize( const CvArr* arr ); + +/* Copies source array to destination array */ +CVAPI(void) cvCopy( const CvArr* src, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Sets all or "masked" elements of input array + to the same value*/ +CVAPI(void) cvSet( CvArr* arr, CvScalar value, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Clears all the array elements (sets them to 0) */ +CVAPI(void) cvSetZero( CvArr* arr ); +#define cvZero cvSetZero + + +/* Splits a multi-channel array into the set of single-channel arrays or + extracts particular [color] plane */ +CVAPI(void) cvSplit( const CvArr* src, CvArr* dst0, CvArr* dst1, + CvArr* dst2, CvArr* dst3 ); + +/* Merges a set of single-channel arrays into the single multi-channel array + or inserts one particular [color] plane to the array */ +CVAPI(void) cvMerge( const CvArr* src0, const CvArr* src1, + const CvArr* src2, const CvArr* src3, + CvArr* dst ); + +/* Copies several channels from input arrays to + certain channels of output arrays */ +CVAPI(void) cvMixChannels( const CvArr** src, int src_count, + CvArr** dst, int dst_count, + const int* from_to, int pair_count ); + +/* Performs linear transformation on every source array element: + dst(x,y,c) = scale*src(x,y,c)+shift. 
+ Arbitrary combination of input and output array depths are allowed + (number of channels must be the same), thus the function can be used + for type conversion */ +CVAPI(void) cvConvertScale( const CvArr* src, CvArr* dst, + double scale CV_DEFAULT(1), + double shift CV_DEFAULT(0) ); +#define cvCvtScale cvConvertScale +#define cvScale cvConvertScale +#define cvConvert( src, dst ) cvConvertScale( (src), (dst), 1, 0 ) + + +/* Performs linear transformation on every source array element, + stores absolute value of the result: + dst(x,y,c) = abs(scale*src(x,y,c)+shift). + destination array must have 8u type. + In other cases one may use cvConvertScale + cvAbsDiffS */ +CVAPI(void) cvConvertScaleAbs( const CvArr* src, CvArr* dst, + double scale CV_DEFAULT(1), + double shift CV_DEFAULT(0) ); +#define cvCvtScaleAbs cvConvertScaleAbs + + +/* checks termination criteria validity and + sets eps to default_eps (if it is not set), + max_iter to default_max_iters (if it is not set) +*/ +CVAPI(CvTermCriteria) cvCheckTermCriteria( CvTermCriteria criteria, + double default_eps, + int default_max_iters ); + +/****************************************************************************************\ +* Arithmetic, logic and comparison operations * +\****************************************************************************************/ + +/* dst(mask) = src1(mask) + src2(mask) */ +CVAPI(void) cvAdd( const CvArr* src1, const CvArr* src2, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(mask) = src(mask) + value */ +CVAPI(void) cvAddS( const CvArr* src, CvScalar value, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(mask) = src1(mask) - src2(mask) */ +CVAPI(void) cvSub( const CvArr* src1, const CvArr* src2, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(mask) = src(mask) - value = src(mask) + (-value) */ +CV_INLINE void cvSubS( const CvArr* src, CvScalar value, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)) +{ + cvAddS( src, cvScalar( 
-value.val[0], -value.val[1], -value.val[2], -value.val[3]), + dst, mask ); +} + +/* dst(mask) = value - src(mask) */ +CVAPI(void) cvSubRS( const CvArr* src, CvScalar value, CvArr* dst, + const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src1(idx) * src2(idx) * scale + (scaled element-wise multiplication of 2 arrays) */ +CVAPI(void) cvMul( const CvArr* src1, const CvArr* src2, + CvArr* dst, double scale CV_DEFAULT(1) ); + +/* element-wise division/inversion with scaling: + dst(idx) = src1(idx) * scale / src2(idx) + or dst(idx) = scale / src2(idx) if src1 == 0 */ +CVAPI(void) cvDiv( const CvArr* src1, const CvArr* src2, + CvArr* dst, double scale CV_DEFAULT(1)); + +/* dst = src1 * scale + src2 */ +CVAPI(void) cvScaleAdd( const CvArr* src1, CvScalar scale, + const CvArr* src2, CvArr* dst ); +#define cvAXPY( A, real_scalar, B, C ) cvScaleAdd(A, cvRealScalar(real_scalar), B, C) + +/* dst = src1 * alpha + src2 * beta + gamma */ +CVAPI(void) cvAddWeighted( const CvArr* src1, double alpha, + const CvArr* src2, double beta, + double gamma, CvArr* dst ); + +/* result = sum_i(src1(i) * src2(i)) (results for all channels are accumulated together) */ +CVAPI(double) cvDotProduct( const CvArr* src1, const CvArr* src2 ); + +/* dst(idx) = src1(idx) & src2(idx) */ +CVAPI(void) cvAnd( const CvArr* src1, const CvArr* src2, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src(idx) & value */ +CVAPI(void) cvAndS( const CvArr* src, CvScalar value, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src1(idx) | src2(idx) */ +CVAPI(void) cvOr( const CvArr* src1, const CvArr* src2, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src(idx) | value */ +CVAPI(void) cvOrS( const CvArr* src, CvScalar value, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src1(idx) ^ src2(idx) */ +CVAPI(void) cvXor( const CvArr* src1, const CvArr* src2, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = src(idx) ^ 
value */ +CVAPI(void) cvXorS( const CvArr* src, CvScalar value, + CvArr* dst, const CvArr* mask CV_DEFAULT(NULL)); + +/* dst(idx) = ~src(idx) */ +CVAPI(void) cvNot( const CvArr* src, CvArr* dst ); + +/* dst(idx) = lower(idx) <= src(idx) < upper(idx) */ +CVAPI(void) cvInRange( const CvArr* src, const CvArr* lower, + const CvArr* upper, CvArr* dst ); + +/* dst(idx) = lower <= src(idx) < upper */ +CVAPI(void) cvInRangeS( const CvArr* src, CvScalar lower, + CvScalar upper, CvArr* dst ); + +#define CV_CMP_EQ 0 +#define CV_CMP_GT 1 +#define CV_CMP_GE 2 +#define CV_CMP_LT 3 +#define CV_CMP_LE 4 +#define CV_CMP_NE 5 + +/* The comparison operation support single-channel arrays only. + Destination image should be 8uC1 or 8sC1 */ + +/* dst(idx) = src1(idx) _cmp_op_ src2(idx) */ +CVAPI(void) cvCmp( const CvArr* src1, const CvArr* src2, CvArr* dst, int cmp_op ); + +/* dst(idx) = src1(idx) _cmp_op_ value */ +CVAPI(void) cvCmpS( const CvArr* src, double value, CvArr* dst, int cmp_op ); + +/* dst(idx) = min(src1(idx),src2(idx)) */ +CVAPI(void) cvMin( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/* dst(idx) = max(src1(idx),src2(idx)) */ +CVAPI(void) cvMax( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/* dst(idx) = min(src(idx),value) */ +CVAPI(void) cvMinS( const CvArr* src, double value, CvArr* dst ); + +/* dst(idx) = max(src(idx),value) */ +CVAPI(void) cvMaxS( const CvArr* src, double value, CvArr* dst ); + +/* dst(x,y,c) = abs(src1(x,y,c) - src2(x,y,c)) */ +CVAPI(void) cvAbsDiff( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/* dst(x,y,c) = abs(src(x,y,c) - value(c)) */ +CVAPI(void) cvAbsDiffS( const CvArr* src, CvArr* dst, CvScalar value ); +#define cvAbs( src, dst ) cvAbsDiffS( (src), (dst), cvScalarAll(0)) + +/****************************************************************************************\ +* Math operations * +\****************************************************************************************/ + +/* Does cartesian->polar 
coordinates conversion. + Either of output components (magnitude or angle) is optional */ +CVAPI(void) cvCartToPolar( const CvArr* x, const CvArr* y, + CvArr* magnitude, CvArr* angle CV_DEFAULT(NULL), + int angle_in_degrees CV_DEFAULT(0)); + +/* Does polar->cartesian coordinates conversion. + Either of output components (magnitude or angle) is optional. + If magnitude is missing it is assumed to be all 1's */ +CVAPI(void) cvPolarToCart( const CvArr* magnitude, const CvArr* angle, + CvArr* x, CvArr* y, + int angle_in_degrees CV_DEFAULT(0)); + +/* Does powering: dst(idx) = src(idx)^power */ +CVAPI(void) cvPow( const CvArr* src, CvArr* dst, double power ); + +/* Does exponention: dst(idx) = exp(src(idx)). + Overflow is not handled yet. Underflow is handled. + Maximal relative error is ~7e-6 for single-precision input */ +CVAPI(void) cvExp( const CvArr* src, CvArr* dst ); + +/* Calculates natural logarithms: dst(idx) = log(abs(src(idx))). + Logarithm of 0 gives large negative number(~-700) + Maximal relative error is ~3e-7 for single-precision output +*/ +CVAPI(void) cvLog( const CvArr* src, CvArr* dst ); + +/* Fast arctangent calculation */ +CVAPI(float) cvFastArctan( float y, float x ); + +/* Fast cubic root calculation */ +CVAPI(float) cvCbrt( float value ); + +/* Checks array values for NaNs, Infs or simply for too large numbers + (if CV_CHECK_RANGE is set). If CV_CHECK_QUIET is set, + no runtime errors is raised (function returns zero value in case of "bad" values). 
+ Otherwise cvError is called */ +#define CV_CHECK_RANGE 1 +#define CV_CHECK_QUIET 2 +CVAPI(int) cvCheckArr( const CvArr* arr, int flags CV_DEFAULT(0), + double min_val CV_DEFAULT(0), double max_val CV_DEFAULT(0)); +#define cvCheckArray cvCheckArr + +#define CV_RAND_UNI 0 +#define CV_RAND_NORMAL 1 +CVAPI(void) cvRandArr( CvRNG* rng, CvArr* arr, int dist_type, + CvScalar param1, CvScalar param2 ); + +CVAPI(void) cvRandShuffle( CvArr* mat, CvRNG* rng, + double iter_factor CV_DEFAULT(1.)); + +#define CV_SORT_EVERY_ROW 0 +#define CV_SORT_EVERY_COLUMN 1 +#define CV_SORT_ASCENDING 0 +#define CV_SORT_DESCENDING 16 + +CVAPI(void) cvSort( const CvArr* src, CvArr* dst CV_DEFAULT(NULL), + CvArr* idxmat CV_DEFAULT(NULL), + int flags CV_DEFAULT(0)); + +/* Finds real roots of a cubic equation */ +CVAPI(int) cvSolveCubic( const CvMat* coeffs, CvMat* roots ); + +/* Finds all real and complex roots of a polynomial equation */ +CVAPI(void) cvSolvePoly(const CvMat* coeffs, CvMat *roots2, + int maxiter CV_DEFAULT(20), int fig CV_DEFAULT(100)); + +/****************************************************************************************\ +* Matrix operations * +\****************************************************************************************/ + +/* Calculates cross product of two 3d vectors */ +CVAPI(void) cvCrossProduct( const CvArr* src1, const CvArr* src2, CvArr* dst ); + +/* Matrix transform: dst = A*B + C, C is optional */ +#define cvMatMulAdd( src1, src2, src3, dst ) cvGEMM( (src1), (src2), 1., (src3), 1., (dst), 0 ) +#define cvMatMul( src1, src2, dst ) cvMatMulAdd( (src1), (src2), NULL, (dst)) + +#define CV_GEMM_A_T 1 +#define CV_GEMM_B_T 2 +#define CV_GEMM_C_T 4 +/* Extended matrix transform: + dst = alpha*op(A)*op(B) + beta*op(C), where op(X) is X or X^T */ +CVAPI(void) cvGEMM( const CvArr* src1, const CvArr* src2, double alpha, + const CvArr* src3, double beta, CvArr* dst, + int tABC CV_DEFAULT(0)); +#define cvMatMulAddEx cvGEMM + +/* Transforms each element of source 
array and stores + resultant vectors in destination array */ +CVAPI(void) cvTransform( const CvArr* src, CvArr* dst, + const CvMat* transmat, + const CvMat* shiftvec CV_DEFAULT(NULL)); +#define cvMatMulAddS cvTransform + +/* Does perspective transform on every element of input array */ +CVAPI(void) cvPerspectiveTransform( const CvArr* src, CvArr* dst, + const CvMat* mat ); + +/* Calculates (A-delta)*(A-delta)^T (order=0) or (A-delta)^T*(A-delta) (order=1) */ +CVAPI(void) cvMulTransposed( const CvArr* src, CvArr* dst, int order, + const CvArr* delta CV_DEFAULT(NULL), + double scale CV_DEFAULT(1.) ); + +/* Tranposes matrix. Square matrices can be transposed in-place */ +CVAPI(void) cvTranspose( const CvArr* src, CvArr* dst ); +#define cvT cvTranspose + +/* Completes the symmetric matrix from the lower (LtoR=0) or from the upper (LtoR!=0) part */ +CVAPI(void) cvCompleteSymm( CvMat* matrix, int LtoR CV_DEFAULT(0) ); + +/* Mirror array data around horizontal (flip=0), + vertical (flip=1) or both(flip=-1) axises: + cvFlip(src) flips images vertically and sequences horizontally (inplace) */ +CVAPI(void) cvFlip( const CvArr* src, CvArr* dst CV_DEFAULT(NULL), + int flip_mode CV_DEFAULT(0)); +#define cvMirror cvFlip + + +#define CV_SVD_MODIFY_A 1 +#define CV_SVD_U_T 2 +#define CV_SVD_V_T 4 + +/* Performs Singular Value Decomposition of a matrix */ +CVAPI(void) cvSVD( CvArr* A, CvArr* W, CvArr* U CV_DEFAULT(NULL), + CvArr* V CV_DEFAULT(NULL), int flags CV_DEFAULT(0)); + +/* Performs Singular Value Back Substitution (solves A*X = B): + flags must be the same as in cvSVD */ +CVAPI(void) cvSVBkSb( const CvArr* W, const CvArr* U, + const CvArr* V, const CvArr* B, + CvArr* X, int flags ); + +#define CV_LU 0 +#define CV_SVD 1 +#define CV_SVD_SYM 2 +#define CV_CHOLESKY 3 +#define CV_QR 4 +#define CV_NORMAL 16 + +/* Inverts matrix */ +CVAPI(double) cvInvert( const CvArr* src, CvArr* dst, + int method CV_DEFAULT(CV_LU)); +#define cvInv cvInvert + +/* Solves linear system (src1)*(dst) 
= (src2) + (returns 0 if src1 is a singular and CV_LU method is used) */ +CVAPI(int) cvSolve( const CvArr* src1, const CvArr* src2, CvArr* dst, + int method CV_DEFAULT(CV_LU)); + +/* Calculates determinant of input matrix */ +CVAPI(double) cvDet( const CvArr* mat ); + +/* Calculates trace of the matrix (sum of elements on the main diagonal) */ +CVAPI(CvScalar) cvTrace( const CvArr* mat ); + +/* Finds eigen values and vectors of a symmetric matrix */ +CVAPI(void) cvEigenVV( CvArr* mat, CvArr* evects, CvArr* evals, + double eps CV_DEFAULT(0), + int lowindex CV_DEFAULT(-1), + int highindex CV_DEFAULT(-1)); + +///* Finds selected eigen values and vectors of a symmetric matrix */ +//CVAPI(void) cvSelectedEigenVV( CvArr* mat, CvArr* evects, CvArr* evals, +// int lowindex, int highindex ); + +/* Makes an identity matrix (mat_ij = i == j) */ +CVAPI(void) cvSetIdentity( CvArr* mat, CvScalar value CV_DEFAULT(cvRealScalar(1)) ); + +/* Fills matrix with given range of numbers */ +CVAPI(CvArr*) cvRange( CvArr* mat, double start, double end ); + +/* Calculates covariation matrix for a set of vectors */ +/* transpose([v1-avg, v2-avg,...]) * [v1-avg,v2-avg,...] */ +#define CV_COVAR_SCRAMBLED 0 + +/* [v1-avg, v2-avg,...] * transpose([v1-avg,v2-avg,...]) */ +#define CV_COVAR_NORMAL 1 + +/* do not calc average (i.e. 
mean vector) - use the input vector instead + (useful for calculating covariance matrix by parts) */ +#define CV_COVAR_USE_AVG 2 + +/* scale the covariance matrix coefficients by number of the vectors */ +#define CV_COVAR_SCALE 4 + +/* all the input vectors are stored in a single matrix, as its rows */ +#define CV_COVAR_ROWS 8 + +/* all the input vectors are stored in a single matrix, as its columns */ +#define CV_COVAR_COLS 16 + +CVAPI(void) cvCalcCovarMatrix( const CvArr** vects, int count, + CvArr* cov_mat, CvArr* avg, int flags ); + +#define CV_PCA_DATA_AS_ROW 0 +#define CV_PCA_DATA_AS_COL 1 +#define CV_PCA_USE_AVG 2 +CVAPI(void) cvCalcPCA( const CvArr* data, CvArr* mean, + CvArr* eigenvals, CvArr* eigenvects, int flags ); + +CVAPI(void) cvProjectPCA( const CvArr* data, const CvArr* mean, + const CvArr* eigenvects, CvArr* result ); + +CVAPI(void) cvBackProjectPCA( const CvArr* proj, const CvArr* mean, + const CvArr* eigenvects, CvArr* result ); + +/* Calculates Mahalanobis(weighted) distance */ +CVAPI(double) cvMahalanobis( const CvArr* vec1, const CvArr* vec2, const CvArr* mat ); +#define cvMahalonobis cvMahalanobis + +/****************************************************************************************\ +* Array Statistics * +\****************************************************************************************/ + +/* Finds sum of array elements */ +CVAPI(CvScalar) cvSum( const CvArr* arr ); + +/* Calculates number of non-zero pixels */ +CVAPI(int) cvCountNonZero( const CvArr* arr ); + +/* Calculates mean value of array elements */ +CVAPI(CvScalar) cvAvg( const CvArr* arr, const CvArr* mask CV_DEFAULT(NULL) ); + +/* Calculates mean and standard deviation of pixel values */ +CVAPI(void) cvAvgSdv( const CvArr* arr, CvScalar* mean, CvScalar* std_dev, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Finds global minimum, maximum and their positions */ +CVAPI(void) cvMinMaxLoc( const CvArr* arr, double* min_val, double* max_val, + CvPoint* min_loc 
CV_DEFAULT(NULL), + CvPoint* max_loc CV_DEFAULT(NULL), + const CvArr* mask CV_DEFAULT(NULL) ); + +/* types of array norm */ +#define CV_C 1 +#define CV_L1 2 +#define CV_L2 4 +#define CV_NORM_MASK 7 +#define CV_RELATIVE 8 +#define CV_DIFF 16 +#define CV_MINMAX 32 + +#define CV_DIFF_C (CV_DIFF | CV_C) +#define CV_DIFF_L1 (CV_DIFF | CV_L1) +#define CV_DIFF_L2 (CV_DIFF | CV_L2) +#define CV_RELATIVE_C (CV_RELATIVE | CV_C) +#define CV_RELATIVE_L1 (CV_RELATIVE | CV_L1) +#define CV_RELATIVE_L2 (CV_RELATIVE | CV_L2) + +/* Finds norm, difference norm or relative difference norm for an array (or two arrays) */ +CVAPI(double) cvNorm( const CvArr* arr1, const CvArr* arr2 CV_DEFAULT(NULL), + int norm_type CV_DEFAULT(CV_L2), + const CvArr* mask CV_DEFAULT(NULL) ); + +CVAPI(void) cvNormalize( const CvArr* src, CvArr* dst, + double a CV_DEFAULT(1.), double b CV_DEFAULT(0.), + int norm_type CV_DEFAULT(CV_L2), + const CvArr* mask CV_DEFAULT(NULL) ); + + +#define CV_REDUCE_SUM 0 +#define CV_REDUCE_AVG 1 +#define CV_REDUCE_MAX 2 +#define CV_REDUCE_MIN 3 + +CVAPI(void) cvReduce( const CvArr* src, CvArr* dst, int dim CV_DEFAULT(-1), + int op CV_DEFAULT(CV_REDUCE_SUM) ); + +/****************************************************************************************\ +* Discrete Linear Transforms and Related Functions * +\****************************************************************************************/ + +#define CV_DXT_FORWARD 0 +#define CV_DXT_INVERSE 1 +#define CV_DXT_SCALE 2 /* divide result by size of array */ +#define CV_DXT_INV_SCALE (CV_DXT_INVERSE + CV_DXT_SCALE) +#define CV_DXT_INVERSE_SCALE CV_DXT_INV_SCALE +#define CV_DXT_ROWS 4 /* transform each row individually */ +#define CV_DXT_MUL_CONJ 8 /* conjugate the second argument of cvMulSpectrums */ + +/* Discrete Fourier Transform: + complex->complex, + real->ccs (forward), + ccs->real (inverse) */ +CVAPI(void) cvDFT( const CvArr* src, CvArr* dst, int flags, + int nonzero_rows CV_DEFAULT(0) ); +#define cvFFT cvDFT + +/* 
Multiply results of DFTs: DFT(X)*DFT(Y) or DFT(X)*conj(DFT(Y)) */ +CVAPI(void) cvMulSpectrums( const CvArr* src1, const CvArr* src2, + CvArr* dst, int flags ); + +/* Finds optimal DFT vector size >= size0 */ +CVAPI(int) cvGetOptimalDFTSize( int size0 ); + +/* Discrete Cosine Transform */ +CVAPI(void) cvDCT( const CvArr* src, CvArr* dst, int flags ); + +/****************************************************************************************\ +* Dynamic data structures * +\****************************************************************************************/ + +/* Calculates length of sequence slice (with support of negative indices). */ +CVAPI(int) cvSliceLength( CvSlice slice, const CvSeq* seq ); + + +/* Creates new memory storage. + block_size == 0 means that default, + somewhat optimal size, is used (currently, it is 64K) */ +CVAPI(CvMemStorage*) cvCreateMemStorage( int block_size CV_DEFAULT(0)); + + +/* Creates a memory storage that will borrow memory blocks from parent storage */ +CVAPI(CvMemStorage*) cvCreateChildMemStorage( CvMemStorage* parent ); + + +/* Releases memory storage. All the children of a parent must be released before + the parent. A child storage returns all the blocks to parent when it is released */ +CVAPI(void) cvReleaseMemStorage( CvMemStorage** storage ); + + +/* Clears memory storage. This is the only way(!!!) (besides cvRestoreMemStoragePos) + to reuse memory allocated for the storage - cvClearSeq,cvClearSet ... + do not free any memory. 
+ A child storage returns all the blocks to the parent when it is cleared */ +CVAPI(void) cvClearMemStorage( CvMemStorage* storage ); + +/* Remember a storage "free memory" position */ +CVAPI(void) cvSaveMemStoragePos( const CvMemStorage* storage, CvMemStoragePos* pos ); + +/* Restore a storage "free memory" position */ +CVAPI(void) cvRestoreMemStoragePos( CvMemStorage* storage, CvMemStoragePos* pos ); + +/* Allocates continuous buffer of the specified size in the storage */ +CVAPI(void*) cvMemStorageAlloc( CvMemStorage* storage, size_t size ); + +/* Allocates string in memory storage */ +CVAPI(CvString) cvMemStorageAllocString( CvMemStorage* storage, const char* ptr, + int len CV_DEFAULT(-1) ); + +/* Creates new empty sequence that will reside in the specified storage */ +CVAPI(CvSeq*) cvCreateSeq( int seq_flags, int header_size, + int elem_size, CvMemStorage* storage ); + +/* Changes default size (granularity) of sequence blocks. + The default size is ~1Kbyte */ +CVAPI(void) cvSetSeqBlockSize( CvSeq* seq, int delta_elems ); + + +/* Adds new element to the end of sequence. Returns pointer to the element */ +CVAPI(schar*) cvSeqPush( CvSeq* seq, const void* element CV_DEFAULT(NULL)); + + +/* Adds new element to the beginning of sequence. 
Returns pointer to it */ +CVAPI(schar*) cvSeqPushFront( CvSeq* seq, const void* element CV_DEFAULT(NULL)); + + +/* Removes the last element from sequence and optionally saves it */ +CVAPI(void) cvSeqPop( CvSeq* seq, void* element CV_DEFAULT(NULL)); + + +/* Removes the first element from sequence and optioanally saves it */ +CVAPI(void) cvSeqPopFront( CvSeq* seq, void* element CV_DEFAULT(NULL)); + + +#define CV_FRONT 1 +#define CV_BACK 0 +/* Adds several new elements to the end of sequence */ +CVAPI(void) cvSeqPushMulti( CvSeq* seq, const void* elements, + int count, int in_front CV_DEFAULT(0) ); + +/* Removes several elements from the end of sequence and optionally saves them */ +CVAPI(void) cvSeqPopMulti( CvSeq* seq, void* elements, + int count, int in_front CV_DEFAULT(0) ); + +/* Inserts a new element in the middle of sequence. + cvSeqInsert(seq,0,elem) == cvSeqPushFront(seq,elem) */ +CVAPI(schar*) cvSeqInsert( CvSeq* seq, int before_index, + const void* element CV_DEFAULT(NULL)); + +/* Removes specified sequence element */ +CVAPI(void) cvSeqRemove( CvSeq* seq, int index ); + + +/* Removes all the elements from the sequence. The freed memory + can be reused later only by the same sequence unless cvClearMemStorage + or cvRestoreMemStoragePos is called */ +CVAPI(void) cvClearSeq( CvSeq* seq ); + + +/* Retrieves pointer to specified sequence element. + Negative indices are supported and mean counting from the end + (e.g -1 means the last sequence element) */ +CVAPI(schar*) cvGetSeqElem( const CvSeq* seq, int index ); + +/* Calculates index of the specified sequence element. + Returns -1 if element does not belong to the sequence */ +CVAPI(int) cvSeqElemIdx( const CvSeq* seq, const void* element, + CvSeqBlock** block CV_DEFAULT(NULL) ); + +/* Initializes sequence writer. 
The new elements will be added to the end of sequence */ +CVAPI(void) cvStartAppendToSeq( CvSeq* seq, CvSeqWriter* writer ); + + +/* Combination of cvCreateSeq and cvStartAppendToSeq */ +CVAPI(void) cvStartWriteSeq( int seq_flags, int header_size, + int elem_size, CvMemStorage* storage, + CvSeqWriter* writer ); + +/* Closes sequence writer, updates sequence header and returns pointer + to the resultant sequence + (which may be useful if the sequence was created using cvStartWriteSeq)) +*/ +CVAPI(CvSeq*) cvEndWriteSeq( CvSeqWriter* writer ); + + +/* Updates sequence header. May be useful to get access to some of previously + written elements via cvGetSeqElem or sequence reader */ +CVAPI(void) cvFlushSeqWriter( CvSeqWriter* writer ); + + +/* Initializes sequence reader. + The sequence can be read in forward or backward direction */ +CVAPI(void) cvStartReadSeq( const CvSeq* seq, CvSeqReader* reader, + int reverse CV_DEFAULT(0) ); + + +/* Returns current sequence reader position (currently observed sequence element) */ +CVAPI(int) cvGetSeqReaderPos( CvSeqReader* reader ); + + +/* Changes sequence reader position. It may seek to an absolute or + to relative to the current position */ +CVAPI(void) cvSetSeqReaderPos( CvSeqReader* reader, int index, + int is_relative CV_DEFAULT(0)); + +/* Copies sequence content to a continuous piece of memory */ +CVAPI(void*) cvCvtSeqToArray( const CvSeq* seq, void* elements, + CvSlice slice CV_DEFAULT(CV_WHOLE_SEQ) ); + +/* Creates sequence header for array. 
+ After that all the operations on sequences that do not alter the content + can be applied to the resultant sequence */ +CVAPI(CvSeq*) cvMakeSeqHeaderForArray( int seq_type, int header_size, + int elem_size, void* elements, int total, + CvSeq* seq, CvSeqBlock* block ); + +/* Extracts sequence slice (with or without copying sequence elements) */ +CVAPI(CvSeq*) cvSeqSlice( const CvSeq* seq, CvSlice slice, + CvMemStorage* storage CV_DEFAULT(NULL), + int copy_data CV_DEFAULT(0)); + +CV_INLINE CvSeq* cvCloneSeq( const CvSeq* seq, CvMemStorage* storage CV_DEFAULT(NULL)) +{ + return cvSeqSlice( seq, CV_WHOLE_SEQ, storage, 1 ); +} + +/* Removes sequence slice */ +CVAPI(void) cvSeqRemoveSlice( CvSeq* seq, CvSlice slice ); + +/* Inserts a sequence or array into another sequence */ +CVAPI(void) cvSeqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr ); + +/* a < b ? -1 : a > b ? 1 : 0 */ +typedef int (CV_CDECL* CvCmpFunc)(const void* a, const void* b, void* userdata ); + +/* Sorts sequence in-place given element comparison function */ +CVAPI(void) cvSeqSort( CvSeq* seq, CvCmpFunc func, void* userdata CV_DEFAULT(NULL) ); + +/* Finds element in a [sorted] sequence */ +CVAPI(schar*) cvSeqSearch( CvSeq* seq, const void* elem, CvCmpFunc func, + int is_sorted, int* elem_idx, + void* userdata CV_DEFAULT(NULL) ); + +/* Reverses order of sequence elements in-place */ +CVAPI(void) cvSeqInvert( CvSeq* seq ); + +/* Splits sequence into one or more equivalence classes using the specified criteria */ +CVAPI(int) cvSeqPartition( const CvSeq* seq, CvMemStorage* storage, + CvSeq** labels, CvCmpFunc is_equal, void* userdata ); + +/************ Internal sequence functions ************/ +CVAPI(void) cvChangeSeqBlock( void* reader, int direction ); +CVAPI(void) cvCreateSeqBlock( CvSeqWriter* writer ); + + +/* Creates a new set */ +CVAPI(CvSet*) cvCreateSet( int set_flags, int header_size, + int elem_size, CvMemStorage* storage ); + +/* Adds new element to the set and returns 
pointer to it */ +CVAPI(int) cvSetAdd( CvSet* set_header, CvSetElem* elem CV_DEFAULT(NULL), + CvSetElem** inserted_elem CV_DEFAULT(NULL) ); + +/* Fast variant of cvSetAdd */ +CV_INLINE CvSetElem* cvSetNew( CvSet* set_header ) +{ + CvSetElem* elem = set_header->free_elems; + if( elem ) + { + set_header->free_elems = elem->next_free; + elem->flags = elem->flags & CV_SET_ELEM_IDX_MASK; + set_header->active_count++; + } + else + cvSetAdd( set_header, NULL, (CvSetElem**)&elem ); + return elem; +} + +/* Removes set element given its pointer */ +CV_INLINE void cvSetRemoveByPtr( CvSet* set_header, void* elem ) +{ + CvSetElem* _elem = (CvSetElem*)elem; + assert( _elem->flags >= 0 /*&& (elem->flags & CV_SET_ELEM_IDX_MASK) < set_header->total*/ ); + _elem->next_free = set_header->free_elems; + _elem->flags = (_elem->flags & CV_SET_ELEM_IDX_MASK) | CV_SET_ELEM_FREE_FLAG; + set_header->free_elems = _elem; + set_header->active_count--; +} + +/* Removes element from the set by its index */ +CVAPI(void) cvSetRemove( CvSet* set_header, int index ); + +/* Returns a set element by index. If the element doesn't belong to the set, + NULL is returned */ +CV_INLINE CvSetElem* cvGetSetElem( const CvSet* set_header, int index ) +{ + CvSetElem* elem = (CvSetElem*)cvGetSeqElem( (CvSeq*)set_header, index ); + return elem && CV_IS_SET_ELEM( elem ) ? 
elem : 0; +} + +/* Removes all the elements from the set */ +CVAPI(void) cvClearSet( CvSet* set_header ); + +/* Creates new graph */ +CVAPI(CvGraph*) cvCreateGraph( int graph_flags, int header_size, + int vtx_size, int edge_size, + CvMemStorage* storage ); + +/* Adds new vertex to the graph */ +CVAPI(int) cvGraphAddVtx( CvGraph* graph, const CvGraphVtx* vtx CV_DEFAULT(NULL), + CvGraphVtx** inserted_vtx CV_DEFAULT(NULL) ); + + +/* Removes vertex from the graph together with all incident edges */ +CVAPI(int) cvGraphRemoveVtx( CvGraph* graph, int index ); +CVAPI(int) cvGraphRemoveVtxByPtr( CvGraph* graph, CvGraphVtx* vtx ); + + +/* Link two vertices specifed by indices or pointers if they + are not connected or return pointer to already existing edge + connecting the vertices. + Functions return 1 if a new edge was created, 0 otherwise */ +CVAPI(int) cvGraphAddEdge( CvGraph* graph, + int start_idx, int end_idx, + const CvGraphEdge* edge CV_DEFAULT(NULL), + CvGraphEdge** inserted_edge CV_DEFAULT(NULL) ); + +CVAPI(int) cvGraphAddEdgeByPtr( CvGraph* graph, + CvGraphVtx* start_vtx, CvGraphVtx* end_vtx, + const CvGraphEdge* edge CV_DEFAULT(NULL), + CvGraphEdge** inserted_edge CV_DEFAULT(NULL) ); + +/* Remove edge connecting two vertices */ +CVAPI(void) cvGraphRemoveEdge( CvGraph* graph, int start_idx, int end_idx ); +CVAPI(void) cvGraphRemoveEdgeByPtr( CvGraph* graph, CvGraphVtx* start_vtx, + CvGraphVtx* end_vtx ); + +/* Find edge connecting two vertices */ +CVAPI(CvGraphEdge*) cvFindGraphEdge( const CvGraph* graph, int start_idx, int end_idx ); +CVAPI(CvGraphEdge*) cvFindGraphEdgeByPtr( const CvGraph* graph, + const CvGraphVtx* start_vtx, + const CvGraphVtx* end_vtx ); +#define cvGraphFindEdge cvFindGraphEdge +#define cvGraphFindEdgeByPtr cvFindGraphEdgeByPtr + +/* Remove all vertices and edges from the graph */ +CVAPI(void) cvClearGraph( CvGraph* graph ); + + +/* Count number of edges incident to the vertex */ +CVAPI(int) cvGraphVtxDegree( const CvGraph* graph, int 
vtx_idx ); +CVAPI(int) cvGraphVtxDegreeByPtr( const CvGraph* graph, const CvGraphVtx* vtx ); + + +/* Retrieves graph vertex by given index */ +#define cvGetGraphVtx( graph, idx ) (CvGraphVtx*)cvGetSetElem((CvSet*)(graph), (idx)) + +/* Retrieves index of a graph vertex given its pointer */ +#define cvGraphVtxIdx( graph, vtx ) ((vtx)->flags & CV_SET_ELEM_IDX_MASK) + +/* Retrieves index of a graph edge given its pointer */ +#define cvGraphEdgeIdx( graph, edge ) ((edge)->flags & CV_SET_ELEM_IDX_MASK) + +#define cvGraphGetVtxCount( graph ) ((graph)->active_count) +#define cvGraphGetEdgeCount( graph ) ((graph)->edges->active_count) + +#define CV_GRAPH_VERTEX 1 +#define CV_GRAPH_TREE_EDGE 2 +#define CV_GRAPH_BACK_EDGE 4 +#define CV_GRAPH_FORWARD_EDGE 8 +#define CV_GRAPH_CROSS_EDGE 16 +#define CV_GRAPH_ANY_EDGE 30 +#define CV_GRAPH_NEW_TREE 32 +#define CV_GRAPH_BACKTRACKING 64 +#define CV_GRAPH_OVER -1 + +#define CV_GRAPH_ALL_ITEMS -1 + +/* flags for graph vertices and edges */ +#define CV_GRAPH_ITEM_VISITED_FLAG (1 << 30) +#define CV_IS_GRAPH_VERTEX_VISITED(vtx) \ + (((CvGraphVtx*)(vtx))->flags & CV_GRAPH_ITEM_VISITED_FLAG) +#define CV_IS_GRAPH_EDGE_VISITED(edge) \ + (((CvGraphEdge*)(edge))->flags & CV_GRAPH_ITEM_VISITED_FLAG) +#define CV_GRAPH_SEARCH_TREE_NODE_FLAG (1 << 29) +#define CV_GRAPH_FORWARD_EDGE_FLAG (1 << 28) + +typedef struct CvGraphScanner +{ + CvGraphVtx* vtx; /* current graph vertex (or current edge origin) */ + CvGraphVtx* dst; /* current graph edge destination vertex */ + CvGraphEdge* edge; /* current edge */ + + CvGraph* graph; /* the graph */ + CvSeq* stack; /* the graph vertex stack */ + int index; /* the lower bound of certainly visited vertices */ + int mask; /* event mask */ +} +CvGraphScanner; + +/* Creates new graph scanner. */ +CVAPI(CvGraphScanner*) cvCreateGraphScanner( CvGraph* graph, + CvGraphVtx* vtx CV_DEFAULT(NULL), + int mask CV_DEFAULT(CV_GRAPH_ALL_ITEMS)); + +/* Releases graph scanner. 
*/ +CVAPI(void) cvReleaseGraphScanner( CvGraphScanner** scanner ); + +/* Get next graph element */ +CVAPI(int) cvNextGraphItem( CvGraphScanner* scanner ); + +/* Creates a copy of graph */ +CVAPI(CvGraph*) cvCloneGraph( const CvGraph* graph, CvMemStorage* storage ); + +/****************************************************************************************\ +* Drawing * +\****************************************************************************************/ + +/****************************************************************************************\ +* Drawing functions work with images/matrices of arbitrary type. * +* For color images the channel order is BGR[A] * +* Antialiasing is supported only for 8-bit image now. * +* All the functions include parameter color that means rgb value (that may be * +* constructed with CV_RGB macro) for color images and brightness * +* for grayscale images. * +* If a drawn figure is partially or completely outside of the image, it is clipped.* +\****************************************************************************************/ + +#define CV_RGB( r, g, b ) cvScalar( (b), (g), (r), 0 ) +#define CV_FILLED -1 + +#define CV_AA 16 + +/* Draws 4-connected, 8-connected or antialiased line segment connecting two points */ +CVAPI(void) cvLine( CvArr* img, CvPoint pt1, CvPoint pt2, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ); + +/* Draws a rectangle given two opposite corners of the rectangle (pt1 & pt2), + if thickness<0 (e.g. 
thickness == CV_FILLED), the filled box is drawn */ +CVAPI(void) cvRectangle( CvArr* img, CvPoint pt1, CvPoint pt2, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), + int shift CV_DEFAULT(0)); + +/* Draws a rectangle specified by a CvRect structure */ +CVAPI(void) cvRectangleR( CvArr* img, CvRect r, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), + int shift CV_DEFAULT(0)); + + +/* Draws a circle with specified center and radius. + Thickness works in the same way as with cvRectangle */ +CVAPI(void) cvCircle( CvArr* img, CvPoint center, int radius, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0)); + +/* Draws ellipse outline, filled ellipse, elliptic arc or filled elliptic sector, + depending on , and parameters. The resultant figure + is rotated by . All the angles are in degrees */ +CVAPI(void) cvEllipse( CvArr* img, CvPoint center, CvSize axes, + double angle, double start_angle, double end_angle, + CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0)); + +CV_INLINE void cvEllipseBox( CvArr* img, CvBox2D box, CvScalar color, + int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ) +{ + CvSize axes; + axes.width = cvRound(box.size.width*0.5); + axes.height = cvRound(box.size.height*0.5); + + cvEllipse( img, cvPointFrom32f( box.center ), axes, box.angle, + 0, 360, color, thickness, line_type, shift ); +} + +/* Fills convex or monotonous polygon. 
*/ +CVAPI(void) cvFillConvexPoly( CvArr* img, const CvPoint* pts, int npts, CvScalar color, + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0)); + +/* Fills an area bounded by one or more arbitrary polygons */ +CVAPI(void) cvFillPoly( CvArr* img, CvPoint** pts, const int* npts, + int contours, CvScalar color, + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ); + +/* Draws one or more polygonal curves */ +CVAPI(void) cvPolyLine( CvArr* img, CvPoint** pts, const int* npts, int contours, + int is_closed, CvScalar color, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), int shift CV_DEFAULT(0) ); + +#define cvDrawRect cvRectangle +#define cvDrawLine cvLine +#define cvDrawCircle cvCircle +#define cvDrawEllipse cvEllipse +#define cvDrawPolyLine cvPolyLine + +/* Clips the line segment connecting *pt1 and *pt2 + by the rectangular window + (0<=xptr will point + to pt1 (or pt2, see left_to_right description) location in the image. + Returns the number of pixels on the line between the ending points. */ +CVAPI(int) cvInitLineIterator( const CvArr* image, CvPoint pt1, CvPoint pt2, + CvLineIterator* line_iterator, + int connectivity CV_DEFAULT(8), + int left_to_right CV_DEFAULT(0)); + +/* Moves iterator to the next line point */ +#define CV_NEXT_LINE_POINT( line_iterator ) \ +{ \ + int _line_iterator_mask = (line_iterator).err < 0 ? 
-1 : 0; \ + (line_iterator).err += (line_iterator).minus_delta + \ + ((line_iterator).plus_delta & _line_iterator_mask); \ + (line_iterator).ptr += (line_iterator).minus_step + \ + ((line_iterator).plus_step & _line_iterator_mask); \ +} + + +/* basic font types */ +#define CV_FONT_HERSHEY_SIMPLEX 0 +#define CV_FONT_HERSHEY_PLAIN 1 +#define CV_FONT_HERSHEY_DUPLEX 2 +#define CV_FONT_HERSHEY_COMPLEX 3 +#define CV_FONT_HERSHEY_TRIPLEX 4 +#define CV_FONT_HERSHEY_COMPLEX_SMALL 5 +#define CV_FONT_HERSHEY_SCRIPT_SIMPLEX 6 +#define CV_FONT_HERSHEY_SCRIPT_COMPLEX 7 + +/* font flags */ +#define CV_FONT_ITALIC 16 + +#define CV_FONT_VECTOR0 CV_FONT_HERSHEY_SIMPLEX + + +/* Font structure */ +typedef struct CvFont +{ + const char* nameFont; //Qt:nameFont + CvScalar color; //Qt:ColorFont -> cvScalar(blue_component, green_component, red\_component[, alpha_component]) + int font_face; //Qt: bool italic /* =CV_FONT_* */ + const int* ascii; /* font data and metrics */ + const int* greek; + const int* cyrillic; + float hscale, vscale; + float shear; /* slope coefficient: 0 - normal, >0 - italic */ + int thickness; //Qt: weight /* letters thickness */ + float dx; /* horizontal interval between letters */ + int line_type; //Qt: PointSize +} +CvFont; + +/* Initializes font structure used further in cvPutText */ +CVAPI(void) cvInitFont( CvFont* font, int font_face, + double hscale, double vscale, + double shear CV_DEFAULT(0), + int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8)); + +CV_INLINE CvFont cvFont( double scale, int thickness CV_DEFAULT(1) ) +{ + CvFont font; + cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, scale, scale, 0, thickness, CV_AA ); + return font; +} + +/* Renders text stroke with specified font and color at specified location. 
+ CvFont should be initialized with cvInitFont */ +CVAPI(void) cvPutText( CvArr* img, const char* text, CvPoint org, + const CvFont* font, CvScalar color ); + +/* Calculates bounding box of text stroke (useful for alignment) */ +CVAPI(void) cvGetTextSize( const char* text_string, const CvFont* font, + CvSize* text_size, int* baseline ); + + + +/* Unpacks color value, if arrtype is CV_8UC?, is treated as + packed color value, otherwise the first channels (depending on arrtype) + of destination scalar are set to the same value = */ +CVAPI(CvScalar) cvColorToScalar( double packed_color, int arrtype ); + +/* Returns the polygon points which make up the given ellipse. The ellipse is define by + the box of size 'axes' rotated 'angle' around the 'center'. A partial sweep + of the ellipse arc can be done by spcifying arc_start and arc_end to be something + other than 0 and 360, respectively. The input array 'pts' must be large enough to + hold the result. The total number of points stored into 'pts' is returned by this + function. */ +CVAPI(int) cvEllipse2Poly( CvPoint center, CvSize axes, + int angle, int arc_start, int arc_end, CvPoint * pts, int delta ); + +/* Draws contour outlines or filled interiors on the image */ +CVAPI(void) cvDrawContours( CvArr *img, CvSeq* contour, + CvScalar external_color, CvScalar hole_color, + int max_level, int thickness CV_DEFAULT(1), + int line_type CV_DEFAULT(8), + CvPoint offset CV_DEFAULT(cvPoint(0,0))); + +/* Does look-up transformation. 
Elements of the source array + (that should be 8uC1 or 8sC1) are used as indexes in lutarr 256-element table */ +CVAPI(void) cvLUT( const CvArr* src, CvArr* dst, const CvArr* lut ); + + +/******************* Iteration through the sequence tree *****************/ +typedef struct CvTreeNodeIterator +{ + const void* node; + int level; + int max_level; +} +CvTreeNodeIterator; + +CVAPI(void) cvInitTreeNodeIterator( CvTreeNodeIterator* tree_iterator, + const void* first, int max_level ); +CVAPI(void*) cvNextTreeNode( CvTreeNodeIterator* tree_iterator ); +CVAPI(void*) cvPrevTreeNode( CvTreeNodeIterator* tree_iterator ); + +/* Inserts sequence into tree with specified "parent" sequence. + If parent is equal to frame (e.g. the most external contour), + then added contour will have null pointer to parent. */ +CVAPI(void) cvInsertNodeIntoTree( void* node, void* parent, void* frame ); + +/* Removes contour from tree (together with the contour children). */ +CVAPI(void) cvRemoveNodeFromTree( void* node, void* frame ); + +/* Gathers pointers to all the sequences, + accessible from the , to the single sequence */ +CVAPI(CvSeq*) cvTreeToNodeSeq( const void* first, int header_size, + CvMemStorage* storage ); + +/* The function implements the K-means algorithm for clustering an array of sample + vectors in a specified number of classes */ +#define CV_KMEANS_USE_INITIAL_LABELS 1 +CVAPI(int) cvKMeans2( const CvArr* samples, int cluster_count, CvArr* labels, + CvTermCriteria termcrit, int attempts CV_DEFAULT(1), + CvRNG* rng CV_DEFAULT(0), int flags CV_DEFAULT(0), + CvArr* _centers CV_DEFAULT(0), double* compactness CV_DEFAULT(0) ); + +/****************************************************************************************\ +* System functions * +\****************************************************************************************/ + +/* Add the function pointers table with associated information to the IPP primitives list */ +CVAPI(int) cvRegisterModule( const CvModuleInfo* 
module_info ); + +/* Loads optimized functions from IPP, MKL etc. or switches back to pure C code */ +CVAPI(int) cvUseOptimized( int on_off ); + +/* Retrieves information about the registered modules and loaded optimized plugins */ +CVAPI(void) cvGetModuleInfo( const char* module_name, + const char** version, + const char** loaded_addon_plugins ); + +typedef void* (CV_CDECL *CvAllocFunc)(size_t size, void* userdata); +typedef int (CV_CDECL *CvFreeFunc)(void* pptr, void* userdata); + +/* Set user-defined memory management functions (substitutors for malloc and free) that + will be called by cvAlloc, cvFree and higher-level functions (e.g. cvCreateImage) */ +CVAPI(void) cvSetMemoryManager( CvAllocFunc alloc_func CV_DEFAULT(NULL), + CvFreeFunc free_func CV_DEFAULT(NULL), + void* userdata CV_DEFAULT(NULL)); + + +typedef IplImage* (CV_STDCALL* Cv_iplCreateImageHeader) + (int,int,int,char*,char*,int,int,int,int,int, + IplROI*,IplImage*,void*,IplTileInfo*); +typedef void (CV_STDCALL* Cv_iplAllocateImageData)(IplImage*,int,int); +typedef void (CV_STDCALL* Cv_iplDeallocate)(IplImage*,int); +typedef IplROI* (CV_STDCALL* Cv_iplCreateROI)(int,int,int,int,int); +typedef IplImage* (CV_STDCALL* Cv_iplCloneImage)(const IplImage*); + +/* Makes OpenCV use IPL functions for IplImage allocation/deallocation */ +CVAPI(void) cvSetIPLAllocators( Cv_iplCreateImageHeader create_header, + Cv_iplAllocateImageData allocate_data, + Cv_iplDeallocate deallocate, + Cv_iplCreateROI create_roi, + Cv_iplCloneImage clone_image ); + +#define CV_TURN_ON_IPL_COMPATIBILITY() \ + cvSetIPLAllocators( iplCreateImageHeader, iplAllocateImage, \ + iplDeallocate, iplCreateROI, iplCloneImage ) + +/****************************************************************************************\ +* Data Persistence * +\****************************************************************************************/ + +/********************************** High-level functions ********************************/ + +/* opens existing or 
creates new file storage */ +CVAPI(CvFileStorage*) cvOpenFileStorage( const char* filename, CvMemStorage* memstorage, + int flags, const char* encoding CV_DEFAULT(NULL) ); + +/* closes file storage and deallocates buffers */ +CVAPI(void) cvReleaseFileStorage( CvFileStorage** fs ); + +/* returns attribute value or 0 (NULL) if there is no such attribute */ +CVAPI(const char*) cvAttrValue( const CvAttrList* attr, const char* attr_name ); + +/* starts writing compound structure (map or sequence) */ +CVAPI(void) cvStartWriteStruct( CvFileStorage* fs, const char* name, + int struct_flags, const char* type_name CV_DEFAULT(NULL), + CvAttrList attributes CV_DEFAULT(cvAttrList())); + +/* finishes writing compound structure */ +CVAPI(void) cvEndWriteStruct( CvFileStorage* fs ); + +/* writes an integer */ +CVAPI(void) cvWriteInt( CvFileStorage* fs, const char* name, int value ); + +/* writes a floating-point number */ +CVAPI(void) cvWriteReal( CvFileStorage* fs, const char* name, double value ); + +/* writes a string */ +CVAPI(void) cvWriteString( CvFileStorage* fs, const char* name, + const char* str, int quote CV_DEFAULT(0) ); + +/* writes a comment */ +CVAPI(void) cvWriteComment( CvFileStorage* fs, const char* comment, + int eol_comment ); + +/* writes instance of a standard type (matrix, image, sequence, graph etc.) 
+ or user-defined type */ +CVAPI(void) cvWrite( CvFileStorage* fs, const char* name, const void* ptr, + CvAttrList attributes CV_DEFAULT(cvAttrList())); + +/* starts the next stream */ +CVAPI(void) cvStartNextStream( CvFileStorage* fs ); + +/* helper function: writes multiple integer or floating-point numbers */ +CVAPI(void) cvWriteRawData( CvFileStorage* fs, const void* src, + int len, const char* dt ); + +/* returns the hash entry corresponding to the specified literal key string or 0 + if there is no such a key in the storage */ +CVAPI(CvStringHashNode*) cvGetHashedKey( CvFileStorage* fs, const char* name, + int len CV_DEFAULT(-1), + int create_missing CV_DEFAULT(0)); + +/* returns file node with the specified key within the specified map + (collection of named nodes) */ +CVAPI(CvFileNode*) cvGetRootFileNode( const CvFileStorage* fs, + int stream_index CV_DEFAULT(0) ); + +/* returns file node with the specified key within the specified map + (collection of named nodes) */ +CVAPI(CvFileNode*) cvGetFileNode( CvFileStorage* fs, CvFileNode* map, + const CvStringHashNode* key, + int create_missing CV_DEFAULT(0) ); + +/* this is a slower version of cvGetFileNode that takes the key as a literal string */ +CVAPI(CvFileNode*) cvGetFileNodeByName( const CvFileStorage* fs, + const CvFileNode* map, + const char* name ); + +CV_INLINE int cvReadInt( const CvFileNode* node, int default_value CV_DEFAULT(0) ) +{ + return !node ? default_value : + CV_NODE_IS_INT(node->tag) ? node->data.i : + CV_NODE_IS_REAL(node->tag) ? cvRound(node->data.f) : 0x7fffffff; +} + + +CV_INLINE int cvReadIntByName( const CvFileStorage* fs, const CvFileNode* map, + const char* name, int default_value CV_DEFAULT(0) ) +{ + return cvReadInt( cvGetFileNodeByName( fs, map, name ), default_value ); +} + + +CV_INLINE double cvReadReal( const CvFileNode* node, double default_value CV_DEFAULT(0.) ) +{ + return !node ? default_value : + CV_NODE_IS_INT(node->tag) ? 
(double)node->data.i : + CV_NODE_IS_REAL(node->tag) ? node->data.f : 1e300; +} + + +CV_INLINE double cvReadRealByName( const CvFileStorage* fs, const CvFileNode* map, + const char* name, double default_value CV_DEFAULT(0.) ) +{ + return cvReadReal( cvGetFileNodeByName( fs, map, name ), default_value ); +} + + +CV_INLINE const char* cvReadString( const CvFileNode* node, + const char* default_value CV_DEFAULT(NULL) ) +{ + return !node ? default_value : CV_NODE_IS_STRING(node->tag) ? node->data.str.ptr : 0; +} + + +CV_INLINE const char* cvReadStringByName( const CvFileStorage* fs, const CvFileNode* map, + const char* name, const char* default_value CV_DEFAULT(NULL) ) +{ + return cvReadString( cvGetFileNodeByName( fs, map, name ), default_value ); +} + + +/* decodes standard or user-defined object and returns it */ +CVAPI(void*) cvRead( CvFileStorage* fs, CvFileNode* node, + CvAttrList* attributes CV_DEFAULT(NULL)); + +/* decodes standard or user-defined object and returns it */ +CV_INLINE void* cvReadByName( CvFileStorage* fs, const CvFileNode* map, + const char* name, CvAttrList* attributes CV_DEFAULT(NULL) ) +{ + return cvRead( fs, cvGetFileNodeByName( fs, map, name ), attributes ); +} + + +/* starts reading data from sequence or scalar numeric node */ +CVAPI(void) cvStartReadRawData( const CvFileStorage* fs, const CvFileNode* src, + CvSeqReader* reader ); + +/* reads multiple numbers and stores them to array */ +CVAPI(void) cvReadRawDataSlice( const CvFileStorage* fs, CvSeqReader* reader, + int count, void* dst, const char* dt ); + +/* combination of two previous functions for easier reading of whole sequences */ +CVAPI(void) cvReadRawData( const CvFileStorage* fs, const CvFileNode* src, + void* dst, const char* dt ); + +/* writes a copy of file node to file storage */ +CVAPI(void) cvWriteFileNode( CvFileStorage* fs, const char* new_node_name, + const CvFileNode* node, int embed ); + +/* returns name of file node */ +CVAPI(const char*) cvGetFileNodeName( const 
CvFileNode* node ); + +/*********************************** Adding own types ***********************************/ + +CVAPI(void) cvRegisterType( const CvTypeInfo* info ); +CVAPI(void) cvUnregisterType( const char* type_name ); +CVAPI(CvTypeInfo*) cvFirstType(void); +CVAPI(CvTypeInfo*) cvFindType( const char* type_name ); +CVAPI(CvTypeInfo*) cvTypeOf( const void* struct_ptr ); + +/* universal functions */ +CVAPI(void) cvRelease( void** struct_ptr ); +CVAPI(void*) cvClone( const void* struct_ptr ); + +/* simple API for reading/writing data */ +CVAPI(void) cvSave( const char* filename, const void* struct_ptr, + const char* name CV_DEFAULT(NULL), + const char* comment CV_DEFAULT(NULL), + CvAttrList attributes CV_DEFAULT(cvAttrList())); +CVAPI(void*) cvLoad( const char* filename, + CvMemStorage* memstorage CV_DEFAULT(NULL), + const char* name CV_DEFAULT(NULL), + const char** real_name CV_DEFAULT(NULL) ); + +/*********************************** Measuring Execution Time ***************************/ + +/* helper functions for RNG initialization and accurate time measurement: + uses internal clock counter on x86 */ +CVAPI(int64) cvGetTickCount( void ); +CVAPI(double) cvGetTickFrequency( void ); + +/*********************************** CPU capabilities ***********************************/ + +#define CV_CPU_NONE 0 +#define CV_CPU_MMX 1 +#define CV_CPU_SSE 2 +#define CV_CPU_SSE2 3 +#define CV_CPU_SSE3 4 +#define CV_CPU_SSSE3 5 +#define CV_CPU_SSE4_1 6 +#define CV_CPU_SSE4_2 7 +#define CV_CPU_POPCNT 8 +#define CV_CPU_AVX 10 +#define CV_HARDWARE_MAX_FEATURE 255 + +CVAPI(int) cvCheckHardwareSupport(int feature); + +/*********************************** Multi-Threading ************************************/ + +/* retrieve/set the number of threads used in OpenMP implementations */ +CVAPI(int) cvGetNumThreads( void ); +CVAPI(void) cvSetNumThreads( int threads CV_DEFAULT(0) ); +/* get index of the thread being executed */ +CVAPI(int) cvGetThreadNum( void ); + + 
+/********************************** Error Handling **************************************/ + +/* Get current OpenCV error status */ +CVAPI(int) cvGetErrStatus( void ); + +/* Sets error status silently */ +CVAPI(void) cvSetErrStatus( int status ); + +#define CV_ErrModeLeaf 0 /* Print error and exit program */ +#define CV_ErrModeParent 1 /* Print error and continue */ +#define CV_ErrModeSilent 2 /* Don't print and continue */ + +/* Retrieves current error processing mode */ +CVAPI(int) cvGetErrMode( void ); + +/* Sets error processing mode, returns previously used mode */ +CVAPI(int) cvSetErrMode( int mode ); + +/* Sets error status and performs some additional actions (displaying message box, + writing message to stderr, terminating application etc.) + depending on the current error mode */ +CVAPI(void) cvError( int status, const char* func_name, + const char* err_msg, const char* file_name, int line ); + +/* Retrieves textual description of the error given its code */ +CVAPI(const char*) cvErrorStr( int status ); + +/* Retrieves detailed information about the last error occurred */ +CVAPI(int) cvGetErrInfo( const char** errcode_desc, const char** description, + const char** filename, int* line ); + +/* Maps IPP error codes to the counterparts from OpenCV */ +CVAPI(int) cvErrorFromIppStatus( int ipp_status ); + +typedef int (CV_CDECL *CvErrorCallback)( int status, const char* func_name, + const char* err_msg, const char* file_name, int line, void* userdata ); + +/* Assigns a new error-handling function */ +CVAPI(CvErrorCallback) cvRedirectError( CvErrorCallback error_handler, + void* userdata CV_DEFAULT(NULL), + void** prev_userdata CV_DEFAULT(NULL) ); + +/* + Output to: + cvNulDevReport - nothing + cvStdErrReport - console(fprintf(stderr,...)) + cvGuiBoxReport - MessageBox(WIN32) + */ +CVAPI(int) cvNulDevReport( int status, const char* func_name, const char* err_msg, + const char* file_name, int line, void* userdata ); + +CVAPI(int) cvStdErrReport( int status, const 
char* func_name, const char* err_msg, + const char* file_name, int line, void* userdata ); + +CVAPI(int) cvGuiBoxReport( int status, const char* func_name, const char* err_msg, + const char* file_name, int line, void* userdata ); + +#define OPENCV_ERROR(status,func,context) \ +cvError((status),(func),(context),__FILE__,__LINE__) + +#define OPENCV_ERRCHK(func,context) \ +{if (cvGetErrStatus() >= 0) \ +{OPENCV_ERROR(CV_StsBackTrace,(func),(context));}} + +#define OPENCV_ASSERT(expr,func,context) \ +{if (! (expr)) \ +{OPENCV_ERROR(CV_StsInternal,(func),(context));}} + +#define OPENCV_RSTERR() (cvSetErrStatus(CV_StsOk)) + +#define OPENCV_CALL( Func ) \ +{ \ +Func; \ +} + + +/* CV_FUNCNAME macro defines icvFuncName constant which is used by CV_ERROR macro */ +#ifdef CV_NO_FUNC_NAMES +#define CV_FUNCNAME( Name ) +#define cvFuncName "" +#else +#define CV_FUNCNAME( Name ) \ +static char cvFuncName[] = Name +#endif + + +/* + CV_ERROR macro unconditionally raises error with passed code and message. + After raising error, control will be transferred to the exit label. + */ +#define CV_ERROR( Code, Msg ) \ +{ \ + cvError( (Code), cvFuncName, Msg, __FILE__, __LINE__ ); \ + __CV_EXIT__; \ +} + +/* Simplified form of CV_ERROR */ +#define CV_ERROR_FROM_CODE( code ) \ + CV_ERROR( code, "" ) + +/* + CV_CHECK macro checks error status after CV (or IPL) + function call. If error detected, control will be transferred to the exit + label. + */ +#define CV_CHECK() \ +{ \ + if( cvGetErrStatus() < 0 ) \ + CV_ERROR( CV_StsBackTrace, "Inner function failed." ); \ +} + + +/* + CV_CALL macro calls CV (or IPL) function, checks error status and + signals a error if the function failed. 
Useful in "parent node" + error procesing mode + */ +#define CV_CALL( Func ) \ +{ \ + Func; \ + CV_CHECK(); \ +} + + +/* Runtime assertion macro */ +#define CV_ASSERT( Condition ) \ +{ \ + if( !(Condition) ) \ + CV_ERROR( CV_StsInternal, "Assertion: " #Condition " failed" ); \ +} + +#define __CV_BEGIN__ { +#define __CV_END__ goto exit; exit: ; } +#define __CV_EXIT__ goto exit + +#ifdef __cplusplus +} + +// classes for automatic module/RTTI data registration/unregistration +struct CV_EXPORTS CvModule +{ + CvModule( CvModuleInfo* _info ); + ~CvModule(); + CvModuleInfo* info; + + static CvModuleInfo* first; + static CvModuleInfo* last; +}; + +struct CV_EXPORTS CvType +{ + CvType( const char* type_name, + CvIsInstanceFunc is_instance, CvReleaseFunc release=0, + CvReadFunc read=0, CvWriteFunc write=0, CvCloneFunc clone=0 ); + ~CvType(); + CvTypeInfo* info; + + static CvTypeInfo* first; + static CvTypeInfo* last; +}; + +#endif + +#endif diff --git a/opencv2/core/eigen.hpp b/opencv2/core/eigen.hpp new file mode 100644 index 0000000..505652f --- /dev/null +++ b/opencv2/core/eigen.hpp @@ -0,0 +1,186 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_CORE_EIGEN_HPP__ +#define __OPENCV_CORE_EIGEN_HPP__ + +#ifdef __cplusplus + +#include "cxcore.h" + +namespace cv +{ + +template +void eigen2cv( const Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& src, Mat& dst ) +{ + if( !(src.Flags & Eigen::RowMajorBit) ) + { + Mat _src(src.cols(), src.rows(), DataType<_Tp>::type, + (void*)src.data(), src.stride()*sizeof(_Tp)); + transpose(_src, dst); + } + else + { + Mat _src(src.rows(), src.cols(), DataType<_Tp>::type, + (void*)src.data(), src.stride()*sizeof(_Tp)); + _src.copyTo(dst); + } +} + +template +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, _rows, _cols, _options, _maxRows, _maxCols>& dst ) +{ + CV_DbgAssert(src.rows == _rows && src.cols == _cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(src.cols, src.rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else if( src.cols == src.rows ) + { + src.convertTo(_dst, _dst.type()); + transpose(_dst, _dst); + } + else + Mat(src.t()).convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(src.rows, src.cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + +template +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, Eigen::Dynamic, Eigen::Dynamic>& dst ) +{ + dst.resize(src.rows, src.cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(src.cols, src.rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else if( src.cols == src.rows ) + { + src.convertTo(_dst, _dst.type()); + transpose(_dst, _dst); + } + else + Mat(src.t()).convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(src.rows, src.cols, DataType<_Tp>::type, 
+ dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + + +template +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, Eigen::Dynamic, 1>& dst ) +{ + CV_Assert(src.cols == 1); + dst.resize(src.rows); + + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(src.cols, src.rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else + Mat(src.t()).convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(src.rows, src.cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + + +template +void cv2eigen( const Mat& src, + Eigen::Matrix<_Tp, 1, Eigen::Dynamic>& dst ) +{ + CV_Assert(src.rows == 1); + dst.resize(src.cols); + if( !(dst.Flags & Eigen::RowMajorBit) ) + { + Mat _dst(src.cols, src.rows, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + if( src.type() == _dst.type() ) + transpose(src, _dst); + else + Mat(src.t()).convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } + else + { + Mat _dst(src.rows, src.cols, DataType<_Tp>::type, + dst.data(), (size_t)(dst.stride()*sizeof(_Tp))); + src.convertTo(_dst, _dst.type()); + CV_DbgAssert(_dst.data == (uchar*)dst.data()); + } +} + +} + +#endif + +#endif + diff --git a/opencv2/core/internal.hpp b/opencv2/core/internal.hpp new file mode 100644 index 0000000..9826bf7 --- /dev/null +++ b/opencv2/core/internal.hpp @@ -0,0 +1,707 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* The header is for internal use and it is likely to change. 
+ It contains some macro definitions that are used in cxcore, cv, cvaux + and, probably, other libraries. If you need some of this functionality, + the safe way is to copy it into your code and rename the macros. +*/ +#ifndef __OPENCV_CORE_INTERNAL_HPP__ +#define __OPENCV_CORE_INTERNAL_HPP__ + +#include + +#if defined WIN32 || defined _WIN32 +# ifndef WIN32 +# define WIN32 +# endif +# ifndef _WIN32 +# define _WIN32 +# endif +#endif + +#if defined WIN32 || defined WINCE +#ifndef _WIN32_WINNT // This is needed for the declaration of TryEnterCriticalSection in winbase.h with Visual Studio 2005 (and older?) +#define _WIN32_WINNT 0x0400 // http://msdn.microsoft.com/en-us/library/ms686857(VS.85).aspx +#endif +#include +#undef small +#undef min +#undef max +#else +#include +#include +#endif + +#ifdef __BORLANDC__ +#ifndef WIN32 + #define WIN32 +#endif +#ifndef _WIN32 + #define _WIN32 +#endif + #define CV_DLL + #undef _CV_ALWAYS_PROFILE_ + #define _CV_ALWAYS_NO_PROFILE_ +#endif + +#ifndef FALSE +#define FALSE 0 +#endif +#ifndef TRUE +#define TRUE 1 +#endif + +#define __BEGIN__ __CV_BEGIN__ +#define __END__ __CV_END__ +#define EXIT __CV_EXIT__ + +#ifdef HAVE_IPP +#include "ipp.h" + +CV_INLINE IppiSize ippiSize(int width, int height) +{ + IppiSize size = { width, height }; + return size; +} +#endif + +#if defined __SSE2__ || _MSC_VER >= 1300 +#include "emmintrin.h" +#define CV_SSE 1 +#define CV_SSE2 1 +#if defined __SSE3__ || _MSC_VER >= 1500 +#include "pmmintrin.h" +#define CV_SSE3 1 +#endif +#else +#define CV_SSE 0 +#define CV_SSE2 0 +#define CV_SSE3 0 +#endif + +#if defined ANDROID && defined __ARM_NEON__ +#include "arm_neon.h" +#define CV_NEON 1 + +#define CPU_HAS_NEON_FEATURE (true) +//TODO: make real check using stuff from "cpu-features.h" +//((bool)android_getCpuFeatures() & ANDROID_CPU_ARM_FEATURE_NEON) +#else +#define CV_NEON 0 +#define CPU_HAS_NEON_FEATURE (false) +#endif + +#ifndef IPPI_CALL +#define IPPI_CALL(func) CV_Assert((func) >= 0) +#endif + +#ifdef 
HAVE_TBB + #include "tbb/tbb_stddef.h" + #if TBB_VERSION_MAJOR*100 + TBB_VERSION_MINOR >= 202 + #include "tbb/tbb.h" + #include "tbb/task.h" + #undef min + #undef max + #else + #undef HAVE_TBB + #endif +#endif + +#ifdef HAVE_EIGEN + #include + #include "opencv2/core/eigen.hpp" +#endif + +#ifdef __cplusplus + +#ifdef HAVE_TBB + namespace cv + { + typedef tbb::blocked_range BlockedRange; + + template static inline + void parallel_for( const BlockedRange& range, const Body& body ) + { + tbb::parallel_for(range, body); + } + + template static inline + void parallel_do( Iterator first, Iterator last, const Body& body ) + { + tbb::parallel_do(first, last, body); + } + + typedef tbb::split Split; + + template static inline + void parallel_reduce( const BlockedRange& range, Body& body ) + { + tbb::parallel_reduce(range, body); + } + + typedef tbb::concurrent_vector ConcurrentRectVector; + } +#else + namespace cv + { + class BlockedRange + { + public: + BlockedRange() : _begin(0), _end(0), _grainsize(0) {} + BlockedRange(int b, int e, int g=1) : _begin(b), _end(e), _grainsize(g) {} + int begin() const { return _begin; } + int end() const { return _end; } + int grainsize() const { return _grainsize; } + + protected: + int _begin, _end, _grainsize; + }; + + +#ifdef HAVE_THREADING_FRAMEWORK +#include "threading_framework.hpp" + + template + static void parallel_for( const BlockedRange& range, const Body& body ) + { + tf::parallel_for(range, body); + } + typedef tf::ConcurrentVector ConcurrentRectVector; +#else + template static inline + void parallel_for( const BlockedRange& range, const Body& body ) + { + body(range); + } + typedef std::vector ConcurrentRectVector; +#endif + + template static inline + void parallel_do( Iterator first, Iterator last, const Body& body ) + { + for( ; first != last; ++first ) + body(*first); + } + + class Split {}; + + template static inline + void parallel_reduce( const BlockedRange& range, Body& body ) + { + body(range); + } + + } +#endif 
+#endif + +/* maximal size of vector to run matrix operations on it inline (i.e. w/o ipp calls) */ +#define CV_MAX_INLINE_MAT_OP_SIZE 10 + +/* maximal linear size of matrix to allocate it on stack. */ +#define CV_MAX_LOCAL_MAT_SIZE 32 + +/* maximal size of local memory storage */ +#define CV_MAX_LOCAL_SIZE \ + (CV_MAX_LOCAL_MAT_SIZE*CV_MAX_LOCAL_MAT_SIZE*(int)sizeof(double)) + +/* default image row align (in bytes) */ +#define CV_DEFAULT_IMAGE_ROW_ALIGN 4 + +/* matrices are continuous by default */ +#define CV_DEFAULT_MAT_ROW_ALIGN 1 + +/* maximum size of dynamic memory buffer. + cvAlloc reports an error if a larger block is requested. */ +#define CV_MAX_ALLOC_SIZE (((size_t)1 << (sizeof(size_t)*8-2))) + +/* the alignment of all the allocated buffers */ +#define CV_MALLOC_ALIGN 16 + +/* default alignment for dynamic data strucutures, resided in storages. */ +#define CV_STRUCT_ALIGN ((int)sizeof(double)) + +/* default storage block size */ +#define CV_STORAGE_BLOCK_SIZE ((1<<16) - 128) + +/* default memory block for sparse array elements */ +#define CV_SPARSE_MAT_BLOCK (1<<12) + +/* initial hash table size */ +#define CV_SPARSE_HASH_SIZE0 (1<<10) + +/* maximal average node_count/hash_size ratio beyond which hash table is resized */ +#define CV_SPARSE_HASH_RATIO 3 + +/* max length of strings */ +#define CV_MAX_STRLEN 1024 + +#if 0 /*def CV_CHECK_FOR_NANS*/ + #define CV_CHECK_NANS( arr ) cvCheckArray((arr)) +#else + #define CV_CHECK_NANS( arr ) +#endif + +/****************************************************************************************\ +* Common declarations * +\****************************************************************************************/ + +/* get alloca declaration */ +#ifdef __GNUC__ + #undef alloca + #define alloca __builtin_alloca + #define CV_HAVE_ALLOCA 1 +#elif defined WIN32 || defined _WIN32 || \ + defined WINCE || defined _MSC_VER || defined __BORLANDC__ + #include + #define CV_HAVE_ALLOCA 1 +#elif defined HAVE_ALLOCA_H + #include + 
#define CV_HAVE_ALLOCA 1 +#elif defined HAVE_ALLOCA + #include + #define CV_HAVE_ALLOCA 1 +#else + #undef CV_HAVE_ALLOCA +#endif + +#ifdef __GNUC__ +#define CV_DECL_ALIGNED(x) __attribute__ ((aligned (x))) +#elif defined _MSC_VER +#define CV_DECL_ALIGNED(x) __declspec(align(x)) +#else +#define CV_DECL_ALIGNED(x) +#endif + +#if CV_HAVE_ALLOCA +/* ! DO NOT make it an inline function */ +#define cvStackAlloc(size) cvAlignPtr( alloca((size) + CV_MALLOC_ALIGN), CV_MALLOC_ALIGN ) +#endif + +#ifndef CV_IMPL +#define CV_IMPL CV_EXTERN_C +#endif + +#define CV_DBG_BREAK() { volatile int* crashMe = 0; *crashMe = 0; } + +/* default step, set in case of continuous data + to work around checks for valid step in some ipp functions */ +#define CV_STUB_STEP (1 << 30) + +#define CV_SIZEOF_FLOAT ((int)sizeof(float)) +#define CV_SIZEOF_SHORT ((int)sizeof(short)) + +#define CV_ORIGIN_TL 0 +#define CV_ORIGIN_BL 1 + +/* IEEE754 constants and macros */ +#define CV_POS_INF 0x7f800000 +#define CV_NEG_INF 0x807fffff /* CV_TOGGLE_FLT(0xff800000) */ +#define CV_1F 0x3f800000 +#define CV_TOGGLE_FLT(x) ((x)^((int)(x) < 0 ? 0x7fffffff : 0)) +#define CV_TOGGLE_DBL(x) \ + ((x)^((int64)(x) < 0 ? CV_BIG_INT(0x7fffffffffffffff) : 0)) + +#define CV_NOP(a) (a) +#define CV_ADD(a, b) ((a) + (b)) +#define CV_SUB(a, b) ((a) - (b)) +#define CV_MUL(a, b) ((a) * (b)) +#define CV_AND(a, b) ((a) & (b)) +#define CV_OR(a, b) ((a) | (b)) +#define CV_XOR(a, b) ((a) ^ (b)) +#define CV_ANDN(a, b) (~(a) & (b)) +#define CV_ORN(a, b) (~(a) | (b)) +#define CV_SQR(a) ((a) * (a)) + +#define CV_LT(a, b) ((a) < (b)) +#define CV_LE(a, b) ((a) <= (b)) +#define CV_EQ(a, b) ((a) == (b)) +#define CV_NE(a, b) ((a) != (b)) +#define CV_GT(a, b) ((a) > (b)) +#define CV_GE(a, b) ((a) >= (b)) + +#define CV_NONZERO(a) ((a) != 0) +#define CV_NONZERO_FLT(a) (((a)+(a)) != 0) + +/* general-purpose saturation macros */ +#define CV_CAST_8U(t) (uchar)(!((t) & ~255) ? (t) : (t) > 0 ? 255 : 0) +#define CV_CAST_8S(t) (schar)(!(((t)+128) & ~255) ? 
(t) : (t) > 0 ? 127 : -128) +#define CV_CAST_16U(t) (ushort)(!((t) & ~65535) ? (t) : (t) > 0 ? 65535 : 0) +#define CV_CAST_16S(t) (short)(!(((t)+32768) & ~65535) ? (t) : (t) > 0 ? 32767 : -32768) +#define CV_CAST_32S(t) (int)(t) +#define CV_CAST_64S(t) (int64)(t) +#define CV_CAST_32F(t) (float)(t) +#define CV_CAST_64F(t) (double)(t) + +#define CV_PASTE2(a,b) a##b +#define CV_PASTE(a,b) CV_PASTE2(a,b) + +#define CV_EMPTY +#define CV_MAKE_STR(a) #a + +#define CV_ZERO_OBJ(x) memset((x), 0, sizeof(*(x))) + +#define CV_DIM(static_array) ((int)(sizeof(static_array)/sizeof((static_array)[0]))) + +#define cvUnsupportedFormat "Unsupported format" + +CV_INLINE void* cvAlignPtr( const void* ptr, int align CV_DEFAULT(32) ) +{ + assert( (align & (align-1)) == 0 ); + return (void*)( ((size_t)ptr + align - 1) & ~(size_t)(align-1) ); +} + +CV_INLINE int cvAlign( int size, int align ) +{ + assert( (align & (align-1)) == 0 && size < INT_MAX ); + return (size + align - 1) & -align; +} + +CV_INLINE CvSize cvGetMatSize( const CvMat* mat ) +{ + CvSize size; + size.width = mat->cols; + size.height = mat->rows; + return size; +} + +#define CV_DESCALE(x,n) (((x) + (1 << ((n)-1))) >> (n)) +#define CV_FLT_TO_FIX(x,n) cvRound((x)*(1<<(n))) + +/****************************************************************************************\ + + Generic implementation of QuickSort algorithm. + ---------------------------------------------- + Using this macro user can declare customized sort function that can be much faster + than built-in qsort function because of lower overhead on elements + comparison and exchange. The macro takes less_than (or LT) argument - a macro or function + that takes 2 arguments returns non-zero if the first argument should be before the second + one in the sorted sequence and zero otherwise. + + Example: + + Suppose that the task is to sort points by ascending of y coordinates and if + y's are equal x's should ascend. 
+ + The code is: + ------------------------------------------------------------------------------ + #define cmp_pts( pt1, pt2 ) \ + ((pt1).y < (pt2).y || ((pt1).y < (pt2).y && (pt1).x < (pt2).x)) + + [static] CV_IMPLEMENT_QSORT( icvSortPoints, CvPoint, cmp_pts ) + ------------------------------------------------------------------------------ + + After that the function "void icvSortPoints( CvPoint* array, size_t total, int aux );" + is available to user. + + aux is an additional parameter, which can be used when comparing elements. + The current implementation was derived from *BSD system qsort(): + + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + +\****************************************************************************************/ + +#define CV_IMPLEMENT_QSORT_EX( func_name, T, LT, user_data_type ) \ +void func_name( T *array, size_t total, user_data_type aux ) \ +{ \ + int isort_thresh = 7; \ + T t; \ + int sp = 0; \ + \ + struct \ + { \ + T *lb; \ + T *ub; \ + } \ + stack[48]; \ + \ + aux = aux; \ + \ + if( total <= 1 ) \ + return; \ + \ + stack[0].lb = array; \ + stack[0].ub = array + (total - 1); \ + \ + while( sp >= 0 ) \ + { \ + T* left = stack[sp].lb; \ + T* right = stack[sp--].ub; \ + \ + for(;;) \ + { \ + int i, n = (int)(right - left) + 1, m; \ + T* ptr; \ + T* ptr2; \ + \ + if( n <= isort_thresh ) \ + { \ + insert_sort: \ + for( ptr = left + 1; ptr <= right; ptr++ ) \ + { \ + for( ptr2 = ptr; ptr2 > left && LT(ptr2[0],ptr2[-1]); ptr2--) \ + CV_SWAP( ptr2[0], ptr2[-1], t ); \ + } \ + break; \ + } \ + else \ + { \ + T* left0; \ + T* left1; \ + T* right0; \ + T* right1; \ + T* pivot; \ + T* a; \ + T* b; \ + T* c; \ + int swap_cnt = 0; \ + \ + left0 = left; \ + right0 = right; \ + pivot = left + (n/2); \ + \ + if( n > 40 ) \ + { \ + int d = n / 8; \ + a = left, b = left + d, c = left + 2*d; \ + left = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \ + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \ + \ + a = pivot - d, b = pivot, c = pivot + d; \ + pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \ + : (LT(*c, *b) ? b : (LT(*a, *c) ? 
a : c)); \ + \ + a = right - 2*d, b = right - d, c = right; \ + right = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \ + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \ + } \ + \ + a = left, b = pivot, c = right; \ + pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) \ + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); \ + if( pivot != left0 ) \ + { \ + CV_SWAP( *pivot, *left0, t ); \ + pivot = left0; \ + } \ + left = left1 = left0 + 1; \ + right = right1 = right0; \ + \ + for(;;) \ + { \ + while( left <= right && !LT(*pivot, *left) ) \ + { \ + if( !LT(*left, *pivot) ) \ + { \ + if( left > left1 ) \ + CV_SWAP( *left1, *left, t ); \ + swap_cnt = 1; \ + left1++; \ + } \ + left++; \ + } \ + \ + while( left <= right && !LT(*right, *pivot) ) \ + { \ + if( !LT(*pivot, *right) ) \ + { \ + if( right < right1 ) \ + CV_SWAP( *right1, *right, t ); \ + swap_cnt = 1; \ + right1--; \ + } \ + right--; \ + } \ + \ + if( left > right ) \ + break; \ + CV_SWAP( *left, *right, t ); \ + swap_cnt = 1; \ + left++; \ + right--; \ + } \ + \ + if( swap_cnt == 0 ) \ + { \ + left = left0, right = right0; \ + goto insert_sort; \ + } \ + \ + n = MIN( (int)(left1 - left0), (int)(left - left1) ); \ + for( i = 0; i < n; i++ ) \ + CV_SWAP( left0[i], left[i-n], t ); \ + \ + n = MIN( (int)(right0 - right1), (int)(right1 - right) ); \ + for( i = 0; i < n; i++ ) \ + CV_SWAP( left[i], right0[i-n+1], t ); \ + n = (int)(left - left1); \ + m = (int)(right1 - right); \ + if( n > 1 ) \ + { \ + if( m > 1 ) \ + { \ + if( n > m ) \ + { \ + stack[++sp].lb = left0; \ + stack[sp].ub = left0 + n - 1; \ + left = right0 - m + 1, right = right0; \ + } \ + else \ + { \ + stack[++sp].lb = right0 - m + 1; \ + stack[sp].ub = right0; \ + left = left0, right = left0 + n - 1; \ + } \ + } \ + else \ + left = left0, right = left0 + n - 1; \ + } \ + else if( m > 1 ) \ + left = right0 - m + 1, right = right0; \ + else \ + break; \ + } \ + } \ + } \ +} + +#define CV_IMPLEMENT_QSORT( func_name, T, cmp ) \ + 
CV_IMPLEMENT_QSORT_EX( func_name, T, cmp, int ) + +/****************************************************************************************\ +* Structures and macros for integration with IPP * +\****************************************************************************************/ + +/* IPP-compatible return codes */ +typedef enum CvStatus +{ + CV_BADMEMBLOCK_ERR = -113, + CV_INPLACE_NOT_SUPPORTED_ERR= -112, + CV_UNMATCHED_ROI_ERR = -111, + CV_NOTFOUND_ERR = -110, + CV_BADCONVERGENCE_ERR = -109, + + CV_BADDEPTH_ERR = -107, + CV_BADROI_ERR = -106, + CV_BADHEADER_ERR = -105, + CV_UNMATCHED_FORMATS_ERR = -104, + CV_UNSUPPORTED_COI_ERR = -103, + CV_UNSUPPORTED_CHANNELS_ERR = -102, + CV_UNSUPPORTED_DEPTH_ERR = -101, + CV_UNSUPPORTED_FORMAT_ERR = -100, + + CV_BADARG_ERR = -49, //ipp comp + CV_NOTDEFINED_ERR = -48, //ipp comp + + CV_BADCHANNELS_ERR = -47, //ipp comp + CV_BADRANGE_ERR = -44, //ipp comp + CV_BADSTEP_ERR = -29, //ipp comp + + CV_BADFLAG_ERR = -12, + CV_DIV_BY_ZERO_ERR = -11, //ipp comp + CV_BADCOEF_ERR = -10, + + CV_BADFACTOR_ERR = -7, + CV_BADPOINT_ERR = -6, + CV_BADSCALE_ERR = -4, + CV_OUTOFMEM_ERR = -3, + CV_NULLPTR_ERR = -2, + CV_BADSIZE_ERR = -1, + CV_NO_ERR = 0, + CV_OK = CV_NO_ERR +} +CvStatus; + +#define CV_NOTHROW throw() + +typedef struct CvFuncTable +{ + void* fn_2d[CV_DEPTH_MAX]; +} +CvFuncTable; + +typedef struct CvBigFuncTable +{ + void* fn_2d[CV_DEPTH_MAX*4]; +} +CvBigFuncTable; + +#define CV_INIT_FUNC_TAB( tab, FUNCNAME, FLAG ) \ + (tab).fn_2d[CV_8U] = (void*)FUNCNAME##_8u##FLAG; \ + (tab).fn_2d[CV_8S] = 0; \ + (tab).fn_2d[CV_16U] = (void*)FUNCNAME##_16u##FLAG; \ + (tab).fn_2d[CV_16S] = (void*)FUNCNAME##_16s##FLAG; \ + (tab).fn_2d[CV_32S] = (void*)FUNCNAME##_32s##FLAG; \ + (tab).fn_2d[CV_32F] = (void*)FUNCNAME##_32f##FLAG; \ + (tab).fn_2d[CV_64F] = (void*)FUNCNAME##_64f##FLAG + +#endif diff --git a/opencv2/core/mat.hpp b/opencv2/core/mat.hpp new file mode 100644 index 0000000..6f444e4 --- /dev/null +++ b/opencv2/core/mat.hpp @@ -0,0 
+1,2557 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CORE_MATRIX_OPERATIONS_HPP__ +#define __OPENCV_CORE_MATRIX_OPERATIONS_HPP__ + +#ifndef SKIP_INCLUDES +#include +#include +#endif // SKIP_INCLUDES + +#ifdef __cplusplus + +namespace cv +{ + +//////////////////////////////// Mat //////////////////////////////// + +inline Mat::Mat() + : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows) +{ +} + +inline Mat::Mat(int _rows, int _cols, int _type) + : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows) +{ + create(_rows, _cols, _type); +} + +inline Mat::Mat(int _rows, int _cols, int _type, const Scalar& _s) + : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows) +{ + create(_rows, _cols, _type); + *this = _s; +} + +inline Mat::Mat(Size _sz, int _type) + : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows) +{ + create( _sz.height, _sz.width, _type ); +} + +inline Mat::Mat(Size _sz, int _type, const Scalar& _s) + : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows) +{ + create(_sz.height, _sz.width, _type); + *this = _s; +} + +inline Mat::Mat(int _dims, const int* _sz, int _type) + : 
flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows) +{ + create(_dims, _sz, _type); +} + +inline Mat::Mat(int _dims, const int* _sz, int _type, const Scalar& _s) + : flags(0), dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), datalimit(0), allocator(0), size(&rows) +{ + create(_dims, _sz, _type); + *this = _s; +} + +inline Mat::Mat(const Mat& m) + : flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), data(m.data), + refcount(m.refcount), datastart(m.datastart), dataend(m.dataend), + datalimit(m.datalimit), allocator(m.allocator), size(&rows) +{ + if( refcount ) + CV_XADD(refcount, 1); + if( m.dims <= 2 ) + { + step[0] = m.step[0]; step[1] = m.step[1]; + } + else + { + dims = 0; + copySize(m); + } +} + +inline Mat::Mat(int _rows, int _cols, int _type, void* _data, size_t _step) + : flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_rows), cols(_cols), + data((uchar*)_data), refcount(0), datastart((uchar*)_data), dataend(0), + datalimit(0), allocator(0), size(&rows) +{ + size_t esz = CV_ELEM_SIZE(_type), minstep = cols*esz; + if( _step == AUTO_STEP ) + { + _step = minstep; + flags |= CONTINUOUS_FLAG; + } + else + { + if( rows == 1 ) _step = minstep; + CV_DbgAssert( _step >= minstep ); + flags |= _step == minstep ? 
CONTINUOUS_FLAG : 0; + } + step[0] = _step; step[1] = esz; + datalimit = datastart + _step*rows; + dataend = datalimit - _step + minstep; +} + +inline Mat::Mat(Size _sz, int _type, void* _data, size_t _step) + : flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_sz.height), cols(_sz.width), + data((uchar*)_data), refcount(0), datastart((uchar*)_data), dataend(0), + datalimit(0), allocator(0), size(&rows) +{ + size_t esz = CV_ELEM_SIZE(_type), minstep = cols*esz; + if( _step == AUTO_STEP ) + { + _step = minstep; + flags |= CONTINUOUS_FLAG; + } + else + { + if( rows == 1 ) _step = minstep; + CV_DbgAssert( _step >= minstep ); + flags |= _step == minstep ? CONTINUOUS_FLAG : 0; + } + step[0] = _step; step[1] = esz; + datalimit = datastart + _step*rows; + dataend = datalimit - _step + minstep; +} + + +inline Mat::Mat(const CvMat* m, bool copyData) + : flags(MAGIC_VAL + (m->type & (CV_MAT_TYPE_MASK|CV_MAT_CONT_FLAG))), + dims(2), rows(m->rows), cols(m->cols), data(m->data.ptr), refcount(0), + datastart(m->data.ptr), allocator(0), size(&rows) +{ + if( !copyData ) + { + size_t esz = CV_ELEM_SIZE(m->type), minstep = cols*esz, _step = m->step; + if( _step == 0 ) + _step = minstep; + datalimit = datastart + _step*rows; + dataend = datalimit - _step + minstep; + step[0] = _step; step[1] = esz; + } + else + { + data = datastart = dataend = 0; + Mat(m->rows, m->cols, m->type, m->data.ptr, m->step).copyTo(*this); + } +} + +template inline Mat::Mat(const vector<_Tp>& vec, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows((int)vec.size()), cols(1), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if(vec.empty()) + return; + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + data = datastart = (uchar*)&vec[0]; + datalimit = dataend = datastart + rows*step[0]; + } + else + Mat((int)vec.size(), 1, DataType<_Tp>::type, (uchar*)&vec[0]).copyTo(*this); +} + + +template inline Mat::Mat(const Vec<_Tp, n>& 
vec, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows(n), cols(1), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + data = datastart = (uchar*)vec.val; + datalimit = dataend = datastart + rows*step[0]; + } + else + Mat(n, 1, DataType<_Tp>::type, (void*)vec.val).copyTo(*this); +} + + +template inline Mat::Mat(const Matx<_Tp,m,n>& M, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows(m), cols(n), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if( !copyData ) + { + step[0] = cols*sizeof(_Tp); + step[1] = sizeof(_Tp); + data = datastart = (uchar*)M.val; + datalimit = dataend = datastart + rows*step[0]; + } + else + Mat(m, n, DataType<_Tp>::type, (uchar*)M.val).copyTo(*this); +} + + +template inline Mat::Mat(const Point_<_Tp>& pt, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows(2), cols(1), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + data = datastart = (uchar*)&pt.x; + datalimit = dataend = datastart + rows*step[0]; + } + else + { + create(2, 1, DataType<_Tp>::type); + ((_Tp*)data)[0] = pt.x; + ((_Tp*)data)[1] = pt.y; + } +} + + +template inline Mat::Mat(const Point3_<_Tp>& pt, bool copyData) + : flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(2), rows(3), cols(1), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + if( !copyData ) + { + step[0] = step[1] = sizeof(_Tp); + data = datastart = (uchar*)&pt.x; + datalimit = dataend = datastart + rows*step[0]; + } + else + { + create(3, 1, DataType<_Tp>::type); + ((_Tp*)data)[0] = pt.x; + ((_Tp*)data)[1] = pt.y; + ((_Tp*)data)[2] = pt.z; + } +} + + +template inline Mat::Mat(const MatCommaInitializer_<_Tp>& commaInitializer) + : 
flags(MAGIC_VAL | DataType<_Tp>::type | CV_MAT_CONT_FLAG), + dims(0), rows(0), cols(0), data(0), refcount(0), + datastart(0), dataend(0), allocator(0), size(&rows) +{ + *this = *commaInitializer; +} + +inline Mat::~Mat() +{ + release(); + if( step.p != step.buf ) + fastFree(step.p); +} + +inline Mat& Mat::operator = (const Mat& m) +{ + if( this != &m ) + { + if( m.refcount ) + CV_XADD(m.refcount, 1); + release(); + flags = m.flags; + if( dims <= 2 && m.dims <= 2 ) + { + dims = m.dims; + rows = m.rows; + cols = m.cols; + step[0] = m.step[0]; + step[1] = m.step[1]; + } + else + copySize(m); + data = m.data; + datastart = m.datastart; + dataend = m.dataend; + datalimit = m.datalimit; + refcount = m.refcount; + allocator = m.allocator; + } + return *this; +} + +inline Mat Mat::row(int y) const { return Mat(*this, Range(y, y+1), Range::all()); } +inline Mat Mat::col(int x) const { return Mat(*this, Range::all(), Range(x, x+1)); } +inline Mat Mat::rowRange(int startrow, int endrow) const + { return Mat(*this, Range(startrow, endrow), Range::all()); } +inline Mat Mat::rowRange(const Range& r) const + { return Mat(*this, r, Range::all()); } +inline Mat Mat::colRange(int startcol, int endcol) const + { return Mat(*this, Range::all(), Range(startcol, endcol)); } +inline Mat Mat::colRange(const Range& r) const + { return Mat(*this, Range::all(), r); } + +inline Mat Mat::diag(const Mat& d) +{ + CV_Assert( d.cols == 1 ); + Mat m(d.rows, d.rows, d.type(), Scalar(0)), md = m.diag(); + d.copyTo(md); + return m; +} + +inline Mat Mat::clone() const +{ + Mat m; + copyTo(m); + return m; +} + +inline void Mat::assignTo( Mat& m, int type ) const +{ + if( type < 0 ) + m = *this; + else + convertTo(m, type); +} + +inline void Mat::create(int _rows, int _cols, int _type) +{ + _type &= TYPE_MASK; + if( dims <= 2 && rows == _rows && cols == _cols && type() == _type && data ) + return; + int sz[] = {_rows, _cols}; + create(2, sz, _type); +} + +inline void Mat::create(Size _sz, int _type) +{ + 
create(_sz.height, _sz.width, _type); +} + +inline void Mat::addref() +{ if( refcount ) CV_XADD(refcount, 1); } + +inline void Mat::release() +{ + if( refcount && CV_XADD(refcount, -1) == 1 ) + deallocate(); + data = datastart = dataend = datalimit = 0; + size.p[0] = 0; + refcount = 0; +} + +inline Mat Mat::operator()( Range rowRange, Range colRange ) const +{ + return Mat(*this, rowRange, colRange); +} + +inline Mat Mat::operator()( const Rect& roi ) const +{ return Mat(*this, roi); } + +inline Mat Mat::operator()(const Range* ranges) const +{ + return Mat(*this, ranges); +} + +inline Mat::operator CvMat() const +{ + CV_DbgAssert(dims <= 2); + CvMat m = cvMat(rows, dims == 1 ? 1 : cols, type(), data); + m.step = (int)step[0]; + m.type = (m.type & ~CONTINUOUS_FLAG) | (flags & CONTINUOUS_FLAG); + return m; +} + +inline bool Mat::isContinuous() const { return (flags & CONTINUOUS_FLAG) != 0; } +inline bool Mat::isSubmatrix() const { return (flags & SUBMATRIX_FLAG) != 0; } +inline size_t Mat::elemSize() const { return dims > 0 ? 
step.p[dims-1] : 0; } +inline size_t Mat::elemSize1() const { return CV_ELEM_SIZE1(flags); } +inline int Mat::type() const { return CV_MAT_TYPE(flags); } +inline int Mat::depth() const { return CV_MAT_DEPTH(flags); } +inline int Mat::channels() const { return CV_MAT_CN(flags); } +inline size_t Mat::step1(int i) const { return step.p[i]/elemSize1(); } +inline bool Mat::empty() const { return data == 0 || total() == 0; } +inline size_t Mat::total() const +{ + if( dims <= 2 ) + return rows*cols; + size_t p = 1; + for( int i = 0; i < dims; i++ ) + p *= size[i]; + return p; +} + +inline uchar* Mat::ptr(int y) +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) ); + return data + step.p[0]*y; +} + +inline const uchar* Mat::ptr(int y) const +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) ); + return data + step.p[0]*y; +} + +template inline _Tp* Mat::ptr(int y) +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && (unsigned)y < (unsigned)size.p[0]) ); + return (_Tp*)(data + step.p[0]*y); +} + +template inline const _Tp* Mat::ptr(int y) const +{ + CV_DbgAssert( y == 0 || (data && dims >= 1 && data && (unsigned)y < (unsigned)size.p[0]) ); + return (const _Tp*)(data + step.p[0]*y); +} + + +inline uchar* Mat::ptr(int i0, int i1) +{ + CV_DbgAssert( dims >= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] ); + return data + i0*step.p[0] + i1*step.p[1]; +} + +inline const uchar* Mat::ptr(int i0, int i1) const +{ + CV_DbgAssert( dims >= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] ); + return data + i0*step.p[0] + i1*step.p[1]; +} + +template inline _Tp* Mat::ptr(int i0, int i1) +{ + CV_DbgAssert( dims >= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] ); + return (_Tp*)(data + i0*step.p[0] + i1*step.p[1]); +} + +template inline const _Tp* Mat::ptr(int i0, int i1) const +{ + 
CV_DbgAssert( dims >= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] ); + return (const _Tp*)(data + i0*step.p[0] + i1*step.p[1]); +} + +inline uchar* Mat::ptr(int i0, int i1, int i2) +{ + CV_DbgAssert( dims >= 3 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + (unsigned)i2 < (unsigned)size.p[2] ); + return data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]; +} + +inline const uchar* Mat::ptr(int i0, int i1, int i2) const +{ + CV_DbgAssert( dims >= 3 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + (unsigned)i2 < (unsigned)size.p[2] ); + return data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]; +} + +template inline _Tp* Mat::ptr(int i0, int i1, int i2) +{ + CV_DbgAssert( dims >= 3 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + (unsigned)i2 < (unsigned)size.p[2] ); + return (_Tp*)(data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]); +} + +template inline const _Tp* Mat::ptr(int i0, int i1, int i2) const +{ + CV_DbgAssert( dims >= 3 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + (unsigned)i2 < (unsigned)size.p[2] ); + return (const _Tp*)(data + i0*step.p[0] + i1*step.p[1] + i2*step.p[2]); +} + +inline uchar* Mat::ptr(const int* idx) +{ + int i, d = dims; + uchar* p = data; + CV_DbgAssert( d >= 1 && p ); + for( i = 0; i < d; i++ ) + { + CV_DbgAssert( (unsigned)idx[i] < (unsigned)size.p[i] ); + p += idx[i]*step.p[i]; + } + return p; +} + +inline const uchar* Mat::ptr(const int* idx) const +{ + int i, d = dims; + uchar* p = data; + CV_DbgAssert( d >= 1 && p ); + for( i = 0; i < d; i++ ) + { + CV_DbgAssert( (unsigned)idx[i] < (unsigned)size.p[i] ); + p += idx[i]*step.p[i]; + } + return p; +} + +template inline _Tp& Mat::at(int i0, int i1) +{ + CV_DbgAssert( dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] && + 
(unsigned)(i1*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && + CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1()); + return ((_Tp*)(data + step.p[0]*i0))[i1]; +} + +template inline const _Tp& Mat::at(int i0, int i1) const +{ + CV_DbgAssert( dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)(i1*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && + CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1()); + return ((const _Tp*)(data + step.p[0]*i0))[i1]; +} + +template inline _Tp& Mat::at(Point pt) +{ + CV_DbgAssert( dims <= 2 && data && (unsigned)pt.y < (unsigned)size.p[0] && + (unsigned)(pt.x*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && + CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1()); + return ((_Tp*)(data + step.p[0]*pt.y))[pt.x]; +} + +template inline const _Tp& Mat::at(Point pt) const +{ + CV_DbgAssert( dims <= 2 && data && (unsigned)pt.y < (unsigned)size.p[0] && + (unsigned)(pt.x*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && + CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1()); + return ((const _Tp*)(data + step.p[0]*pt.y))[pt.x]; +} + +template inline _Tp& Mat::at(int i0) +{ + CV_DbgAssert( dims <= 2 && data && (size.p[0] == 1 || size.p[1] == 1) && + (unsigned)i0 < (unsigned)(size.p[0] + size.p[1] - 1) && + elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(_Tp*)(data + step.p[size.p[0]==1]*i0); +} + +template inline const _Tp& Mat::at(int i0) const +{ + CV_DbgAssert( dims <= 2 && data && (size.p[0] == 1 || size.p[1] == 1) && + (unsigned)i0 < (unsigned)(size.p[0] + size.p[1] - 1) && + elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(_Tp*)(data + step.p[size.p[0]==1]*i0); +} + +template inline _Tp& Mat::at(int i0, int i1, int i2) +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(_Tp*)ptr(i0, i1, i2); +} +template inline const _Tp& Mat::at(int i0, int i1, int i2) const +{ + CV_DbgAssert( elemSize() == 
CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(const _Tp*)ptr(i0, i1, i2); +} +template inline _Tp& Mat::at(const int* idx) +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(_Tp*)ptr(idx); +} +template inline const _Tp& Mat::at(const int* idx) const +{ + CV_DbgAssert( elemSize() == CV_ELEM_SIZE(DataType<_Tp>::type) ); + return *(const _Tp*)ptr(idx); +} + + +template inline MatConstIterator_<_Tp> Mat::begin() const +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + return MatConstIterator_<_Tp>((const Mat_<_Tp>*)this); +} + +template inline MatConstIterator_<_Tp> Mat::end() const +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + MatConstIterator_<_Tp> it((const Mat_<_Tp>*)this); + it += total(); + return it; +} + +template inline MatIterator_<_Tp> Mat::begin() +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + return MatIterator_<_Tp>((Mat_<_Tp>*)this); +} + +template inline MatIterator_<_Tp> Mat::end() +{ + CV_DbgAssert( elemSize() == sizeof(_Tp) ); + MatIterator_<_Tp> it((Mat_<_Tp>*)this); + it += total(); + return it; +} + +template inline Mat::operator vector<_Tp>() const +{ + vector<_Tp> v; + copyTo(v); + return v; +} + +template inline Mat::operator Vec<_Tp, n>() const +{ + CV_Assert( data && dims <= 2 && (rows == 1 || cols == 1) && + rows + cols - 1 == n && channels() == 1 ); + + if( isContinuous() && type() == DataType<_Tp>::type ) + return Vec<_Tp, n>((_Tp*)data); + Vec<_Tp, n> v; Mat tmp(rows, cols, DataType<_Tp>::type, v.val); + convertTo(tmp, tmp.type()); + return v; +} + +template inline Mat::operator Matx<_Tp, m, n>() const +{ + CV_Assert( data && dims <= 2 && rows == m && cols == n && channels() == 1 ); + + if( isContinuous() && type() == DataType<_Tp>::type ) + return Matx<_Tp, m, n>((_Tp*)data); + Matx<_Tp, m, n> mtx; Mat tmp(rows, cols, DataType<_Tp>::type, mtx.val); + convertTo(tmp, tmp.type()); + return mtx; +} + + +template inline void Mat::push_back(const _Tp& elem) +{ + if( !data ) + { + *this = Mat(1, 1, 
DataType<_Tp>::type, (void*)&elem).clone(); + return; + } + CV_Assert(DataType<_Tp>::type == type() && cols == 1 + /* && dims == 2 (cols == 1 implies dims == 2) */); + uchar* tmp = dataend + step[0]; + if( !isSubmatrix() && isContinuous() && tmp <= datalimit ) + { + *(_Tp*)(data + (size.p[0]++)*step.p[0]) = elem; + dataend = tmp; + } + else + push_back_(&elem); +} + +template inline void Mat::push_back(const Mat_<_Tp>& m) +{ + push_back((const Mat&)m); +} + +inline Mat::MSize::MSize(int* _p) : p(_p) {} +inline Size Mat::MSize::operator()() const +{ + CV_DbgAssert(p[-1] <= 2); + return Size(p[1], p[0]); +} +inline const int& Mat::MSize::operator[](int i) const { return p[i]; } +inline int& Mat::MSize::operator[](int i) { return p[i]; } +inline Mat::MSize::operator const int*() const { return p; } + +inline bool Mat::MSize::operator == (const MSize& sz) const +{ + int d = p[-1], dsz = sz.p[-1]; + if( d != dsz ) + return false; + if( d == 2 ) + return p[0] == sz.p[0] && p[1] == sz.p[1]; + + for( int i = 0; i < d; i++ ) + if( p[i] != sz.p[i] ) + return false; + return true; +} + +inline bool Mat::MSize::operator != (const MSize& sz) const +{ + return !(*this == sz); +} + +inline Mat::MStep::MStep() { p = buf; p[0] = p[1] = 0; } +inline Mat::MStep::MStep(size_t s) { p = buf; p[0] = s; p[1] = 0; } +inline const size_t& Mat::MStep::operator[](int i) const { return p[i]; } +inline size_t& Mat::MStep::operator[](int i) { return p[i]; } +inline Mat::MStep::operator size_t() const +{ + CV_DbgAssert( p == buf ); + return buf[0]; +} +inline Mat::MStep& Mat::MStep::operator = (size_t s) +{ + CV_DbgAssert( p == buf ); + buf[0] = s; + return *this; +} + +static inline Mat cvarrToMatND(const CvArr* arr, bool copyData=false, int coiMode=0) +{ + return cvarrToMat(arr, copyData, true, coiMode); +} + +///////////////////////////////////////////// SVD ////////////////////////////////////////////////////// + +inline SVD::SVD() {} +inline SVD::SVD( InputArray m, int flags ) { operator 
()(m, flags); } +inline void SVD::solveZ( InputArray m, OutputArray _dst ) +{ + SVD svd(m); + _dst.create(svd.vt.cols, 1, svd.vt.type()); + Mat dst = _dst.getMat(); + svd.vt.row(svd.vt.rows-1).reshape(1,svd.vt.cols).copyTo(dst); +} + +template inline void + SVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w, Matx<_Tp, m, nm>& u, Matx<_Tp, n, nm>& vt ) +{ + assert( nm == MIN(m, n)); + Mat _a(a, false), _u(u, false), _w(w, false), _vt(vt, false); + SVD::compute(_a, _w, _u, _vt); + CV_Assert(_w.data == (uchar*)&w.val[0] && _u.data == (uchar*)&u.val[0] && _vt.data == (uchar*)&vt.val[0]); +} + +template inline void +SVD::compute( const Matx<_Tp, m, n>& a, Matx<_Tp, nm, 1>& w ) +{ + assert( nm == MIN(m, n)); + Mat _a(a, false), _w(w, false); + SVD::compute(_a, _w); + CV_Assert(_w.data == (uchar*)&w.val[0]); +} + +template inline void +SVD::backSubst( const Matx<_Tp, nm, 1>& w, const Matx<_Tp, m, nm>& u, + const Matx<_Tp, n, nm>& vt, const Matx<_Tp, m, nb>& rhs, + Matx<_Tp, n, nb>& dst ) +{ + assert( nm == MIN(m, n)); + Mat _u(u, false), _w(w, false), _vt(vt, false), _rhs(rhs, false), _dst(dst, false); + SVD::backSubst(_w, _u, _vt, _rhs, _dst); + CV_Assert(_dst.data == (uchar*)&dst.val[0]); +} + +///////////////////////////////// Mat_<_Tp> //////////////////////////////////// + +template inline Mat_<_Tp>::Mat_() + : Mat() { flags = (flags & ~CV_MAT_TYPE_MASK) | DataType<_Tp>::type; } + +template inline Mat_<_Tp>::Mat_(int _rows, int _cols) + : Mat(_rows, _cols, DataType<_Tp>::type) {} + +template inline Mat_<_Tp>::Mat_(int _rows, int _cols, const _Tp& value) + : Mat(_rows, _cols, DataType<_Tp>::type) { *this = value; } + +template inline Mat_<_Tp>::Mat_(Size _sz) + : Mat(_sz.height, _sz.width, DataType<_Tp>::type) {} + +template inline Mat_<_Tp>::Mat_(Size _sz, const _Tp& value) + : Mat(_sz.height, _sz.width, DataType<_Tp>::type) { *this = value; } + +template inline Mat_<_Tp>::Mat_(int _dims, const int* _sz) + : Mat(_dims, _sz, DataType<_Tp>::type) {} + 
+template inline Mat_<_Tp>::Mat_(int _dims, const int* _sz, const _Tp& _s) + : Mat(_dims, _sz, DataType<_Tp>::type, Scalar(_s)) {} + +template inline Mat_<_Tp>::Mat_(const Mat_<_Tp>& m, const Range* ranges) + : Mat(m, ranges) {} + +template inline Mat_<_Tp>::Mat_(const Mat& m) + : Mat() { flags = (flags & ~CV_MAT_TYPE_MASK) | DataType<_Tp>::type; *this = m; } + +template inline Mat_<_Tp>::Mat_(const Mat_& m) + : Mat(m) {} + +template inline Mat_<_Tp>::Mat_(int _rows, int _cols, _Tp* _data, size_t steps) + : Mat(_rows, _cols, DataType<_Tp>::type, _data, steps) {} + +template inline Mat_<_Tp>::Mat_(const Mat_& m, const Range& rowRange, const Range& colRange) + : Mat(m, rowRange, colRange) {} + +template inline Mat_<_Tp>::Mat_(const Mat_& m, const Rect& roi) + : Mat(m, roi) {} + +template template inline + Mat_<_Tp>::Mat_(const Vec::channel_type, n>& vec, bool copyData) + : Mat(n/DataType<_Tp>::channels, 1, DataType<_Tp>::type, (void*)&vec) +{ + CV_Assert(n%DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template template inline + Mat_<_Tp>::Mat_(const Matx::channel_type,m,n>& M, bool copyData) + : Mat(m, n/DataType<_Tp>::channels, DataType<_Tp>::type, (void*)&M) +{ + CV_Assert(n % DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template inline Mat_<_Tp>::Mat_(const Point_::channel_type>& pt, bool copyData) + : Mat(2/DataType<_Tp>::channels, 1, DataType<_Tp>::type, (void*)&pt) +{ + CV_Assert(2 % DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template inline Mat_<_Tp>::Mat_(const Point3_::channel_type>& pt, bool copyData) + : Mat(3/DataType<_Tp>::channels, 1, DataType<_Tp>::type, (void*)&pt) +{ + CV_Assert(3 % DataType<_Tp>::channels == 0); + if( copyData ) + *this = clone(); +} + +template inline Mat_<_Tp>::Mat_(const MatCommaInitializer_<_Tp>& commaInitializer) + : Mat(commaInitializer) {} + +template inline Mat_<_Tp>::Mat_(const vector<_Tp>& vec, bool copyData) + : Mat(vec, copyData) {} + 
+template inline Mat_<_Tp>& Mat_<_Tp>::operator = (const Mat& m) +{ + if( DataType<_Tp>::type == m.type() ) + { + Mat::operator = (m); + return *this; + } + if( DataType<_Tp>::depth == m.depth() ) + { + return (*this = m.reshape(DataType<_Tp>::channels, m.dims, 0)); + } + CV_DbgAssert(DataType<_Tp>::channels == m.channels()); + m.convertTo(*this, type()); + return *this; +} + +template inline Mat_<_Tp>& Mat_<_Tp>::operator = (const Mat_& m) +{ + Mat::operator=(m); + return *this; +} + +template inline Mat_<_Tp>& Mat_<_Tp>::operator = (const _Tp& s) +{ + typedef typename DataType<_Tp>::vec_type VT; + Mat::operator=(Scalar((const VT&)s)); + return *this; +} + +template inline void Mat_<_Tp>::create(int _rows, int _cols) +{ + Mat::create(_rows, _cols, DataType<_Tp>::type); +} + +template inline void Mat_<_Tp>::create(Size _sz) +{ + Mat::create(_sz, DataType<_Tp>::type); +} + +template inline void Mat_<_Tp>::create(int _dims, const int* _sz) +{ + Mat::create(_dims, _sz, DataType<_Tp>::type); +} + + +template inline Mat_<_Tp> Mat_<_Tp>::cross(const Mat_& m) const +{ return Mat_<_Tp>(Mat::cross(m)); } + +template template inline Mat_<_Tp>::operator Mat_() const +{ return Mat_(*this); } + +template inline Mat_<_Tp> Mat_<_Tp>::row(int y) const +{ return Mat_(*this, Range(y, y+1), Range::all()); } +template inline Mat_<_Tp> Mat_<_Tp>::col(int x) const +{ return Mat_(*this, Range::all(), Range(x, x+1)); } +template inline Mat_<_Tp> Mat_<_Tp>::diag(int d) const +{ return Mat_(Mat::diag(d)); } +template inline Mat_<_Tp> Mat_<_Tp>::clone() const +{ return Mat_(Mat::clone()); } + +template inline size_t Mat_<_Tp>::elemSize() const +{ + CV_DbgAssert( Mat::elemSize() == sizeof(_Tp) ); + return sizeof(_Tp); +} + +template inline size_t Mat_<_Tp>::elemSize1() const +{ + CV_DbgAssert( Mat::elemSize1() == sizeof(_Tp)/DataType<_Tp>::channels ); + return sizeof(_Tp)/DataType<_Tp>::channels; +} +template inline int Mat_<_Tp>::type() const +{ + CV_DbgAssert( Mat::type() == 
DataType<_Tp>::type ); + return DataType<_Tp>::type; +} +template inline int Mat_<_Tp>::depth() const +{ + CV_DbgAssert( Mat::depth() == DataType<_Tp>::depth ); + return DataType<_Tp>::depth; +} +template inline int Mat_<_Tp>::channels() const +{ + CV_DbgAssert( Mat::channels() == DataType<_Tp>::channels ); + return DataType<_Tp>::channels; +} +template inline size_t Mat_<_Tp>::stepT(int i) const { return step.p[i]/elemSize(); } +template inline size_t Mat_<_Tp>::step1(int i) const { return step.p[i]/elemSize1(); } + +template inline Mat_<_Tp> Mat_<_Tp>::reshape(int _rows) const +{ return Mat_<_Tp>(Mat::reshape(0,_rows)); } + +template inline Mat_<_Tp>& Mat_<_Tp>::adjustROI( int dtop, int dbottom, int dleft, int dright ) +{ return (Mat_<_Tp>&)(Mat::adjustROI(dtop, dbottom, dleft, dright)); } + +template inline Mat_<_Tp> Mat_<_Tp>::operator()( const Range& rowRange, const Range& colRange ) const +{ return Mat_<_Tp>(*this, rowRange, colRange); } + +template inline Mat_<_Tp> Mat_<_Tp>::operator()( const Rect& roi ) const +{ return Mat_<_Tp>(*this, roi); } + +template inline Mat_<_Tp> Mat_<_Tp>::operator()( const Range* ranges ) const +{ return Mat_<_Tp>(*this, ranges); } + +template inline _Tp* Mat_<_Tp>::operator [](int y) +{ return (_Tp*)ptr(y); } +template inline const _Tp* Mat_<_Tp>::operator [](int y) const +{ return (const _Tp*)ptr(y); } + +template inline _Tp& Mat_<_Tp>::operator ()(int i0, int i1) +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + type() == DataType<_Tp>::type ); + return ((_Tp*)(data + step.p[0]*i0))[i1]; +} + +template inline const _Tp& Mat_<_Tp>::operator ()(int i0, int i1) const +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)i0 < (unsigned)size.p[0] && + (unsigned)i1 < (unsigned)size.p[1] && + type() == DataType<_Tp>::type ); + return ((const _Tp*)(data + step.p[0]*i0))[i1]; +} + +template inline _Tp& Mat_<_Tp>::operator ()(Point pt) +{ + CV_DbgAssert( dims <= 2 && 
data && + (unsigned)pt.y < (unsigned)size.p[0] && + (unsigned)pt.x < (unsigned)size.p[1] && + type() == DataType<_Tp>::type ); + return ((_Tp*)(data + step.p[0]*pt.y))[pt.x]; +} + +template inline const _Tp& Mat_<_Tp>::operator ()(Point pt) const +{ + CV_DbgAssert( dims <= 2 && data && + (unsigned)pt.y < (unsigned)size.p[0] && + (unsigned)pt.x < (unsigned)size.p[1] && + type() == DataType<_Tp>::type ); + return ((const _Tp*)(data + step.p[0]*pt.y))[pt.x]; +} + +template inline _Tp& Mat_<_Tp>::operator ()(const int* idx) +{ + return Mat::at<_Tp>(idx); +} + +template inline const _Tp& Mat_<_Tp>::operator ()(const int* idx) const +{ + return Mat::at<_Tp>(idx); +} + +template inline _Tp& Mat_<_Tp>::operator ()(int i0) +{ + return this->at<_Tp>(i0); +} + +template inline const _Tp& Mat_<_Tp>::operator ()(int i0) const +{ + return this->at<_Tp>(i0); +} + +template inline _Tp& Mat_<_Tp>::operator ()(int i0, int i1, int i2) +{ + return this->at<_Tp>(i0, i1, i2); +} + +template inline const _Tp& Mat_<_Tp>::operator ()(int i0, int i1, int i2) const +{ + return this->at<_Tp>(i0, i1, i2); +} + + +template inline Mat_<_Tp>::operator vector<_Tp>() const +{ + vector<_Tp> v; + copyTo(v); + return v; +} + +template template inline Mat_<_Tp>::operator Vec::channel_type, n>() const +{ + CV_Assert(n % DataType<_Tp>::channels == 0); + return this->Mat::operator Vec::channel_type, n>(); +} + +template template inline Mat_<_Tp>::operator Matx::channel_type, m, n>() const +{ + CV_Assert(n % DataType<_Tp>::channels == 0); + return this->Mat::operator Matx::channel_type, m, n>(); +} + +template inline void +process( const Mat_& m1, Mat_& m2, Op op ) +{ + int y, x, rows = m1.rows, cols = m1.cols; + int c1 = m1.channels(), c2 = m2.channels(); + + CV_DbgAssert( m1.size() == m2.size() ); + + for( y = 0; y < rows; y++ ) + { + const T1* src = m1[y]; + T2* dst = m2[y]; + + for( x = 0; x < cols; x++ ) + dst[x] = op(src[x]); + } +} + +template inline void +process( const Mat_& m1, const Mat_& m2, 
Mat_& m3, Op op ) +{ + int y, x, rows = m1.rows, cols = m1.cols; + + CV_DbgAssert( m1.size() == m2.size() ); + + for( y = 0; y < rows; y++ ) + { + const T1* src1 = m1[y]; + const T2* src2 = m2[y]; + T3* dst = m3[y]; + + for( x = 0; x < cols; x++ ) + dst[x] = op( src1[x], src2[x] ); + } +} + + +/////////////////////////////// Input/Output Arrays ///////////////////////////////// + +template inline _InputArray::_InputArray(const vector<_Tp>& vec) + : flags(STD_VECTOR + DataType<_Tp>::type), obj((void*)&vec) {} + +template inline _InputArray::_InputArray(const vector >& vec) + : flags(STD_VECTOR_VECTOR + DataType<_Tp>::type), obj((void*)&vec) {} + +template inline _InputArray::_InputArray(const Matx<_Tp, m, n>& mtx) + : flags(MATX + DataType<_Tp>::type), obj((void*)&mtx), sz(n, m) {} + +inline _InputArray::_InputArray(const Scalar& s) + : flags(MATX + CV_64F), obj((void*)&s), sz(1, 4) {} + +template inline _OutputArray::_OutputArray(vector<_Tp>& vec) : _InputArray(vec) {} +template inline _OutputArray::_OutputArray(vector >& vec) : _InputArray(vec) {} +template inline _OutputArray::_OutputArray(Matx<_Tp, m, n>& mtx) : _InputArray(mtx) {} + +//////////////////////////////////// Matrix Expressions ///////////////////////////////////////// + +class CV_EXPORTS MatOp +{ +public: + MatOp() {}; + virtual ~MatOp() {}; + + virtual bool elementWise(const MatExpr& expr) const; + virtual void assign(const MatExpr& expr, Mat& m, int type=-1) const = 0; + virtual void roi(const MatExpr& expr, const Range& rowRange, + const Range& colRange, MatExpr& res) const; + virtual void diag(const MatExpr& expr, int d, MatExpr& res) const; + virtual void augAssignAdd(const MatExpr& expr, Mat& m) const; + virtual void augAssignSubtract(const MatExpr& expr, Mat& m) const; + virtual void augAssignMultiply(const MatExpr& expr, Mat& m) const; + virtual void augAssignDivide(const MatExpr& expr, Mat& m) const; + virtual void augAssignAnd(const MatExpr& expr, Mat& m) const; + virtual void 
augAssignOr(const MatExpr& expr, Mat& m) const; + virtual void augAssignXor(const MatExpr& expr, Mat& m) const; + + virtual void add(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const; + virtual void add(const MatExpr& expr1, const Scalar& s, MatExpr& res) const; + + virtual void subtract(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const; + virtual void subtract(const Scalar& s, const MatExpr& expr, MatExpr& res) const; + + virtual void multiply(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res, double scale=1) const; + virtual void multiply(const MatExpr& expr1, double s, MatExpr& res) const; + + virtual void divide(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res, double scale=1) const; + virtual void divide(double s, const MatExpr& expr, MatExpr& res) const; + + virtual void abs(const MatExpr& expr, MatExpr& res) const; + + virtual void transpose(const MatExpr& expr, MatExpr& res) const; + virtual void matmul(const MatExpr& expr1, const MatExpr& expr2, MatExpr& res) const; + virtual void invert(const MatExpr& expr, int method, MatExpr& res) const; + + virtual Size size(const MatExpr& expr) const; + virtual int type(const MatExpr& expr) const; +}; + + +class CV_EXPORTS MatExpr +{ +public: + MatExpr() : op(0), flags(0), a(Mat()), b(Mat()), c(Mat()), alpha(0), beta(0), s(Scalar()) {} + MatExpr(const MatOp* _op, int _flags, const Mat& _a=Mat(), const Mat& _b=Mat(), + const Mat& _c=Mat(), double _alpha=1, double _beta=1, const Scalar& _s=Scalar()) + : op(_op), flags(_flags), a(_a), b(_b), c(_c), alpha(_alpha), beta(_beta), s(_s) {} + explicit MatExpr(const Mat& m); + operator Mat() const + { + Mat m; + op->assign(*this, m); + return m; + } + + template operator Mat_<_Tp>() const + { + Mat_<_Tp> m; + op->assign(*this, m, DataType<_Tp>::type); + return m; + } + + MatExpr row(int y) const; + MatExpr col(int x) const; + MatExpr diag(int d=0) const; + MatExpr operator()( const Range& rowRange, const Range& colRange ) const; + 
MatExpr operator()( const Rect& roi ) const; + + Mat cross(const Mat& m) const; + double dot(const Mat& m) const; + + MatExpr t() const; + MatExpr inv(int method = DECOMP_LU) const; + MatExpr mul(const MatExpr& e, double scale=1) const; + MatExpr mul(const Mat& m, double scale=1) const; + + Size size() const; + int type() const; + + const MatOp* op; + int flags; + + Mat a, b, c; + double alpha, beta; + Scalar s; +}; + + +CV_EXPORTS MatExpr operator + (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator + (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator + (const Scalar& s, const Mat& a); +CV_EXPORTS MatExpr operator + (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator + (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator + (const MatExpr& e, const Scalar& s); +CV_EXPORTS MatExpr operator + (const Scalar& s, const MatExpr& e); +CV_EXPORTS MatExpr operator + (const MatExpr& e1, const MatExpr& e2); + +CV_EXPORTS MatExpr operator - (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator - (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator - (const Scalar& s, const Mat& a); +CV_EXPORTS MatExpr operator - (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator - (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator - (const MatExpr& e, const Scalar& s); +CV_EXPORTS MatExpr operator - (const Scalar& s, const MatExpr& e); +CV_EXPORTS MatExpr operator - (const MatExpr& e1, const MatExpr& e2); + +CV_EXPORTS MatExpr operator - (const Mat& m); +CV_EXPORTS MatExpr operator - (const MatExpr& e); + +CV_EXPORTS MatExpr operator * (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator * (const Mat& a, double s); +CV_EXPORTS MatExpr operator * (double s, const Mat& a); +CV_EXPORTS MatExpr operator * (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator * (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator * (const MatExpr& e, double s); +CV_EXPORTS MatExpr operator * (double s, const 
MatExpr& e); +CV_EXPORTS MatExpr operator * (const MatExpr& e1, const MatExpr& e2); + +CV_EXPORTS MatExpr operator / (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator / (const Mat& a, double s); +CV_EXPORTS MatExpr operator / (double s, const Mat& a); +CV_EXPORTS MatExpr operator / (const MatExpr& e, const Mat& m); +CV_EXPORTS MatExpr operator / (const Mat& m, const MatExpr& e); +CV_EXPORTS MatExpr operator / (const MatExpr& e, double s); +CV_EXPORTS MatExpr operator / (double s, const MatExpr& e); +CV_EXPORTS MatExpr operator / (const MatExpr& e1, const MatExpr& e2); + +CV_EXPORTS MatExpr operator < (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator < (const Mat& a, double s); +CV_EXPORTS MatExpr operator < (double s, const Mat& a); + +CV_EXPORTS MatExpr operator <= (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator <= (const Mat& a, double s); +CV_EXPORTS MatExpr operator <= (double s, const Mat& a); + +CV_EXPORTS MatExpr operator == (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator == (const Mat& a, double s); +CV_EXPORTS MatExpr operator == (double s, const Mat& a); + +CV_EXPORTS MatExpr operator != (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator != (const Mat& a, double s); +CV_EXPORTS MatExpr operator != (double s, const Mat& a); + +CV_EXPORTS MatExpr operator >= (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator >= (const Mat& a, double s); +CV_EXPORTS MatExpr operator >= (double s, const Mat& a); + +CV_EXPORTS MatExpr operator > (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator > (const Mat& a, double s); +CV_EXPORTS MatExpr operator > (double s, const Mat& a); + +CV_EXPORTS MatExpr min(const Mat& a, const Mat& b); +CV_EXPORTS MatExpr min(const Mat& a, double s); +CV_EXPORTS MatExpr min(double s, const Mat& a); + +CV_EXPORTS MatExpr max(const Mat& a, const Mat& b); +CV_EXPORTS MatExpr max(const Mat& a, double s); +CV_EXPORTS MatExpr max(double s, const Mat& a); + +template static inline MatExpr 
min(const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + return cv::min((const Mat&)a, (const Mat&)b); +} + +template static inline MatExpr min(const Mat_<_Tp>& a, double s) +{ + return cv::min((const Mat&)a, s); +} + +template static inline MatExpr min(double s, const Mat_<_Tp>& a) +{ + return cv::min((const Mat&)a, s); +} + +template static inline MatExpr max(const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + return cv::max((const Mat&)a, (const Mat&)b); +} + +template static inline MatExpr max(const Mat_<_Tp>& a, double s) +{ + return cv::max((const Mat&)a, s); +} + +template static inline MatExpr max(double s, const Mat_<_Tp>& a) +{ + return cv::max((const Mat&)a, s); +} + +template static inline void min(const Mat_<_Tp>& a, const Mat_<_Tp>& b, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, (const Mat&)b, (Mat&)c); +} + +template static inline void min(const Mat_<_Tp>& a, double s, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, s, (Mat&)c); +} + +template static inline void min(double s, const Mat_<_Tp>& a, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, s, (Mat&)c); +} + +template static inline void max(const Mat_<_Tp>& a, const Mat_<_Tp>& b, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, (const Mat&)b, (Mat&)c); +} + +template static inline void max(const Mat_<_Tp>& a, double s, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, s, (Mat&)c); +} + +template static inline void max(double s, const Mat_<_Tp>& a, Mat_<_Tp>& c) +{ + cv::min((const Mat&)a, s, (Mat&)c); +} + + +CV_EXPORTS MatExpr operator & (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator & (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator & (const Scalar& s, const Mat& a); + +CV_EXPORTS MatExpr operator | (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator | (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator | (const Scalar& s, const Mat& a); + +CV_EXPORTS MatExpr operator ^ (const Mat& a, const Mat& b); +CV_EXPORTS MatExpr operator ^ (const Mat& a, const Scalar& s); +CV_EXPORTS MatExpr operator ^ 
(const Scalar& s, const Mat& a); + +CV_EXPORTS MatExpr operator ~(const Mat& m); + +CV_EXPORTS MatExpr abs(const Mat& m); +CV_EXPORTS MatExpr abs(const MatExpr& e); + +template static inline MatExpr abs(const Mat_<_Tp>& m) +{ + return cv::abs((const Mat&)m); +} + +////////////////////////////// Augmenting algebraic operations ////////////////////////////////// + +inline Mat& Mat::operator = (const MatExpr& e) +{ + e.op->assign(e, *this); + return *this; +} + +template inline Mat_<_Tp>::Mat_(const MatExpr& e) +{ + e.op->assign(e, *this, DataType<_Tp>::type); +} + +template Mat_<_Tp>& Mat_<_Tp>::operator = (const MatExpr& e) +{ + e.op->assign(e, *this, DataType<_Tp>::type); + return *this; +} + +static inline Mat& operator += (const Mat& a, const Mat& b) +{ + add(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator += (const Mat& a, const Scalar& s) +{ + add(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator += (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + add(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline +Mat_<_Tp>& operator += (const Mat_<_Tp>& a, const Scalar& s) +{ + add(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator += (const Mat& a, const MatExpr& b) +{ + b.op->augAssignAdd(b, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator += (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignAdd(b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator -= (const Mat& a, const Mat& b) +{ + subtract(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator -= (const Mat& a, const Scalar& s) +{ + subtract(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator -= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + subtract(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline +Mat_<_Tp>& operator -= (const Mat_<_Tp>& a, const Scalar& s) +{ + subtract(a, s, (Mat&)a); + return 
(Mat_<_Tp>&)a; +} + +static inline Mat& operator -= (const Mat& a, const MatExpr& b) +{ + b.op->augAssignSubtract(b, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator -= (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignSubtract(b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator *= (const Mat& a, const Mat& b) +{ + gemm(a, b, 1, Mat(), 0, (Mat&)a, 0); + return (Mat&)a; +} + +static inline Mat& operator *= (const Mat& a, double s) +{ + a.convertTo((Mat&)a, -1, s); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator *= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + gemm(a, b, 1, Mat(), 0, (Mat&)a, 0); + return (Mat_<_Tp>&)a; +} + +template static inline +Mat_<_Tp>& operator *= (const Mat_<_Tp>& a, double s) +{ + a.convertTo((Mat&)a, -1, s); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator *= (const Mat& a, const MatExpr& b) +{ + b.op->augAssignMultiply(b, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator *= (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignMultiply(b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator /= (const Mat& a, const Mat& b) +{ + divide(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator /= (const Mat& a, double s) +{ + a.convertTo((Mat&)a, -1, 1./s); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + divide(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline +Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, double s) +{ + a.convertTo((Mat&)a, -1, 1./s); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator /= (const Mat& a, const MatExpr& b) +{ + b.op->augAssignDivide(b, (Mat&)a); + return (Mat&)a; +} + +template static inline +Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, const MatExpr& b) +{ + b.op->augAssignDivide(b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +////////////////////////////// Logical operations 
/////////////////////////////// + +static inline Mat& operator &= (const Mat& a, const Mat& b) +{ + bitwise_and(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator &= (const Mat& a, const Scalar& s) +{ + bitwise_and(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline Mat_<_Tp>& +operator &= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + bitwise_and(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline Mat_<_Tp>& +operator &= (const Mat_<_Tp>& a, const Scalar& s) +{ + bitwise_and(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator |= (const Mat& a, const Mat& b) +{ + bitwise_or(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator |= (const Mat& a, const Scalar& s) +{ + bitwise_or(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline Mat_<_Tp>& +operator |= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + bitwise_or(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline Mat_<_Tp>& +operator |= (const Mat_<_Tp>& a, const Scalar& s) +{ + bitwise_or(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +static inline Mat& operator ^= (const Mat& a, const Mat& b) +{ + bitwise_xor(a, b, (Mat&)a); + return (Mat&)a; +} + +static inline Mat& operator ^= (const Mat& a, const Scalar& s) +{ + bitwise_xor(a, s, (Mat&)a); + return (Mat&)a; +} + +template static inline Mat_<_Tp>& +operator ^= (const Mat_<_Tp>& a, const Mat_<_Tp>& b) +{ + bitwise_xor(a, b, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +template static inline Mat_<_Tp>& +operator ^= (const Mat_<_Tp>& a, const Scalar& s) +{ + bitwise_xor(a, s, (Mat&)a); + return (Mat_<_Tp>&)a; +} + +/////////////////////////////// Miscellaneous operations ////////////////////////////// + +template void split(const Mat& src, vector >& mv) +{ split(src, (vector&)mv ); } + +////////////////////////////////////////////////////////////// + +template inline MatExpr Mat_<_Tp>::zeros(int rows, int cols) +{ + return Mat::zeros(rows, cols, DataType<_Tp>::type); 
+} + +template inline MatExpr Mat_<_Tp>::zeros(Size sz) +{ + return Mat::zeros(sz, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::ones(int rows, int cols) +{ + return Mat::ones(rows, cols, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::ones(Size sz) +{ + return Mat::ones(sz, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::eye(int rows, int cols) +{ + return Mat::eye(rows, cols, DataType<_Tp>::type); +} + +template inline MatExpr Mat_<_Tp>::eye(Size sz) +{ + return Mat::eye(sz, DataType<_Tp>::type); +} + +//////////////////////////////// Iterators & Comma initializers ////////////////////////////////// + +inline MatConstIterator::MatConstIterator() + : m(0), elemSize(0), ptr(0), sliceStart(0), sliceEnd(0) {} + +inline MatConstIterator::MatConstIterator(const Mat* _m) + : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0) +{ + if( m && m->isContinuous() ) + { + sliceStart = m->data; + sliceEnd = sliceStart + m->total()*elemSize; + } + seek((const int*)0); +} + +inline MatConstIterator::MatConstIterator(const Mat* _m, int _row, int _col) + : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0) +{ + CV_Assert(m && m->dims <= 2); + if( m->isContinuous() ) + { + sliceStart = m->data; + sliceEnd = sliceStart + m->total()*elemSize; + } + int idx[]={_row, _col}; + seek(idx); +} + +inline MatConstIterator::MatConstIterator(const Mat* _m, Point _pt) + : m(_m), elemSize(_m->elemSize()), ptr(0), sliceStart(0), sliceEnd(0) +{ + CV_Assert(m && m->dims <= 2); + if( m->isContinuous() ) + { + sliceStart = m->data; + sliceEnd = sliceStart + m->total()*elemSize; + } + int idx[]={_pt.y, _pt.x}; + seek(idx); +} + +inline MatConstIterator::MatConstIterator(const MatConstIterator& it) + : m(it.m), elemSize(it.elemSize), ptr(it.ptr), sliceStart(it.sliceStart), sliceEnd(it.sliceEnd) +{} + +inline MatConstIterator& MatConstIterator::operator = (const MatConstIterator& it ) +{ + m = it.m; elemSize = it.elemSize; 
ptr = it.ptr; + sliceStart = it.sliceStart; sliceEnd = it.sliceEnd; + return *this; +} + +inline uchar* MatConstIterator::operator *() const { return ptr; } + +inline MatConstIterator& MatConstIterator::operator += (ptrdiff_t ofs) +{ + if( !m || ofs == 0 ) + return *this; + ptrdiff_t ofsb = ofs*elemSize; + ptr += ofsb; + if( ptr < sliceStart || sliceEnd <= ptr ) + { + ptr -= ofsb; + seek(ofs, true); + } + return *this; +} + +inline MatConstIterator& MatConstIterator::operator -= (ptrdiff_t ofs) +{ return (*this += -ofs); } + +inline MatConstIterator& MatConstIterator::operator --() +{ + if( m && (ptr -= elemSize) < sliceStart ) + { + ptr += elemSize; + seek(-1, true); + } + return *this; +} + +inline MatConstIterator MatConstIterator::operator --(int) +{ + MatConstIterator b = *this; + *this += -1; + return b; +} + +inline MatConstIterator& MatConstIterator::operator ++() +{ + if( m && (ptr += elemSize) >= sliceEnd ) + { + ptr -= elemSize; + seek(1, true); + } + return *this; +} + +inline MatConstIterator MatConstIterator::operator ++(int) +{ + MatConstIterator b = *this; + *this += 1; + return b; +} + +template inline MatConstIterator_<_Tp>::MatConstIterator_() {} + +template inline MatConstIterator_<_Tp>::MatConstIterator_(const Mat_<_Tp>* _m) + : MatConstIterator(_m) {} + +template inline MatConstIterator_<_Tp>:: + MatConstIterator_(const Mat_<_Tp>* _m, int _row, int _col) + : MatConstIterator(_m, _row, _col) {} + +template inline MatConstIterator_<_Tp>:: + MatConstIterator_(const Mat_<_Tp>* _m, Point _pt) + : MatConstIterator(_m, _pt) {} + +template inline MatConstIterator_<_Tp>:: + MatConstIterator_(const MatConstIterator_& it) + : MatConstIterator(it) {} + +template inline MatConstIterator_<_Tp>& + MatConstIterator_<_Tp>::operator = (const MatConstIterator_& it ) +{ + MatConstIterator::operator = (it); + return *this; +} + +template inline _Tp MatConstIterator_<_Tp>::operator *() const { return *(_Tp*)(this->ptr); } + +template inline MatConstIterator_<_Tp>& 
MatConstIterator_<_Tp>::operator += (ptrdiff_t ofs) +{ + MatConstIterator::operator += (ofs); + return *this; +} + +template inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator -= (ptrdiff_t ofs) +{ return (*this += -ofs); } + +template inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator --() +{ + MatConstIterator::operator --(); + return *this; +} + +template inline MatConstIterator_<_Tp> MatConstIterator_<_Tp>::operator --(int) +{ + MatConstIterator_ b = *this; + MatConstIterator::operator --(); + return b; +} + +template inline MatConstIterator_<_Tp>& MatConstIterator_<_Tp>::operator ++() +{ + MatConstIterator::operator ++(); + return *this; +} + +template inline MatConstIterator_<_Tp> MatConstIterator_<_Tp>::operator ++(int) +{ + MatConstIterator_ b = *this; + MatConstIterator::operator ++(); + return b; +} + +template inline MatIterator_<_Tp>::MatIterator_() : MatConstIterator_<_Tp>() {} + +template inline MatIterator_<_Tp>::MatIterator_(Mat_<_Tp>* _m) + : MatConstIterator_<_Tp>(_m) {} + +template inline MatIterator_<_Tp>::MatIterator_(Mat_<_Tp>* _m, int _row, int _col) + : MatConstIterator_<_Tp>(_m, _row, _col) {} + +template inline MatIterator_<_Tp>::MatIterator_(const Mat_<_Tp>* _m, Point _pt) + : MatConstIterator_<_Tp>(_m, _pt) {} + +template inline MatIterator_<_Tp>::MatIterator_(const Mat_<_Tp>* _m, const int* _idx) + : MatConstIterator_<_Tp>(_m, _idx) {} + +template inline MatIterator_<_Tp>::MatIterator_(const MatIterator_& it) + : MatConstIterator_<_Tp>(it) {} + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator = (const MatIterator_<_Tp>& it ) +{ + MatConstIterator::operator = (it); + return *this; +} + +template inline _Tp& MatIterator_<_Tp>::operator *() const { return *(_Tp*)(this->ptr); } + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator += (ptrdiff_t ofs) +{ + MatConstIterator::operator += (ofs); + return *this; +} + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator -= (ptrdiff_t 
ofs) +{ + MatConstIterator::operator += (-ofs); + return *this; +} + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator --() +{ + MatConstIterator::operator --(); + return *this; +} + +template inline MatIterator_<_Tp> MatIterator_<_Tp>::operator --(int) +{ + MatIterator_ b = *this; + MatConstIterator::operator --(); + return b; +} + +template inline MatIterator_<_Tp>& MatIterator_<_Tp>::operator ++() +{ + MatConstIterator::operator ++(); + return *this; +} + +template inline MatIterator_<_Tp> MatIterator_<_Tp>::operator ++(int) +{ + MatIterator_ b = *this; + MatConstIterator::operator ++(); + return b; +} + +template inline Point MatConstIterator_<_Tp>::pos() const +{ + if( !m ) + return Point(); + CV_DbgAssert( m->dims <= 2 ); + if( m->isContinuous() ) + { + ptrdiff_t ofs = (const _Tp*)ptr - (const _Tp*)m->data; + int y = (int)(ofs / m->cols), x = (int)(ofs - (ptrdiff_t)y*m->cols); + return Point(x, y); + } + else + { + ptrdiff_t ofs = (uchar*)ptr - m->data; + int y = (int)(ofs / m->step), x = (int)((ofs - y*m->step)/sizeof(_Tp)); + return Point(x, y); + } +} + +static inline bool +operator == (const MatConstIterator& a, const MatConstIterator& b) +{ return a.m == b.m && a.ptr == b.ptr; } + +template static inline bool +operator != (const MatConstIterator& a, const MatConstIterator& b) +{ return !(a == b); } + +template static inline bool +operator == (const MatConstIterator_<_Tp>& a, const MatConstIterator_<_Tp>& b) +{ return a.m == b.m && a.ptr == b.ptr; } + +template static inline bool +operator != (const MatConstIterator_<_Tp>& a, const MatConstIterator_<_Tp>& b) +{ return a.m != b.m || a.ptr != b.ptr; } + +template static inline bool +operator == (const MatIterator_<_Tp>& a, const MatIterator_<_Tp>& b) +{ return a.m == b.m && a.ptr == b.ptr; } + +template static inline bool +operator != (const MatIterator_<_Tp>& a, const MatIterator_<_Tp>& b) +{ return a.m != b.m || a.ptr != b.ptr; } + +static inline bool +operator < (const MatConstIterator& a, 
const MatConstIterator& b) +{ return a.ptr < b.ptr; } + +static inline bool +operator > (const MatConstIterator& a, const MatConstIterator& b) +{ return a.ptr > b.ptr; } + +static inline bool +operator <= (const MatConstIterator& a, const MatConstIterator& b) +{ return a.ptr <= b.ptr; } + +static inline bool +operator >= (const MatConstIterator& a, const MatConstIterator& b) +{ return a.ptr >= b.ptr; } + +CV_EXPORTS ptrdiff_t operator - (const MatConstIterator& b, const MatConstIterator& a); + +static inline MatConstIterator operator + (const MatConstIterator& a, ptrdiff_t ofs) +{ MatConstIterator b = a; return b += ofs; } + +static inline MatConstIterator operator + (ptrdiff_t ofs, const MatConstIterator& a) +{ MatConstIterator b = a; return b += ofs; } + +static inline MatConstIterator operator - (const MatConstIterator& a, ptrdiff_t ofs) +{ MatConstIterator b = a; return b += -ofs; } + +template static inline MatConstIterator_<_Tp> +operator + (const MatConstIterator_<_Tp>& a, ptrdiff_t ofs) +{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatConstIterator_<_Tp>&)t; } + +template static inline MatConstIterator_<_Tp> +operator + (ptrdiff_t ofs, const MatConstIterator_<_Tp>& a) +{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatConstIterator_<_Tp>&)t; } + +template static inline MatConstIterator_<_Tp> +operator - (const MatConstIterator_<_Tp>& a, ptrdiff_t ofs) +{ MatConstIterator t = (const MatConstIterator&)a - ofs; return (MatConstIterator_<_Tp>&)t; } + +inline uchar* MatConstIterator::operator [](ptrdiff_t i) const +{ return *(*this + i); } + +template inline _Tp MatConstIterator_<_Tp>::operator [](ptrdiff_t i) const +{ return *(_Tp*)MatConstIterator::operator [](i); } + +template static inline MatIterator_<_Tp> +operator + (const MatIterator_<_Tp>& a, ptrdiff_t ofs) +{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatIterator_<_Tp>&)t; } + +template static inline MatIterator_<_Tp> +operator + (ptrdiff_t ofs, 
const MatIterator_<_Tp>& a) +{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatIterator_<_Tp>&)t; } + +template static inline MatIterator_<_Tp> +operator - (const MatIterator_<_Tp>& a, ptrdiff_t ofs) +{ MatConstIterator t = (const MatConstIterator&)a - ofs; return (MatIterator_<_Tp>&)t; } + +template inline _Tp& MatIterator_<_Tp>::operator [](ptrdiff_t i) const +{ return *(*this + i); } + +template inline MatConstIterator_<_Tp> Mat_<_Tp>::begin() const +{ return Mat::begin<_Tp>(); } + +template inline MatConstIterator_<_Tp> Mat_<_Tp>::end() const +{ return Mat::end<_Tp>(); } + +template inline MatIterator_<_Tp> Mat_<_Tp>::begin() +{ return Mat::begin<_Tp>(); } + +template inline MatIterator_<_Tp> Mat_<_Tp>::end() +{ return Mat::end<_Tp>(); } + +template inline MatCommaInitializer_<_Tp>::MatCommaInitializer_(Mat_<_Tp>* _m) : it(_m) {} + +template template inline MatCommaInitializer_<_Tp>& +MatCommaInitializer_<_Tp>::operator , (T2 v) +{ + CV_DbgAssert( this->it < ((const Mat_<_Tp>*)this->it.m)->end() ); + *this->it = _Tp(v); ++this->it; + return *this; +} + +template inline Mat_<_Tp> MatCommaInitializer_<_Tp>::operator *() const +{ + CV_DbgAssert( this->it == ((const Mat_<_Tp>*)this->it.m)->end() ); + return Mat_<_Tp>(*this->it.m); +} + +template inline MatCommaInitializer_<_Tp>::operator Mat_<_Tp>() const +{ + CV_DbgAssert( this->it == ((const Mat_<_Tp>*)this->it.m)->end() ); + return Mat_<_Tp>(*this->it.m); +} + +template static inline MatCommaInitializer_<_Tp> +operator << (const Mat_<_Tp>& m, T2 val) +{ + MatCommaInitializer_<_Tp> commaInitializer((Mat_<_Tp>*)&m); + return (commaInitializer, val); +} + +//////////////////////////////// SparseMat //////////////////////////////// + +inline SparseMat::SparseMat() +: flags(MAGIC_VAL), hdr(0) +{ +} + +inline SparseMat::SparseMat(int _dims, const int* _sizes, int _type) +: flags(MAGIC_VAL), hdr(0) +{ + create(_dims, _sizes, _type); +} + +inline SparseMat::SparseMat(const SparseMat& m) +: 
flags(m.flags), hdr(m.hdr) +{ + addref(); +} + +inline SparseMat::~SparseMat() +{ + release(); +} + +inline SparseMat& SparseMat::operator = (const SparseMat& m) +{ + if( this != &m ) + { + if( m.hdr ) + CV_XADD(&m.hdr->refcount, 1); + release(); + flags = m.flags; + hdr = m.hdr; + } + return *this; +} + +inline SparseMat& SparseMat::operator = (const Mat& m) +{ return (*this = SparseMat(m)); } + +inline SparseMat SparseMat::clone() const +{ + SparseMat temp; + this->copyTo(temp); + return temp; +} + + +inline void SparseMat::assignTo( SparseMat& m, int type ) const +{ + if( type < 0 ) + m = *this; + else + convertTo(m, type); +} + +inline void SparseMat::addref() +{ if( hdr ) CV_XADD(&hdr->refcount, 1); } + +inline void SparseMat::release() +{ + if( hdr && CV_XADD(&hdr->refcount, -1) == 1 ) + delete hdr; + hdr = 0; +} + +inline size_t SparseMat::elemSize() const +{ return CV_ELEM_SIZE(flags); } + +inline size_t SparseMat::elemSize1() const +{ return CV_ELEM_SIZE1(flags); } + +inline int SparseMat::type() const +{ return CV_MAT_TYPE(flags); } + +inline int SparseMat::depth() const +{ return CV_MAT_DEPTH(flags); } + +inline int SparseMat::channels() const +{ return CV_MAT_CN(flags); } + +inline const int* SparseMat::size() const +{ + return hdr ? hdr->size : 0; +} + +inline int SparseMat::size(int i) const +{ + if( hdr ) + { + CV_DbgAssert((unsigned)i < (unsigned)hdr->dims); + return hdr->size[i]; + } + return 0; +} + +inline int SparseMat::dims() const +{ + return hdr ? hdr->dims : 0; +} + +inline size_t SparseMat::nzcount() const +{ + return hdr ? 
hdr->nodeCount : 0; +} + +inline size_t SparseMat::hash(int i0) const +{ + return (size_t)i0; +} + +inline size_t SparseMat::hash(int i0, int i1) const +{ + return (size_t)(unsigned)i0*HASH_SCALE + (unsigned)i1; +} + +inline size_t SparseMat::hash(int i0, int i1, int i2) const +{ + return ((size_t)(unsigned)i0*HASH_SCALE + (unsigned)i1)*HASH_SCALE + (unsigned)i2; +} + +inline size_t SparseMat::hash(const int* idx) const +{ + size_t h = (unsigned)idx[0]; + if( !hdr ) + return 0; + int i, d = hdr->dims; + for( i = 1; i < d; i++ ) + h = h*HASH_SCALE + (unsigned)idx[i]; + return h; +} + +template inline _Tp& SparseMat::ref(int i0, size_t* hashval) +{ return *(_Tp*)((SparseMat*)this)->ptr(i0, true, hashval); } + +template inline _Tp& SparseMat::ref(int i0, int i1, size_t* hashval) +{ return *(_Tp*)((SparseMat*)this)->ptr(i0, i1, true, hashval); } + +template inline _Tp& SparseMat::ref(int i0, int i1, int i2, size_t* hashval) +{ return *(_Tp*)((SparseMat*)this)->ptr(i0, i1, i2, true, hashval); } + +template inline _Tp& SparseMat::ref(const int* idx, size_t* hashval) +{ return *(_Tp*)((SparseMat*)this)->ptr(idx, true, hashval); } + +template inline _Tp SparseMat::value(int i0, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, false, hashval); + return p ? *p : _Tp(); +} + +template inline _Tp SparseMat::value(int i0, int i1, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, i1, false, hashval); + return p ? *p : _Tp(); +} + +template inline _Tp SparseMat::value(int i0, int i1, int i2, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(i0, i1, i2, false, hashval); + return p ? *p : _Tp(); +} + +template inline _Tp SparseMat::value(const int* idx, size_t* hashval) const +{ + const _Tp* p = (const _Tp*)((SparseMat*)this)->ptr(idx, false, hashval); + return p ? 
*p : _Tp(); +} + +template inline const _Tp* SparseMat::find(int i0, size_t* hashval) const +{ return (const _Tp*)((SparseMat*)this)->ptr(i0, false, hashval); } + +template inline const _Tp* SparseMat::find(int i0, int i1, size_t* hashval) const +{ return (const _Tp*)((SparseMat*)this)->ptr(i0, i1, false, hashval); } + +template inline const _Tp* SparseMat::find(int i0, int i1, int i2, size_t* hashval) const +{ return (const _Tp*)((SparseMat*)this)->ptr(i0, i1, i2, false, hashval); } + +template inline const _Tp* SparseMat::find(const int* idx, size_t* hashval) const +{ return (const _Tp*)((SparseMat*)this)->ptr(idx, false, hashval); } + +template inline _Tp& SparseMat::value(Node* n) +{ return *(_Tp*)((uchar*)n + hdr->valueOffset); } + +template inline const _Tp& SparseMat::value(const Node* n) const +{ return *(const _Tp*)((const uchar*)n + hdr->valueOffset); } + +inline SparseMat::Node* SparseMat::node(size_t nidx) +{ return (Node*)&hdr->pool[nidx]; } + +inline const SparseMat::Node* SparseMat::node(size_t nidx) const +{ return (const Node*)&hdr->pool[nidx]; } + +inline SparseMatIterator SparseMat::begin() +{ return SparseMatIterator(this); } + +inline SparseMatConstIterator SparseMat::begin() const +{ return SparseMatConstIterator(this); } + +inline SparseMatIterator SparseMat::end() +{ SparseMatIterator it(this); it.seekEnd(); return it; } + +inline SparseMatConstIterator SparseMat::end() const +{ SparseMatConstIterator it(this); it.seekEnd(); return it; } + +template inline SparseMatIterator_<_Tp> SparseMat::begin() +{ return SparseMatIterator_<_Tp>(this); } + +template inline SparseMatConstIterator_<_Tp> SparseMat::begin() const +{ return SparseMatConstIterator_<_Tp>(this); } + +template inline SparseMatIterator_<_Tp> SparseMat::end() +{ SparseMatIterator_<_Tp> it(this); it.seekEnd(); return it; } + +template inline SparseMatConstIterator_<_Tp> SparseMat::end() const +{ SparseMatConstIterator_<_Tp> it(this); it.seekEnd(); return it; } + + +inline 
SparseMatConstIterator::SparseMatConstIterator() +: m(0), hashidx(0), ptr(0) +{ +} + +inline SparseMatConstIterator::SparseMatConstIterator(const SparseMatConstIterator& it) +: m(it.m), hashidx(it.hashidx), ptr(it.ptr) +{ +} + +static inline bool operator == (const SparseMatConstIterator& it1, const SparseMatConstIterator& it2) +{ return it1.m == it2.m && it1.hashidx == it2.hashidx && it1.ptr == it2.ptr; } + +static inline bool operator != (const SparseMatConstIterator& it1, const SparseMatConstIterator& it2) +{ return !(it1 == it2); } + + +inline SparseMatConstIterator& SparseMatConstIterator::operator = (const SparseMatConstIterator& it) +{ + if( this != &it ) + { + m = it.m; + hashidx = it.hashidx; + ptr = it.ptr; + } + return *this; +} + +template inline const _Tp& SparseMatConstIterator::value() const +{ return *(_Tp*)ptr; } + +inline const SparseMat::Node* SparseMatConstIterator::node() const +{ + return ptr && m && m->hdr ? + (const SparseMat::Node*)(ptr - m->hdr->valueOffset) : 0; +} + +inline SparseMatConstIterator SparseMatConstIterator::operator ++(int) +{ + SparseMatConstIterator it = *this; + ++*this; + return it; +} + + +inline void SparseMatConstIterator::seekEnd() +{ + if( m && m->hdr ) + { + hashidx = m->hdr->hashtab.size(); + ptr = 0; + } +} + +inline SparseMatIterator::SparseMatIterator() +{} + +inline SparseMatIterator::SparseMatIterator(SparseMat* _m) +: SparseMatConstIterator(_m) +{} + +inline SparseMatIterator::SparseMatIterator(const SparseMatIterator& it) +: SparseMatConstIterator(it) +{ +} + +inline SparseMatIterator& SparseMatIterator::operator = (const SparseMatIterator& it) +{ + (SparseMatConstIterator&)*this = it; + return *this; +} + +template inline _Tp& SparseMatIterator::value() const +{ return *(_Tp*)ptr; } + +inline SparseMat::Node* SparseMatIterator::node() const +{ + return (SparseMat::Node*)SparseMatConstIterator::node(); +} + +inline SparseMatIterator& SparseMatIterator::operator ++() +{ + SparseMatConstIterator::operator 
++(); + return *this; +} + +inline SparseMatIterator SparseMatIterator::operator ++(int) +{ + SparseMatIterator it = *this; + ++*this; + return it; +} + + +template inline SparseMat_<_Tp>::SparseMat_() +{ flags = MAGIC_VAL | DataType<_Tp>::type; } + +template inline SparseMat_<_Tp>::SparseMat_(int _dims, const int* _sizes) +: SparseMat(_dims, _sizes, DataType<_Tp>::type) +{} + +template inline SparseMat_<_Tp>::SparseMat_(const SparseMat& m) +{ + if( m.type() == DataType<_Tp>::type ) + *this = (const SparseMat_<_Tp>&)m; + else + m.convertTo(this, DataType<_Tp>::type); +} + +template inline SparseMat_<_Tp>::SparseMat_(const SparseMat_<_Tp>& m) +{ + this->flags = m.flags; + this->hdr = m.hdr; + if( this->hdr ) + CV_XADD(&this->hdr->refcount, 1); +} + +template inline SparseMat_<_Tp>::SparseMat_(const Mat& m) +{ + SparseMat sm(m); + *this = sm; +} + +template inline SparseMat_<_Tp>::SparseMat_(const CvSparseMat* m) +{ + SparseMat sm(m); + *this = sm; +} + +template inline SparseMat_<_Tp>& +SparseMat_<_Tp>::operator = (const SparseMat_<_Tp>& m) +{ + if( this != &m ) + { + if( m.hdr ) CV_XADD(&m.hdr->refcount, 1); + release(); + flags = m.flags; + hdr = m.hdr; + } + return *this; +} + +template inline SparseMat_<_Tp>& +SparseMat_<_Tp>::operator = (const SparseMat& m) +{ + if( m.type() == DataType<_Tp>::type ) + return (*this = (const SparseMat_<_Tp>&)m); + m.convertTo(*this, DataType<_Tp>::type); + return *this; +} + +template inline SparseMat_<_Tp>& +SparseMat_<_Tp>::operator = (const Mat& m) +{ return (*this = SparseMat(m)); } + +template inline SparseMat_<_Tp> +SparseMat_<_Tp>::clone() const +{ + SparseMat_<_Tp> m; + this->copyTo(m); + return m; +} + +template inline void +SparseMat_<_Tp>::create(int _dims, const int* _sizes) +{ + SparseMat::create(_dims, _sizes, DataType<_Tp>::type); +} + +template inline +SparseMat_<_Tp>::operator CvSparseMat*() const +{ + return SparseMat::operator CvSparseMat*(); +} + +template inline int SparseMat_<_Tp>::type() const +{ return 
DataType<_Tp>::type; } + +template inline int SparseMat_<_Tp>::depth() const +{ return DataType<_Tp>::depth; } + +template inline int SparseMat_<_Tp>::channels() const +{ return DataType<_Tp>::channels; } + +template inline _Tp& +SparseMat_<_Tp>::ref(int i0, size_t* hashval) +{ return SparseMat::ref<_Tp>(i0, hashval); } + +template inline _Tp +SparseMat_<_Tp>::operator()(int i0, size_t* hashval) const +{ return SparseMat::value<_Tp>(i0, hashval); } + +template inline _Tp& +SparseMat_<_Tp>::ref(int i0, int i1, size_t* hashval) +{ return SparseMat::ref<_Tp>(i0, i1, hashval); } + +template inline _Tp +SparseMat_<_Tp>::operator()(int i0, int i1, size_t* hashval) const +{ return SparseMat::value<_Tp>(i0, i1, hashval); } + +template inline _Tp& +SparseMat_<_Tp>::ref(int i0, int i1, int i2, size_t* hashval) +{ return SparseMat::ref<_Tp>(i0, i1, i2, hashval); } + +template inline _Tp +SparseMat_<_Tp>::operator()(int i0, int i1, int i2, size_t* hashval) const +{ return SparseMat::value<_Tp>(i0, i1, i2, hashval); } + +template inline _Tp& +SparseMat_<_Tp>::ref(const int* idx, size_t* hashval) +{ return SparseMat::ref<_Tp>(idx, hashval); } + +template inline _Tp +SparseMat_<_Tp>::operator()(const int* idx, size_t* hashval) const +{ return SparseMat::value<_Tp>(idx, hashval); } + +template inline SparseMatIterator_<_Tp> SparseMat_<_Tp>::begin() +{ return SparseMatIterator_<_Tp>(this); } + +template inline SparseMatConstIterator_<_Tp> SparseMat_<_Tp>::begin() const +{ return SparseMatConstIterator_<_Tp>(this); } + +template inline SparseMatIterator_<_Tp> SparseMat_<_Tp>::end() +{ SparseMatIterator_<_Tp> it(this); it.seekEnd(); return it; } + +template inline SparseMatConstIterator_<_Tp> SparseMat_<_Tp>::end() const +{ SparseMatConstIterator_<_Tp> it(this); it.seekEnd(); return it; } + +template inline +SparseMatConstIterator_<_Tp>::SparseMatConstIterator_() +{} + +template inline +SparseMatConstIterator_<_Tp>::SparseMatConstIterator_(const SparseMat_<_Tp>* _m) +: 
SparseMatConstIterator(_m) +{} + +template inline +SparseMatConstIterator_<_Tp>::SparseMatConstIterator_(const SparseMatConstIterator_<_Tp>& it) +: SparseMatConstIterator(it) +{} + +template inline SparseMatConstIterator_<_Tp>& +SparseMatConstIterator_<_Tp>::operator = (const SparseMatConstIterator_<_Tp>& it) +{ return ((SparseMatConstIterator&)*this = it); } + +template inline const _Tp& +SparseMatConstIterator_<_Tp>::operator *() const +{ return *(const _Tp*)this->ptr; } + +template inline SparseMatConstIterator_<_Tp>& +SparseMatConstIterator_<_Tp>::operator ++() +{ + SparseMatConstIterator::operator ++(); + return *this; +} + +template inline SparseMatConstIterator_<_Tp> +SparseMatConstIterator_<_Tp>::operator ++(int) +{ + SparseMatConstIterator it = *this; + SparseMatConstIterator::operator ++(); + return it; +} + +template inline +SparseMatIterator_<_Tp>::SparseMatIterator_() +{} + +template inline +SparseMatIterator_<_Tp>::SparseMatIterator_(SparseMat_<_Tp>* _m) +: SparseMatConstIterator_<_Tp>(_m) +{} + +template inline +SparseMatIterator_<_Tp>::SparseMatIterator_(const SparseMatIterator_<_Tp>& it) +: SparseMatConstIterator_<_Tp>(it) +{} + +template inline SparseMatIterator_<_Tp>& +SparseMatIterator_<_Tp>::operator = (const SparseMatIterator_<_Tp>& it) +{ return ((SparseMatIterator&)*this = it); } + +template inline _Tp& +SparseMatIterator_<_Tp>::operator *() const +{ return *(_Tp*)this->ptr; } + +template inline SparseMatIterator_<_Tp>& +SparseMatIterator_<_Tp>::operator ++() +{ + SparseMatConstIterator::operator ++(); + return *this; +} + +template inline SparseMatIterator_<_Tp> +SparseMatIterator_<_Tp>::operator ++(int) +{ + SparseMatIterator it = *this; + SparseMatConstIterator::operator ++(); + return it; +} + +} + +#endif +#endif diff --git a/opencv2/core/operations.hpp b/opencv2/core/operations.hpp new file mode 100644 index 0000000..ee37b70 --- /dev/null +++ b/opencv2/core/operations.hpp @@ -0,0 +1,3628 @@ 
+/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_CORE_OPERATIONS_HPP__ +#define __OPENCV_CORE_OPERATIONS_HPP__ + +#ifndef SKIP_INCLUDES + #include + #include +#endif // SKIP_INCLUDES + +#ifdef __cplusplus + +/////// exchange-add operation for atomic operations on reference counters /////// +#ifdef __GNUC__ + + #if __GNUC__*10 + __GNUC_MINOR__ >= 42 + + #if !defined WIN32 && (defined __i486__ || defined __i586__ || \ + defined __i686__ || defined __MMX__ || defined __SSE__ || defined __ppc__) + #define CV_XADD __sync_fetch_and_add + #else + #include + #define CV_XADD __gnu_cxx::__exchange_and_add + #endif + + #else + #include + #if __GNUC__*10 + __GNUC_MINOR__ >= 34 + #define CV_XADD __gnu_cxx::__exchange_and_add + #else + #define CV_XADD __exchange_and_add + #endif + #endif + +#elif defined WIN32 || defined _WIN32 + #include + #define CV_XADD(addr,delta) _InterlockedExchangeAdd((long volatile*)(addr), (delta)) +#else + + template static inline _Tp CV_XADD(_Tp* addr, _Tp delta) + { int tmp = *addr; *addr += delta; return tmp; } + +#endif + +#include + +namespace cv +{ + +using std::cos; +using std::sin; +using std::max; +using std::min; +using std::exp; +using std::log; +using std::pow; +using std::sqrt; + + +/////////////// saturate_cast (used in image & signal processing) /////////////////// + +template static inline _Tp saturate_cast(uchar v) { return _Tp(v); } +template static inline _Tp saturate_cast(schar v) { return _Tp(v); } +template static 
inline _Tp saturate_cast(ushort v) { return _Tp(v); } +template static inline _Tp saturate_cast(short v) { return _Tp(v); } +template static inline _Tp saturate_cast(unsigned v) { return _Tp(v); } +template static inline _Tp saturate_cast(int v) { return _Tp(v); } +template static inline _Tp saturate_cast(float v) { return _Tp(v); } +template static inline _Tp saturate_cast(double v) { return _Tp(v); } + +template<> inline uchar saturate_cast(schar v) +{ return (uchar)std::max((int)v, 0); } +template<> inline uchar saturate_cast(ushort v) +{ return (uchar)std::min((unsigned)v, (unsigned)UCHAR_MAX); } +template<> inline uchar saturate_cast(int v) +{ return (uchar)((unsigned)v <= UCHAR_MAX ? v : v > 0 ? UCHAR_MAX : 0); } +template<> inline uchar saturate_cast(short v) +{ return saturate_cast((int)v); } +template<> inline uchar saturate_cast(unsigned v) +{ return (uchar)std::min(v, (unsigned)UCHAR_MAX); } +template<> inline uchar saturate_cast(float v) +{ int iv = cvRound(v); return saturate_cast(iv); } +template<> inline uchar saturate_cast(double v) +{ int iv = cvRound(v); return saturate_cast(iv); } + +template<> inline schar saturate_cast(uchar v) +{ return (schar)std::min((int)v, SCHAR_MAX); } +template<> inline schar saturate_cast(ushort v) +{ return (schar)std::min((unsigned)v, (unsigned)SCHAR_MAX); } +template<> inline schar saturate_cast(int v) +{ + return (schar)((unsigned)(v-SCHAR_MIN) <= (unsigned)UCHAR_MAX ? + v : v > 0 ? 
SCHAR_MAX : SCHAR_MIN); +} +template<> inline schar saturate_cast(short v) +{ return saturate_cast((int)v); } +template<> inline schar saturate_cast(unsigned v) +{ return (schar)std::min(v, (unsigned)SCHAR_MAX); } + +template<> inline schar saturate_cast(float v) +{ int iv = cvRound(v); return saturate_cast(iv); } +template<> inline schar saturate_cast(double v) +{ int iv = cvRound(v); return saturate_cast(iv); } + +template<> inline ushort saturate_cast(schar v) +{ return (ushort)std::max((int)v, 0); } +template<> inline ushort saturate_cast(short v) +{ return (ushort)std::max((int)v, 0); } +template<> inline ushort saturate_cast(int v) +{ return (ushort)((unsigned)v <= (unsigned)USHRT_MAX ? v : v > 0 ? USHRT_MAX : 0); } +template<> inline ushort saturate_cast(unsigned v) +{ return (ushort)std::min(v, (unsigned)USHRT_MAX); } +template<> inline ushort saturate_cast(float v) +{ int iv = cvRound(v); return saturate_cast(iv); } +template<> inline ushort saturate_cast(double v) +{ int iv = cvRound(v); return saturate_cast(iv); } + +template<> inline short saturate_cast(ushort v) +{ return (short)std::min((int)v, SHRT_MAX); } +template<> inline short saturate_cast(int v) +{ + return (short)((unsigned)(v - SHRT_MIN) <= (unsigned)USHRT_MAX ? + v : v > 0 ? SHRT_MAX : SHRT_MIN); +} +template<> inline short saturate_cast(unsigned v) +{ return (short)std::min(v, (unsigned)SHRT_MAX); } +template<> inline short saturate_cast(float v) +{ int iv = cvRound(v); return saturate_cast(iv); } +template<> inline short saturate_cast(double v) +{ int iv = cvRound(v); return saturate_cast(iv); } + +template<> inline int saturate_cast(float v) { return cvRound(v); } +template<> inline int saturate_cast(double v) { return cvRound(v); } + +// we intentionally do not clip negative numbers, to make -1 become 0xffffffff etc. 
+template<> inline unsigned saturate_cast(float v){ return cvRound(v); } +template<> inline unsigned saturate_cast(double v) { return cvRound(v); } + + +//////////////////////////////// Matx ///////////////////////////////// + + +template inline Matx<_Tp, m, n>::Matx() +{ + for(int i = 0; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0) +{ + val[0] = v0; + for(int i = 1; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1) +{ + assert(channels >= 2); + val[0] = v0; val[1] = v1; + for(int i = 2; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2) +{ + assert(channels >= 3); + val[0] = v0; val[1] = v1; val[2] = v2; + for(int i = 3; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3) +{ + assert(channels >= 4); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + for(int i = 4; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4) +{ + assert(channels >= 5); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; val[4] = v4; + for(int i = 5; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5) +{ + assert(channels >= 6); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; + for(int i = 6; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6) +{ + assert(channels >= 7); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; + for(int i = 7; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7) +{ + assert(channels >= 8); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] 
= v5; val[6] = v6; val[7] = v7; + for(int i = 8; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8) +{ + assert(channels >= 9); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; + for(int i = 9; i < channels; i++) val[i] = _Tp(0); +} + +template inline Matx<_Tp, m, n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9) +{ + assert(channels >= 10); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; val[9] = v9; + for(int i = 10; i < channels; i++) val[i] = _Tp(0); +} + + +template +inline Matx<_Tp,m,n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11) +{ + assert(channels == 12); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; val[9] = v9; val[10] = v10; val[11] = v11; +} + +template +inline Matx<_Tp,m,n>::Matx(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9, _Tp v10, _Tp v11, + _Tp v12, _Tp v13, _Tp v14, _Tp v15) +{ + assert(channels == 16); + val[0] = v0; val[1] = v1; val[2] = v2; val[3] = v3; + val[4] = v4; val[5] = v5; val[6] = v6; val[7] = v7; + val[8] = v8; val[9] = v9; val[10] = v10; val[11] = v11; + val[12] = v12; val[13] = v13; val[14] = v14; val[15] = v15; +} + +template inline Matx<_Tp, m, n>::Matx(const _Tp* values) +{ + for( int i = 0; i < channels; i++ ) val[i] = values[i]; +} + +template inline Matx<_Tp, m, n> Matx<_Tp, m, n>::all(_Tp alpha) +{ + Matx<_Tp, m, n> M; + for( int i = 0; i < m*n; i++ ) M.val[i] = alpha; + return M; +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::zeros() +{ + return all(0); +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::ones() +{ + return all(1); +} + +template inline 
+Matx<_Tp,m,n> Matx<_Tp,m,n>::eye() +{ + Matx<_Tp,m,n> M; + for(int i = 0; i < MIN(m,n); i++) + M(i,i) = 1; + return M; +} + +template inline _Tp Matx<_Tp, m, n>::dot(const Matx<_Tp, m, n>& M) const +{ + _Tp s = 0; + for( int i = 0; i < m*n; i++ ) s += val[i]*M.val[i]; + return s; +} + + +template inline double Matx<_Tp, m, n>::ddot(const Matx<_Tp, m, n>& M) const +{ + double s = 0; + for( int i = 0; i < m*n; i++ ) s += (double)val[i]*M.val[i]; + return s; +} + + + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::diag(const Matx<_Tp,MIN(m,n),1>& d) +{ + Matx<_Tp,m,n> M; + for(int i = 0; i < MIN(m,n); i++) + M(i,i) = d(i, 0); + return M; +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::randu(_Tp a, _Tp b) +{ + Matx<_Tp,m,n> M; + Mat matM(M, false); + cv::randu(matM, Scalar(a), Scalar(b)); + return M; +} + +template inline +Matx<_Tp,m,n> Matx<_Tp,m,n>::randn(_Tp a, _Tp b) +{ + Matx<_Tp,m,n> M; + Mat matM(M, false); + cv::randn(matM, Scalar(a), Scalar(b)); + return M; +} + +template template +inline Matx<_Tp, m, n>::operator Matx() const +{ + Matx M; + for( int i = 0; i < m*n; i++ ) M.val[i] = saturate_cast(val[i]); + return M; +} + + +template template inline +Matx<_Tp, m1, n1> Matx<_Tp, m, n>::reshape() const +{ + CV_DbgAssert(m1*n1 == m*n); + return (const Matx<_Tp, m1, n1>&)*this; +} + + +template +template inline +Matx<_Tp, m1, n1> Matx<_Tp, m, n>::get_minor(int i, int j) const +{ + CV_DbgAssert(0 <= i && i+m1 <= m && 0 <= j && j+n1 <= n); + Matx<_Tp, m1, n1> s; + for( int di = 0; di < m1; di++ ) + for( int dj = 0; dj < n1; dj++ ) + s(di, dj) = (*this)(i+di, j+dj); + return s; +} + + +template inline +Matx<_Tp, 1, n> Matx<_Tp, m, n>::row(int i) const +{ + CV_DbgAssert((unsigned)i < (unsigned)m); + return Matx<_Tp, 1, n>(&val[i*n]); +} + + +template inline +Matx<_Tp, m, 1> Matx<_Tp, m, n>::col(int j) const +{ + CV_DbgAssert((unsigned)j < (unsigned)n); + Matx<_Tp, m, 1> v; + for( int i = 0; i < m; i++ ) + v[i] = val[i*n + j]; + return v; +} + + +template inline 
+Matx<_Tp, MIN(m,n), 1> Matx<_Tp, m, n>::diag() const +{ + diag_type d; + for( int i = 0; i < MIN(m, n); i++ ) + d.val[i] = val[i*n + i]; + return d; +} + + +template inline +const _Tp& Matx<_Tp, m, n>::operator ()(int i, int j) const +{ + CV_DbgAssert( (unsigned)i < (unsigned)m && (unsigned)j < (unsigned)n ); + return this->val[i*n + j]; +} + + +template inline +_Tp& Matx<_Tp, m, n>::operator ()(int i, int j) +{ + CV_DbgAssert( (unsigned)i < (unsigned)m && (unsigned)j < (unsigned)n ); + return val[i*n + j]; +} + + +template inline +const _Tp& Matx<_Tp, m, n>::operator ()(int i) const +{ + CV_DbgAssert( (m == 1 || n == 1) && (unsigned)i < (unsigned)(m+n-1) ); + return val[i]; +} + + +template inline +_Tp& Matx<_Tp, m, n>::operator ()(int i) +{ + CV_DbgAssert( (m == 1 || n == 1) && (unsigned)i < (unsigned)(m+n-1) ); + return val[i]; +} + + +template static inline +Matx<_Tp1, m, n>& operator += (Matx<_Tp1, m, n>& a, const Matx<_Tp2, m, n>& b) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] + b.val[i]); + return a; +} + + +template static inline +Matx<_Tp1, m, n>& operator -= (Matx<_Tp1, m, n>& a, const Matx<_Tp2, m, n>& b) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] - b.val[i]); + return a; +} + + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_AddOp) +{ + for( int i = 0; i < m*n; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] + b.val[i]); +} + + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_SubOp) +{ + for( int i = 0; i < m*n; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] - b.val[i]); +} + + +template template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, _T2 alpha, Matx_ScaleOp) +{ + for( int i = 0; i < m*n; i++ ) + val[i] = saturate_cast<_Tp>(a.val[i] * alpha); +} + + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b, Matx_MulOp) +{ + for( int i = 0; i < m*n; i++ 
) + val[i] = saturate_cast<_Tp>(a.val[i] * b.val[i]); +} + + +template template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b, Matx_MatMulOp) +{ + for( int i = 0; i < m; i++ ) + for( int j = 0; j < n; j++ ) + { + _Tp s = 0; + for( int k = 0; k < l; k++ ) + s += a(i, k) * b(k, j); + val[i*n + j] = s; + } +} + + +template inline +Matx<_Tp,m,n>::Matx(const Matx<_Tp, n, m>& a, Matx_TOp) +{ + for( int i = 0; i < m; i++ ) + for( int j = 0; j < n; j++ ) + val[i*n + j] = a(j, i); +} + + +template static inline +Matx<_Tp, m, n> operator + (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + return Matx<_Tp, m, n>(a, b, Matx_AddOp()); +} + + +template static inline +Matx<_Tp, m, n> operator - (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + return Matx<_Tp, m, n>(a, b, Matx_SubOp()); +} + + +template static inline +Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, int alpha) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha); + return a; +} + +template static inline +Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, float alpha) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha); + return a; +} + +template static inline +Matx<_Tp, m, n>& operator *= (Matx<_Tp, m, n>& a, double alpha) +{ + for( int i = 0; i < m*n; i++ ) + a.val[i] = saturate_cast<_Tp>(a.val[i] * alpha); + return a; +} + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, int alpha) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, float alpha) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, n>& a, double alpha) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (int alpha, const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, alpha, 
Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (float alpha, const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator * (double alpha, const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, alpha, Matx_ScaleOp()); +} + +template static inline +Matx<_Tp, m, n> operator - (const Matx<_Tp, m, n>& a) +{ + return Matx<_Tp, m, n>(a, -1, Matx_ScaleOp()); +} + + +template static inline +Matx<_Tp, m, n> operator * (const Matx<_Tp, m, l>& a, const Matx<_Tp, l, n>& b) +{ + return Matx<_Tp, m, n>(a, b, Matx_MatMulOp()); +} + + +template static inline +Point_<_Tp> operator * (const Matx<_Tp, 2, 2>& a, const Point_<_Tp>& b) +{ + return Point_<_Tp>(a*Vec<_Tp,2>(b.x,b.y)); +} + + +template static inline +Point3_<_Tp> operator * (const Matx<_Tp, 3, 3>& a, const Point3_<_Tp>& b) +{ + return Point3_<_Tp>(a*Vec<_Tp,3>(b.x,b.y,b.z)); +} + + +template static inline +Point3_<_Tp> operator * (const Matx<_Tp, 3, 3>& a, const Point_<_Tp>& b) +{ + return Point3_<_Tp>(a*Vec<_Tp,3>(b.x, b.y, 1)); +} + + +template static inline +Matx<_Tp, 4, 1> operator * (const Matx<_Tp, 4, 4>& a, const Point3_<_Tp>& b) +{ + return a*Matx<_Tp, 4, 1>(b.x, b.y, b.z, 1); +} + + +template static inline +Scalar operator * (const Matx<_Tp, 4, 4>& a, const Scalar& b) +{ + return Scalar(a*Matx<_Tp, 4, 1>(b[0],b[1],b[2],b[3])); +} + + +template inline +Matx<_Tp, m, n> Matx<_Tp, m, n>::mul(const Matx<_Tp, m, n>& a) const +{ + return Matx<_Tp, m, n>(*this, a, Matx_MulOp()); +} + + +CV_EXPORTS int LU(float* A, size_t astep, int m, float* b, size_t bstep, int n); +CV_EXPORTS int LU(double* A, size_t astep, int m, double* b, size_t bstep, int n); +CV_EXPORTS bool Cholesky(float* A, size_t astep, int m, float* b, size_t bstep, int n); +CV_EXPORTS bool Cholesky(double* A, size_t astep, int m, double* b, size_t bstep, int n); + + +template struct CV_EXPORTS Matx_DetOp +{ + double operator ()(const Matx<_Tp, m, m>& a) 
const + { + Matx<_Tp, m, m> temp = a; + double p = LU(temp.val, m, m, 0, 0, 0); + if( p == 0 ) + return p; + for( int i = 0; i < m; i++ ) + p *= temp(i, i); + return p; + } +}; + + +template struct CV_EXPORTS Matx_DetOp<_Tp, 1> +{ + double operator ()(const Matx<_Tp, 1, 1>& a) const + { + return a(0,0); + } +}; + + +template struct CV_EXPORTS Matx_DetOp<_Tp, 2> +{ + double operator ()(const Matx<_Tp, 2, 2>& a) const + { + return a(0,0)*a(1,1) - a(0,1)*a(1,0); + } +}; + + +template struct CV_EXPORTS Matx_DetOp<_Tp, 3> +{ + double operator ()(const Matx<_Tp, 3, 3>& a) const + { + return a(0,0)*(a(1,1)*a(2,2) - a(2,1)*a(1,2)) - + a(0,1)*(a(1,0)*a(2,2) - a(2,0)*a(1,2)) + + a(0,2)*(a(1,0)*a(2,1) - a(2,0)*a(1,1)); + } +}; + +template static inline +double determinant(const Matx<_Tp, m, m>& a) +{ + return Matx_DetOp<_Tp, m>()(a); +} + + +template static inline +double trace(const Matx<_Tp, m, n>& a) +{ + _Tp s = 0; + for( int i = 0; i < std::min(m, n); i++ ) + s += a(i,i); + return s; +} + + +template inline +Matx<_Tp, n, m> Matx<_Tp, m, n>::t() const +{ + return Matx<_Tp, n, m>(*this, Matx_TOp()); +} + + +template struct CV_EXPORTS Matx_FastInvOp +{ + bool operator()(const Matx<_Tp, m, m>& a, Matx<_Tp, m, m>& b, int method) const + { + Matx<_Tp, m, m> temp = a; + + // assume that b is all 0's on input => make it a unity matrix + for( int i = 0; i < m; i++ ) + b(i, i) = (_Tp)1; + + if( method == DECOMP_CHOLESKY ) + return Cholesky(temp.val, m*sizeof(_Tp), m, b.val, m*sizeof(_Tp), m); + + return LU(temp.val, m*sizeof(_Tp), m, b.val, m*sizeof(_Tp), m) != 0; + } +}; + + +template struct CV_EXPORTS Matx_FastInvOp<_Tp, 2> +{ + bool operator()(const Matx<_Tp, 2, 2>& a, Matx<_Tp, 2, 2>& b, int) const + { + _Tp d = determinant(a); + if( d == 0 ) + return false; + d = 1/d; + b(1,1) = a(0,0)*d; + b(0,0) = a(1,1)*d; + b(0,1) = -a(0,1)*d; + b(1,0) = -a(1,0)*d; + return true; + } +}; + + +template struct CV_EXPORTS Matx_FastInvOp<_Tp, 3> +{ + bool operator()(const Matx<_Tp, 3, 3>& a, 
Matx<_Tp, 3, 3>& b, int) const + { + _Tp d = determinant(a); + if( d == 0 ) + return false; + d = 1/d; + b(0,0) = (a(1,1) * a(2,2) - a(1,2) * a(2,1)) * d; + b(0,1) = (a(0,2) * a(2,1) - a(0,1) * a(2,2)) * d; + b(0,2) = (a(0,1) * a(1,2) - a(0,2) * a(1,1)) * d; + + b(1,0) = (a(1,2) * a(2,0) - a(1,0) * a(2,2)) * d; + b(1,1) = (a(0,0) * a(2,2) - a(0,2) * a(2,0)) * d; + b(1,2) = (a(0,2) * a(1,0) - a(0,0) * a(1,2)) * d; + + b(2,0) = (a(1,0) * a(2,1) - a(1,1) * a(2,0)) * d; + b(2,1) = (a(0,1) * a(2,0) - a(0,0) * a(2,1)) * d; + b(2,2) = (a(0,0) * a(1,1) - a(0,1) * a(1,0)) * d; + return true; + } +}; + + +template inline +Matx<_Tp, n, m> Matx<_Tp, m, n>::inv(int method) const +{ + Matx<_Tp, n, m> b; + bool ok; + if( method == DECOMP_LU || method == DECOMP_CHOLESKY ) + ok = Matx_FastInvOp<_Tp, m>()(*this, b, method); + else + { + Mat A(*this, false), B(b, false); + ok = invert(A, B, method); + } + return ok ? b : Matx<_Tp, n, m>::zeros(); +} + + +template struct CV_EXPORTS Matx_FastSolveOp +{ + bool operator()(const Matx<_Tp, m, m>& a, const Matx<_Tp, m, n>& b, + Matx<_Tp, m, n>& x, int method) const + { + Matx<_Tp, m, m> temp = a; + x = b; + if( method == DECOMP_CHOLESKY ) + return Cholesky(temp.val, m*sizeof(_Tp), m, x.val, n*sizeof(_Tp), n); + + return LU(temp.val, m*sizeof(_Tp), m, x.val, n*sizeof(_Tp), n) != 0; + } +}; + + +template struct CV_EXPORTS Matx_FastSolveOp<_Tp, 2, 1> +{ + bool operator()(const Matx<_Tp, 2, 2>& a, const Matx<_Tp, 2, 1>& b, + Matx<_Tp, 2, 1>& x, int method) const + { + _Tp d = determinant(a); + if( d == 0 ) + return false; + d = 1/d; + x(0) = (b(0)*a(1,1) - b(1)*a(0,1))*d; + x(1) = (b(1)*a(0,0) - b(0)*a(1,0))*d; + return true; + } +}; + + +template struct CV_EXPORTS Matx_FastSolveOp<_Tp, 3, 1> +{ + bool operator()(const Matx<_Tp, 3, 3>& a, const Matx<_Tp, 3, 1>& b, + Matx<_Tp, 3, 1>& x, int method) const + { + _Tp d = determinant(a); + if( d == 0 ) + return false; + d = 1/d; + x(0) = d*(b(0)*(a(1,1)*a(2,2) - a(1,2)*a(2,1)) - + 
a(0,1)*(b(1)*a(2,2) - a(1,2)*b(2)) + + a(0,2)*(b(1)*a(2,1) - a(1,1)*b(2))); + + x(1) = d*(a(0,0)*(b(1)*a(2,2) - a(1,2)*b(2)) - + b(0)*(a(1,0)*a(2,2) - a(1,2)*a(2,0)) + + a(0,2)*(a(1,0)*b(2) - b(1)*a(2,0))); + + x(2) = d*(a(0,0)*(a(1,1)*b(2) - b(1)*a(2,1)) - + a(0,1)*(a(1,0)*b(2) - b(1)*a(2,0)) + + b(0)*(a(1,0)*a(2,1) - a(1,1)*a(2,0))); + return true; + } +}; + + +template template inline +Matx<_Tp, n, l> Matx<_Tp, m, n>::solve(const Matx<_Tp, m, l>& rhs, int method) const +{ + Matx<_Tp, n, l> x; + bool ok; + if( method == DECOMP_LU || method == DECOMP_CHOLESKY ) + ok = Matx_FastSolveOp<_Tp, m, l>()(*this, rhs, x, method); + else + { + Mat A(*this, false), B(rhs, false), X(x, false); + ok = cv::solve(A, B, X, method); + } + + return ok ? x : Matx<_Tp, n, l>::zeros(); +} + + +template static inline +double norm(const Matx<_Tp, m, n>& M) +{ + double s = 0; + for( int i = 0; i < m*n; i++ ) + s += (double)M.val[i]*M.val[i]; + return std::sqrt(s); +} + + +template static inline +double norm(const Matx<_Tp, m, n>& M, int normType) +{ + if( normType == NORM_INF ) + { + _Tp s = 0; + for( int i = 0; i < m*n; i++ ) + s = std::max(s, std::abs(M.val[i])); + return s; + } + + if( normType == NORM_L1 ) + { + _Tp s = 0; + for( int i = 0; i < m*n; i++ ) + s += std::abs(M.val[i]); + return s; + } + + CV_DbgAssert( normType == NORM_L2 ); + return norm(M); +} + + +template static inline +bool operator == (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + for( int i = 0; i < m*n; i++ ) + if( a.val[i] != b.val[i] ) return false; + return true; +} + +template static inline +bool operator != (const Matx<_Tp, m, n>& a, const Matx<_Tp, m, n>& b) +{ + return !(a == b); +} + + +template static inline +MatxCommaInitializer<_Tp, m, n> operator << (const Matx<_Tp, m, n>& mtx, _T2 val) +{ + MatxCommaInitializer<_Tp, m, n> commaInitializer((Matx<_Tp, m, n>*)&mtx); + return (commaInitializer, val); +} + +template inline +MatxCommaInitializer<_Tp, m, n>::MatxCommaInitializer(Matx<_Tp, m, n>* 
_mtx) + : dst(_mtx), idx(0) +{} + +template template inline +MatxCommaInitializer<_Tp, m, n>& MatxCommaInitializer<_Tp, m, n>::operator , (_T2 value) +{ + CV_DbgAssert( idx < m*n ); + dst->val[idx++] = saturate_cast<_Tp>(value); + return *this; +} + +template inline +Matx<_Tp, m, n> MatxCommaInitializer<_Tp, m, n>::operator *() const +{ + CV_DbgAssert( idx == n*m ); + return *dst; +} + +/////////////////////////// short vector (Vec) ///////////////////////////// + +template inline Vec<_Tp, cn>::Vec() +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0) + : Matx<_Tp, cn, 1>(v0) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1) + : Matx<_Tp, cn, 1>(v0, v1) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2) + : Matx<_Tp, cn, 1>(v0, v1, v2) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, _Tp v4, _Tp v5) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7, v8) +{} + +template inline Vec<_Tp, cn>::Vec(_Tp v0, _Tp v1, _Tp v2, _Tp v3, + _Tp v4, _Tp v5, _Tp v6, _Tp v7, + _Tp v8, _Tp v9) + : Matx<_Tp, cn, 1>(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9) +{} + +template inline Vec<_Tp, cn>::Vec(const _Tp* values) + : Matx<_Tp, cn, 1>(values) +{} + + +template inline Vec<_Tp, cn>::Vec(const Vec<_Tp, cn>& m) + : Matx<_Tp, cn, 1>(m.val) +{} + +template 
inline +Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_AddOp op) +: Matx<_Tp, cn, 1>(a, b, op) +{} + +template inline +Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, const Matx<_Tp, cn, 1>& b, Matx_SubOp op) +: Matx<_Tp, cn, 1>(a, b, op) +{} + +template template inline +Vec<_Tp, cn>::Vec(const Matx<_Tp, cn, 1>& a, _T2 alpha, Matx_ScaleOp op) +: Matx<_Tp, cn, 1>(a, alpha, op) +{} + +template inline +Vec<_Tp, cn>& Vec<_Tp, cn>::operator = (const Matx<_Tp, cn, 1>& m) +{ + for( int i = 0; i < cn; i++ ) + this->val[i] = m.val[i]; + return *this; +} + +template inline Vec<_Tp, cn> Vec<_Tp, cn>::all(_Tp alpha) +{ + Vec v; + for( int i = 0; i < cn; i++ ) v.val[i] = alpha; + return v; +} + +template inline Vec<_Tp, cn> Vec<_Tp, cn>::mul(const Vec<_Tp, cn>& v) const +{ + Vec<_Tp, cn> w; + for( int i = 0; i < cn; i++ ) w.val[i] = saturate_cast<_Tp>(this->val[i]*v.val[i]); + return w; +} + +template inline Vec<_Tp, cn> Vec<_Tp, cn>::cross(const Vec<_Tp, cn>& v) const +{ + CV_Error(CV_StsError, "for arbitrary-size vector there is no cross-product defined"); + return Vec<_Tp, cn>(); +} + +template template +inline Vec<_Tp, cn>::operator Vec() const +{ + Vec v; + for( int i = 0; i < cn; i++ ) v.val[i] = saturate_cast(this->val[i]); + return v; +} + +template inline Vec<_Tp, cn>::operator CvScalar() const +{ + CvScalar s = {{0,0,0,0}}; + int i; + for( i = 0; i < std::min(cn, 4); i++ ) s.val[i] = this->val[i]; + for( ; i < 4; i++ ) s.val[i] = 0; + return s; +} + +template inline const _Tp& Vec<_Tp, cn>::operator [](int i) const +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template inline _Tp& Vec<_Tp, cn>::operator [](int i) +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template inline const _Tp& Vec<_Tp, cn>::operator ()(int i) const +{ + CV_DbgAssert( (unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template inline _Tp& Vec<_Tp, cn>::operator ()(int i) +{ + CV_DbgAssert( 
(unsigned)i < (unsigned)cn ); + return this->val[i]; +} + +template static inline Vec<_Tp1, cn>& +operator += (Vec<_Tp1, cn>& a, const Vec<_Tp2, cn>& b) +{ + for( int i = 0; i < cn; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] + b.val[i]); + return a; +} + +template static inline Vec<_Tp1, cn>& +operator -= (Vec<_Tp1, cn>& a, const Vec<_Tp2, cn>& b) +{ + for( int i = 0; i < cn; i++ ) + a.val[i] = saturate_cast<_Tp1>(a.val[i] - b.val[i]); + return a; +} + +template static inline Vec<_Tp, cn> +operator + (const Vec<_Tp, cn>& a, const Vec<_Tp, cn>& b) +{ + return Vec<_Tp, cn>(a, b, Matx_AddOp()); +} + +template static inline Vec<_Tp, cn> +operator - (const Vec<_Tp, cn>& a, const Vec<_Tp, cn>& b) +{ + return Vec<_Tp, cn>(a, b, Matx_SubOp()); +} + +template static inline +Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, int alpha) +{ + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*alpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, float alpha) +{ + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*alpha); + return a; +} + +template static inline +Vec<_Tp, cn>& operator *= (Vec<_Tp, cn>& a, double alpha) +{ + for( int i = 0; i < cn; i++ ) + a[i] = saturate_cast<_Tp>(a[i]*alpha); + return a; +} + + +template static inline Vec<_Tp, cn> +operator * (const Vec<_Tp, cn>& a, int alpha) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator * (int alpha, const Vec<_Tp, cn>& a) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator * (const Vec<_Tp, cn>& a, float alpha) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator * (float alpha, const Vec<_Tp, cn>& a) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator * (const Vec<_Tp, cn>& a, double alpha) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + 
+template static inline Vec<_Tp, cn> +operator * (double alpha, const Vec<_Tp, cn>& a) +{ + return Vec<_Tp, cn>(a, alpha, Matx_ScaleOp()); +} + +template static inline Vec<_Tp, cn> +operator - (const Vec<_Tp, cn>& a) +{ + Vec<_Tp,cn> t; + for( int i = 0; i < cn; i++ ) t.val[i] = saturate_cast<_Tp>(-a.val[i]); + return t; +} + +template<> inline Vec Vec::cross(const Vec& v) const +{ + return Vec(val[1]*v.val[2] - val[2]*v.val[1], + val[2]*v.val[0] - val[0]*v.val[2], + val[0]*v.val[1] - val[1]*v.val[0]); +} + +template<> inline Vec Vec::cross(const Vec& v) const +{ + return Vec(val[1]*v.val[2] - val[2]*v.val[1], + val[2]*v.val[0] - val[0]*v.val[2], + val[0]*v.val[1] - val[1]*v.val[0]); +} + +template static inline +Vec& operator += (Vec& a, const Vec& b) +{ + a[0] = saturate_cast(a[0] + b[0]); + a[1] = saturate_cast(a[1] + b[1]); + return a; +} + +template static inline +Vec& operator += (Vec& a, const Vec& b) +{ + a[0] = saturate_cast(a[0] + b[0]); + a[1] = saturate_cast(a[1] + b[1]); + a[2] = saturate_cast(a[2] + b[2]); + return a; +} + + +template static inline +Vec& operator += (Vec& a, const Vec& b) +{ + a[0] = saturate_cast(a[0] + b[0]); + a[1] = saturate_cast(a[1] + b[1]); + a[2] = saturate_cast(a[2] + b[2]); + a[3] = saturate_cast(a[3] + b[3]); + return a; +} + + +template static inline +VecCommaInitializer<_Tp, cn> operator << (const Vec<_Tp, cn>& vec, _T2 val) +{ + VecCommaInitializer<_Tp, cn> commaInitializer((Vec<_Tp, cn>*)&vec); + return (commaInitializer, val); +} + +template inline +VecCommaInitializer<_Tp, cn>::VecCommaInitializer(Vec<_Tp, cn>* _vec) + : MatxCommaInitializer<_Tp, cn, 1>(_vec) +{} + +template template inline +VecCommaInitializer<_Tp, cn>& VecCommaInitializer<_Tp, cn>::operator , (_T2 value) +{ + CV_DbgAssert( this->idx < cn ); + this->dst->val[this->idx++] = saturate_cast<_Tp>(value); + return *this; +} + +template inline +Vec<_Tp, cn> VecCommaInitializer<_Tp, cn>::operator *() const +{ + CV_DbgAssert( this->idx == cn ); + return 
*this->dst; +} + +//////////////////////////////// Complex ////////////////////////////// + +template inline Complex<_Tp>::Complex() : re(0), im(0) {} +template inline Complex<_Tp>::Complex( _Tp _re, _Tp _im ) : re(_re), im(_im) {} +template template inline Complex<_Tp>::operator Complex() const +{ return Complex(saturate_cast(re), saturate_cast(im)); } +template inline Complex<_Tp> Complex<_Tp>::conj() const +{ return Complex<_Tp>(re, -im); } + +template static inline +bool operator == (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return a.re == b.re && a.im == b.im; } + +template static inline +bool operator != (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return a.re != b.re || a.im != b.im; } + +template static inline +Complex<_Tp> operator + (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return Complex<_Tp>( a.re + b.re, a.im + b.im ); } + +template static inline +Complex<_Tp>& operator += (Complex<_Tp>& a, const Complex<_Tp>& b) +{ a.re += b.re; a.im += b.im; return a; } + +template static inline +Complex<_Tp> operator - (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return Complex<_Tp>( a.re - b.re, a.im - b.im ); } + +template static inline +Complex<_Tp>& operator -= (Complex<_Tp>& a, const Complex<_Tp>& b) +{ a.re -= b.re; a.im -= b.im; return a; } + +template static inline +Complex<_Tp> operator - (const Complex<_Tp>& a) +{ return Complex<_Tp>(-a.re, -a.im); } + +template static inline +Complex<_Tp> operator * (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ return Complex<_Tp>( a.re*b.re - a.im*b.im, a.re*b.im + a.im*b.re ); } + +template static inline +Complex<_Tp> operator * (const Complex<_Tp>& a, _Tp b) +{ return Complex<_Tp>( a.re*b, a.im*b ); } + +template static inline +Complex<_Tp> operator * (_Tp b, const Complex<_Tp>& a) +{ return Complex<_Tp>( a.re*b, a.im*b ); } + +template static inline +Complex<_Tp> operator + (const Complex<_Tp>& a, _Tp b) +{ return Complex<_Tp>( a.re + b, a.im ); } + +template static inline +Complex<_Tp> 
operator - (const Complex<_Tp>& a, _Tp b) +{ return Complex<_Tp>( a.re - b, a.im ); } + +template static inline +Complex<_Tp> operator + (_Tp b, const Complex<_Tp>& a) +{ return Complex<_Tp>( a.re + b, a.im ); } + +template static inline +Complex<_Tp> operator - (_Tp b, const Complex<_Tp>& a) +{ return Complex<_Tp>( b - a.re, -a.im ); } + +template static inline +Complex<_Tp>& operator += (Complex<_Tp>& a, _Tp b) +{ a.re += b; return a; } + +template static inline +Complex<_Tp>& operator -= (Complex<_Tp>& a, _Tp b) +{ a.re -= b; return a; } + +template static inline +Complex<_Tp>& operator *= (Complex<_Tp>& a, _Tp b) +{ a.re *= b; a.im *= b; return a; } + +template static inline +double abs(const Complex<_Tp>& a) +{ return std::sqrt( (double)a.re*a.re + (double)a.im*a.im); } + +template static inline +Complex<_Tp> operator / (const Complex<_Tp>& a, const Complex<_Tp>& b) +{ + double t = 1./((double)b.re*b.re + (double)b.im*b.im); + return Complex<_Tp>( (_Tp)((a.re*b.re + a.im*b.im)*t), + (_Tp)((-a.re*b.im + a.im*b.re)*t) ); +} + +template static inline +Complex<_Tp>& operator /= (Complex<_Tp>& a, const Complex<_Tp>& b) +{ + return (a = a / b); +} + +template static inline +Complex<_Tp> operator / (const Complex<_Tp>& a, _Tp b) +{ + _Tp t = (_Tp)1/b; + return Complex<_Tp>( a.re*t, a.im*t ); +} + +template static inline +Complex<_Tp> operator / (_Tp b, const Complex<_Tp>& a) +{ + return Complex<_Tp>(b)/a; +} + +template static inline +Complex<_Tp> operator /= (const Complex<_Tp>& a, _Tp b) +{ + _Tp t = (_Tp)1/b; + a.re *= t; a.im *= t; return a; +} + +//////////////////////////////// 2D Point //////////////////////////////// + +template inline Point_<_Tp>::Point_() : x(0), y(0) {} +template inline Point_<_Tp>::Point_(_Tp _x, _Tp _y) : x(_x), y(_y) {} +template inline Point_<_Tp>::Point_(const Point_& pt) : x(pt.x), y(pt.y) {} +template inline Point_<_Tp>::Point_(const CvPoint& pt) : x((_Tp)pt.x), y((_Tp)pt.y) {} +template inline Point_<_Tp>::Point_(const 
CvPoint2D32f& pt) + : x(saturate_cast<_Tp>(pt.x)), y(saturate_cast<_Tp>(pt.y)) {} +template inline Point_<_Tp>::Point_(const Size_<_Tp>& sz) : x(sz.width), y(sz.height) {} +template inline Point_<_Tp>::Point_(const Vec<_Tp,2>& v) : x(v[0]), y(v[1]) {} +template inline Point_<_Tp>& Point_<_Tp>::operator = (const Point_& pt) +{ x = pt.x; y = pt.y; return *this; } + +template template inline Point_<_Tp>::operator Point_<_Tp2>() const +{ return Point_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y)); } +template inline Point_<_Tp>::operator CvPoint() const +{ return cvPoint(saturate_cast(x), saturate_cast(y)); } +template inline Point_<_Tp>::operator CvPoint2D32f() const +{ return cvPoint2D32f((float)x, (float)y); } +template inline Point_<_Tp>::operator Vec<_Tp, 2>() const +{ return Vec<_Tp, 2>(x, y); } + +template inline _Tp Point_<_Tp>::dot(const Point_& pt) const +{ return saturate_cast<_Tp>(x*pt.x + y*pt.y); } +template inline double Point_<_Tp>::ddot(const Point_& pt) const +{ return (double)x*pt.x + (double)y*pt.y; } + +template static inline Point_<_Tp>& +operator += (Point_<_Tp>& a, const Point_<_Tp>& b) +{ + a.x = saturate_cast<_Tp>(a.x + b.x); + a.y = saturate_cast<_Tp>(a.y + b.y); + return a; +} + +template static inline Point_<_Tp>& +operator -= (Point_<_Tp>& a, const Point_<_Tp>& b) +{ + a.x = saturate_cast<_Tp>(a.x - b.x); + a.y = saturate_cast<_Tp>(a.y - b.y); + return a; +} + +template static inline Point_<_Tp>& +operator *= (Point_<_Tp>& a, int b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + return a; +} + +template static inline Point_<_Tp>& +operator *= (Point_<_Tp>& a, float b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + return a; +} + +template static inline Point_<_Tp>& +operator *= (Point_<_Tp>& a, double b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + return a; +} + +template static inline double norm(const Point_<_Tp>& pt) +{ return 
std::sqrt((double)pt.x*pt.x + (double)pt.y*pt.y); } + +template static inline bool operator == (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ return a.x == b.x && a.y == b.y; } + +template static inline bool operator != (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ return a.x != b.x || a.y != b.y; } + +template static inline Point_<_Tp> operator + (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x + b.x), saturate_cast<_Tp>(a.y + b.y) ); } + +template static inline Point_<_Tp> operator - (const Point_<_Tp>& a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x - b.x), saturate_cast<_Tp>(a.y - b.y) ); } + +template static inline Point_<_Tp> operator - (const Point_<_Tp>& a) +{ return Point_<_Tp>( saturate_cast<_Tp>(-a.x), saturate_cast<_Tp>(-a.y) ); } + +template static inline Point_<_Tp> operator * (const Point_<_Tp>& a, int b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); } + +template static inline Point_<_Tp> operator * (int a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); } + +template static inline Point_<_Tp> operator * (const Point_<_Tp>& a, float b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); } + +template static inline Point_<_Tp> operator * (float a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); } + +template static inline Point_<_Tp> operator * (const Point_<_Tp>& a, double b) +{ return Point_<_Tp>( saturate_cast<_Tp>(a.x*b), saturate_cast<_Tp>(a.y*b) ); } + +template static inline Point_<_Tp> operator * (double a, const Point_<_Tp>& b) +{ return Point_<_Tp>( saturate_cast<_Tp>(b.x*a), saturate_cast<_Tp>(b.y*a) ); } + +//////////////////////////////// 3D Point //////////////////////////////// + +template inline Point3_<_Tp>::Point3_() : x(0), y(0), z(0) {} +template inline Point3_<_Tp>::Point3_(_Tp _x, _Tp _y, _Tp _z) : 
x(_x), y(_y), z(_z) {} +template inline Point3_<_Tp>::Point3_(const Point3_& pt) : x(pt.x), y(pt.y), z(pt.z) {} +template inline Point3_<_Tp>::Point3_(const Point_<_Tp>& pt) : x(pt.x), y(pt.y), z(_Tp()) {} +template inline Point3_<_Tp>::Point3_(const CvPoint3D32f& pt) : + x(saturate_cast<_Tp>(pt.x)), y(saturate_cast<_Tp>(pt.y)), z(saturate_cast<_Tp>(pt.z)) {} +template inline Point3_<_Tp>::Point3_(const Vec<_Tp, 3>& v) : x(v[0]), y(v[1]), z(v[2]) {} + +template template inline Point3_<_Tp>::operator Point3_<_Tp2>() const +{ return Point3_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y), saturate_cast<_Tp2>(z)); } + +template inline Point3_<_Tp>::operator CvPoint3D32f() const +{ return cvPoint3D32f((float)x, (float)y, (float)z); } + +template inline Point3_<_Tp>::operator Vec<_Tp, 3>() const +{ return Vec<_Tp, 3>(x, y, z); } + +template inline Point3_<_Tp>& Point3_<_Tp>::operator = (const Point3_& pt) +{ x = pt.x; y = pt.y; z = pt.z; return *this; } + +template inline _Tp Point3_<_Tp>::dot(const Point3_& pt) const +{ return saturate_cast<_Tp>(x*pt.x + y*pt.y + z*pt.z); } +template inline double Point3_<_Tp>::ddot(const Point3_& pt) const +{ return (double)x*pt.x + (double)y*pt.y + (double)z*pt.z; } + +template inline Point3_<_Tp> Point3_<_Tp>::cross(const Point3_<_Tp>& pt) const +{ + return Point3_<_Tp>(y*pt.z - z*pt.y, z*pt.x - x*pt.z, x*pt.y - y*pt.x); +} + +template static inline Point3_<_Tp>& +operator += (Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ + a.x = saturate_cast<_Tp>(a.x + b.x); + a.y = saturate_cast<_Tp>(a.y + b.y); + a.z = saturate_cast<_Tp>(a.z + b.z); + return a; +} + +template static inline Point3_<_Tp>& +operator -= (Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ + a.x = saturate_cast<_Tp>(a.x - b.x); + a.y = saturate_cast<_Tp>(a.y - b.y); + a.z = saturate_cast<_Tp>(a.z - b.z); + return a; +} + +template static inline Point3_<_Tp>& +operator *= (Point3_<_Tp>& a, int b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + a.z 
= saturate_cast<_Tp>(a.z*b); + return a; +} + +template static inline Point3_<_Tp>& +operator *= (Point3_<_Tp>& a, float b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + a.z = saturate_cast<_Tp>(a.z*b); + return a; +} + +template static inline Point3_<_Tp>& +operator *= (Point3_<_Tp>& a, double b) +{ + a.x = saturate_cast<_Tp>(a.x*b); + a.y = saturate_cast<_Tp>(a.y*b); + a.z = saturate_cast<_Tp>(a.z*b); + return a; +} + +template static inline double norm(const Point3_<_Tp>& pt) +{ return std::sqrt((double)pt.x*pt.x + (double)pt.y*pt.y + (double)pt.z*pt.z); } + +template static inline bool operator == (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ return a.x == b.x && a.y == b.y && a.z == b.z; } + +template static inline bool operator != (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ return a.x != b.x || a.y != b.y || a.z != b.z; } + +template static inline Point3_<_Tp> operator + (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x + b.x), + saturate_cast<_Tp>(a.y + b.y), + saturate_cast<_Tp>(a.z + b.z)); } + +template static inline Point3_<_Tp> operator - (const Point3_<_Tp>& a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x - b.x), + saturate_cast<_Tp>(a.y - b.y), + saturate_cast<_Tp>(a.z - b.z)); } + +template static inline Point3_<_Tp> operator - (const Point3_<_Tp>& a) +{ return Point3_<_Tp>( saturate_cast<_Tp>(-a.x), + saturate_cast<_Tp>(-a.y), + saturate_cast<_Tp>(-a.z) ); } + +template static inline Point3_<_Tp> operator * (const Point3_<_Tp>& a, int b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x*b), + saturate_cast<_Tp>(a.y*b), + saturate_cast<_Tp>(a.z*b) ); } + +template static inline Point3_<_Tp> operator * (int a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(b.x*a), + saturate_cast<_Tp>(b.y*a), + saturate_cast<_Tp>(b.z*a) ); } + +template static inline Point3_<_Tp> operator * (const Point3_<_Tp>& a, float b) +{ return Point3_<_Tp>( 
saturate_cast<_Tp>(a.x*b), + saturate_cast<_Tp>(a.y*b), + saturate_cast<_Tp>(a.z*b) ); } + +template static inline Point3_<_Tp> operator * (float a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(b.x*a), + saturate_cast<_Tp>(b.y*a), + saturate_cast<_Tp>(b.z*a) ); } + +template static inline Point3_<_Tp> operator * (const Point3_<_Tp>& a, double b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(a.x*b), + saturate_cast<_Tp>(a.y*b), + saturate_cast<_Tp>(a.z*b) ); } + +template static inline Point3_<_Tp> operator * (double a, const Point3_<_Tp>& b) +{ return Point3_<_Tp>( saturate_cast<_Tp>(b.x*a), + saturate_cast<_Tp>(b.y*a), + saturate_cast<_Tp>(b.z*a) ); } + +//////////////////////////////// Size //////////////////////////////// + +template inline Size_<_Tp>::Size_() + : width(0), height(0) {} +template inline Size_<_Tp>::Size_(_Tp _width, _Tp _height) + : width(_width), height(_height) {} +template inline Size_<_Tp>::Size_(const Size_& sz) + : width(sz.width), height(sz.height) {} +template inline Size_<_Tp>::Size_(const CvSize& sz) + : width(saturate_cast<_Tp>(sz.width)), height(saturate_cast<_Tp>(sz.height)) {} +template inline Size_<_Tp>::Size_(const CvSize2D32f& sz) + : width(saturate_cast<_Tp>(sz.width)), height(saturate_cast<_Tp>(sz.height)) {} +template inline Size_<_Tp>::Size_(const Point_<_Tp>& pt) : width(pt.x), height(pt.y) {} + +template template inline Size_<_Tp>::operator Size_<_Tp2>() const +{ return Size_<_Tp2>(saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height)); } +template inline Size_<_Tp>::operator CvSize() const +{ return cvSize(saturate_cast(width), saturate_cast(height)); } +template inline Size_<_Tp>::operator CvSize2D32f() const +{ return cvSize2D32f((float)width, (float)height); } + +template inline Size_<_Tp>& Size_<_Tp>::operator = (const Size_<_Tp>& sz) +{ width = sz.width; height = sz.height; return *this; } +template static inline Size_<_Tp> operator * (const Size_<_Tp>& a, _Tp b) +{ return Size_<_Tp>(a.width * b, 
a.height * b); } +template static inline Size_<_Tp> operator + (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ return Size_<_Tp>(a.width + b.width, a.height + b.height); } +template static inline Size_<_Tp> operator - (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ return Size_<_Tp>(a.width - b.width, a.height - b.height); } +template inline _Tp Size_<_Tp>::area() const { return width*height; } + +template static inline Size_<_Tp>& operator += (Size_<_Tp>& a, const Size_<_Tp>& b) +{ a.width += b.width; a.height += b.height; return a; } +template static inline Size_<_Tp>& operator -= (Size_<_Tp>& a, const Size_<_Tp>& b) +{ a.width -= b.width; a.height -= b.height; return a; } + +template static inline bool operator == (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ return a.width == b.width && a.height == b.height; } +template static inline bool operator != (const Size_<_Tp>& a, const Size_<_Tp>& b) +{ return a.width != b.width || a.height != b.height; } + +//////////////////////////////// Rect //////////////////////////////// + + +template inline Rect_<_Tp>::Rect_() : x(0), y(0), width(0), height(0) {} +template inline Rect_<_Tp>::Rect_(_Tp _x, _Tp _y, _Tp _width, _Tp _height) : x(_x), y(_y), width(_width), height(_height) {} +template inline Rect_<_Tp>::Rect_(const Rect_<_Tp>& r) : x(r.x), y(r.y), width(r.width), height(r.height) {} +template inline Rect_<_Tp>::Rect_(const CvRect& r) : x((_Tp)r.x), y((_Tp)r.y), width((_Tp)r.width), height((_Tp)r.height) {} +template inline Rect_<_Tp>::Rect_(const Point_<_Tp>& org, const Size_<_Tp>& sz) : + x(org.x), y(org.y), width(sz.width), height(sz.height) {} +template inline Rect_<_Tp>::Rect_(const Point_<_Tp>& pt1, const Point_<_Tp>& pt2) +{ + x = std::min(pt1.x, pt2.x); y = std::min(pt1.y, pt2.y); + width = std::max(pt1.x, pt2.x) - x; height = std::max(pt1.y, pt2.y) - y; +} +template inline Rect_<_Tp>& Rect_<_Tp>::operator = ( const Rect_<_Tp>& r ) +{ x = r.x; y = r.y; width = r.width; height = r.height; return *this; } + 
+template inline Point_<_Tp> Rect_<_Tp>::tl() const { return Point_<_Tp>(x,y); } +template inline Point_<_Tp> Rect_<_Tp>::br() const { return Point_<_Tp>(x+width, y+height); } + +template static inline Rect_<_Tp>& operator += ( Rect_<_Tp>& a, const Point_<_Tp>& b ) +{ a.x += b.x; a.y += b.y; return a; } +template static inline Rect_<_Tp>& operator -= ( Rect_<_Tp>& a, const Point_<_Tp>& b ) +{ a.x -= b.x; a.y -= b.y; return a; } + +template static inline Rect_<_Tp>& operator += ( Rect_<_Tp>& a, const Size_<_Tp>& b ) +{ a.width += b.width; a.height += b.height; return a; } + +template static inline Rect_<_Tp>& operator -= ( Rect_<_Tp>& a, const Size_<_Tp>& b ) +{ a.width -= b.width; a.height -= b.height; return a; } + +template static inline Rect_<_Tp>& operator &= ( Rect_<_Tp>& a, const Rect_<_Tp>& b ) +{ + _Tp x1 = std::max(a.x, b.x), y1 = std::max(a.y, b.y); + a.width = std::min(a.x + a.width, b.x + b.width) - x1; + a.height = std::min(a.y + a.height, b.y + b.height) - y1; + a.x = x1; a.y = y1; + if( a.width <= 0 || a.height <= 0 ) + a = Rect(); + return a; +} + +template static inline Rect_<_Tp>& operator |= ( Rect_<_Tp>& a, const Rect_<_Tp>& b ) +{ + _Tp x1 = std::min(a.x, b.x), y1 = std::min(a.y, b.y); + a.width = std::max(a.x + a.width, b.x + b.width) - x1; + a.height = std::max(a.y + a.height, b.y + b.height) - y1; + a.x = x1; a.y = y1; + return a; +} + +template inline Size_<_Tp> Rect_<_Tp>::size() const { return Size_<_Tp>(width, height); } +template inline _Tp Rect_<_Tp>::area() const { return width*height; } + +template template inline Rect_<_Tp>::operator Rect_<_Tp2>() const +{ return Rect_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y), + saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height)); } +template inline Rect_<_Tp>::operator CvRect() const +{ return cvRect(saturate_cast(x), saturate_cast(y), + saturate_cast(width), saturate_cast(height)); } + +template inline bool Rect_<_Tp>::contains(const Point_<_Tp>& pt) const +{ return x <= pt.x && 
pt.x < x + width && y <= pt.y && pt.y < y + height; } + +template static inline bool operator == (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + return a.x == b.x && a.y == b.y && a.width == b.width && a.height == b.height; +} + +template static inline bool operator != (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + return a.x != b.x || a.y != b.y || a.width != b.width || a.height != b.height; +} + +template static inline Rect_<_Tp> operator + (const Rect_<_Tp>& a, const Point_<_Tp>& b) +{ + return Rect_<_Tp>( a.x + b.x, a.y + b.y, a.width, a.height ); +} + +template static inline Rect_<_Tp> operator - (const Rect_<_Tp>& a, const Point_<_Tp>& b) +{ + return Rect_<_Tp>( a.x - b.x, a.y - b.y, a.width, a.height ); +} + +template static inline Rect_<_Tp> operator + (const Rect_<_Tp>& a, const Size_<_Tp>& b) +{ + return Rect_<_Tp>( a.x, a.y, a.width + b.width, a.height + b.height ); +} + +template static inline Rect_<_Tp> operator & (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + Rect_<_Tp> c = a; + return c &= b; +} + +template static inline Rect_<_Tp> operator | (const Rect_<_Tp>& a, const Rect_<_Tp>& b) +{ + Rect_<_Tp> c = a; + return c |= b; +} + +template inline bool Point_<_Tp>::inside( const Rect_<_Tp>& r ) const +{ + return r.contains(*this); +} + +inline RotatedRect::RotatedRect() { angle = 0; } +inline RotatedRect::RotatedRect(const Point2f& _center, const Size2f& _size, float _angle) + : center(_center), size(_size), angle(_angle) {} +inline RotatedRect::RotatedRect(const CvBox2D& box) + : center(box.center), size(box.size), angle(box.angle) {} +inline RotatedRect::operator CvBox2D() const +{ + CvBox2D box; box.center = center; box.size = size; box.angle = angle; + return box; +} + +//////////////////////////////// Scalar_ /////////////////////////////// + +template inline Scalar_<_Tp>::Scalar_() +{ this->val[0] = this->val[1] = this->val[2] = this->val[3] = 0; } + +template inline Scalar_<_Tp>::Scalar_(_Tp v0, _Tp v1, _Tp v2, _Tp v3) +{ this->val[0] = v0; 
this->val[1] = v1; this->val[2] = v2; this->val[3] = v3; } + +template inline Scalar_<_Tp>::Scalar_(const CvScalar& s) +{ + this->val[0] = saturate_cast<_Tp>(s.val[0]); + this->val[1] = saturate_cast<_Tp>(s.val[1]); + this->val[2] = saturate_cast<_Tp>(s.val[2]); + this->val[3] = saturate_cast<_Tp>(s.val[3]); +} + +template inline Scalar_<_Tp>::Scalar_(_Tp v0) +{ this->val[0] = v0; this->val[1] = this->val[2] = this->val[3] = 0; } + +template inline Scalar_<_Tp> Scalar_<_Tp>::all(_Tp v0) +{ return Scalar_<_Tp>(v0, v0, v0, v0); } +template inline Scalar_<_Tp>::operator CvScalar() const +{ return cvScalar(this->val[0], this->val[1], this->val[2], this->val[3]); } + +template template inline Scalar_<_Tp>::operator Scalar_() const +{ + return Scalar_(saturate_cast(this->val[0]), + saturate_cast(this->val[1]), + saturate_cast(this->val[2]), + saturate_cast(this->val[3])); +} + +template static inline Scalar_<_Tp>& operator += (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a.val[0] = saturate_cast<_Tp>(a.val[0] + b.val[0]); + a.val[1] = saturate_cast<_Tp>(a.val[1] + b.val[1]); + a.val[2] = saturate_cast<_Tp>(a.val[2] + b.val[2]); + a.val[3] = saturate_cast<_Tp>(a.val[3] + b.val[3]); + return a; +} + +template static inline Scalar_<_Tp>& operator -= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a.val[0] = saturate_cast<_Tp>(a.val[0] - b.val[0]); + a.val[1] = saturate_cast<_Tp>(a.val[1] - b.val[1]); + a.val[2] = saturate_cast<_Tp>(a.val[2] - b.val[2]); + a.val[3] = saturate_cast<_Tp>(a.val[3] - b.val[3]); + return a; +} + +template static inline Scalar_<_Tp>& operator *= ( Scalar_<_Tp>& a, _Tp v ) +{ + a.val[0] = saturate_cast<_Tp>(a.val[0] * v); + a.val[1] = saturate_cast<_Tp>(a.val[1] * v); + a.val[2] = saturate_cast<_Tp>(a.val[2] * v); + a.val[3] = saturate_cast<_Tp>(a.val[3] * v); + return a; +} + +template inline Scalar_<_Tp> Scalar_<_Tp>::mul(const Scalar_<_Tp>& t, double scale ) const +{ + return Scalar_<_Tp>( saturate_cast<_Tp>(this->val[0]*t.val[0]*scale), + 
saturate_cast<_Tp>(this->val[1]*t.val[1]*scale), + saturate_cast<_Tp>(this->val[2]*t.val[2]*scale), + saturate_cast<_Tp>(this->val[3]*t.val[3]*scale)); +} + +template static inline bool operator == ( const Scalar_<_Tp>& a, const Scalar_<_Tp>& b ) +{ + return a.val[0] == b.val[0] && a.val[1] == b.val[1] && + a.val[2] == b.val[2] && a.val[3] == b.val[3]; +} + +template static inline bool operator != ( const Scalar_<_Tp>& a, const Scalar_<_Tp>& b ) +{ + return a.val[0] != b.val[0] || a.val[1] != b.val[1] || + a.val[2] != b.val[2] || a.val[3] != b.val[3]; +} + +template static inline Scalar_<_Tp> operator + (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] + b.val[0]), + saturate_cast<_Tp>(a.val[1] + b.val[1]), + saturate_cast<_Tp>(a.val[2] + b.val[2]), + saturate_cast<_Tp>(a.val[3] + b.val[3])); +} + +template static inline Scalar_<_Tp> operator - (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] - b.val[0]), + saturate_cast<_Tp>(a.val[1] - b.val[1]), + saturate_cast<_Tp>(a.val[2] - b.val[2]), + saturate_cast<_Tp>(a.val[3] - b.val[3])); +} + +template static inline Scalar_<_Tp> operator * (const Scalar_<_Tp>& a, _Tp alpha) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] * alpha), + saturate_cast<_Tp>(a.val[1] * alpha), + saturate_cast<_Tp>(a.val[2] * alpha), + saturate_cast<_Tp>(a.val[3] * alpha)); +} + +template static inline Scalar_<_Tp> operator * (_Tp alpha, const Scalar_<_Tp>& a) +{ + return a*alpha; +} + +template static inline Scalar_<_Tp> operator - (const Scalar_<_Tp>& a) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(-a.val[0]), saturate_cast<_Tp>(-a.val[1]), + saturate_cast<_Tp>(-a.val[2]), saturate_cast<_Tp>(-a.val[3])); +} + + +template static inline Scalar_<_Tp> +operator * (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a[0]*b[0] - a[1]*b[1] - a[2]*b[2] - a[3]*b[3]), + saturate_cast<_Tp>(a[0]*b[1] + a[1]*b[0] + 
a[2]*b[3] - a[3]*b[2]), + saturate_cast<_Tp>(a[0]*b[2] - a[1]*b[3] + a[2]*b[0] - a[3]*b[1]), + saturate_cast<_Tp>(a[0]*b[3] + a[1]*b[2] - a[2]*b[1] - a[3]*b[0])); +} + +template static inline Scalar_<_Tp>& +operator *= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a = a*b; + return a; +} + +template inline Scalar_<_Tp> Scalar_<_Tp>::conj() const +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(this->val[0]), + saturate_cast<_Tp>(-this->val[1]), + saturate_cast<_Tp>(-this->val[2]), + saturate_cast<_Tp>(-this->val[3])); +} + +template inline bool Scalar_<_Tp>::isReal() const +{ + return this->val[1] == 0 && this->val[2] == 0 && this->val[3] == 0; +} + +template static inline +Scalar_<_Tp> operator / (const Scalar_<_Tp>& a, _Tp alpha) +{ + return Scalar_<_Tp>(saturate_cast<_Tp>(a.val[0] / alpha), + saturate_cast<_Tp>(a.val[1] / alpha), + saturate_cast<_Tp>(a.val[2] / alpha), + saturate_cast<_Tp>(a.val[3] / alpha)); +} + +template static inline +Scalar_ operator / (const Scalar_& a, float alpha) +{ + float s = 1/alpha; + return Scalar_(a.val[0]*s, a.val[1]*s, a.val[2]*s, a.val[3]*s); +} + +template static inline +Scalar_ operator / (const Scalar_& a, double alpha) +{ + double s = 1/alpha; + return Scalar_(a.val[0]*s, a.val[1]*s, a.val[2]*s, a.val[3]*s); +} + +template static inline +Scalar_<_Tp>& operator /= (Scalar_<_Tp>& a, _Tp alpha) +{ + a = a/alpha; + return a; +} + +template static inline +Scalar_<_Tp> operator / (_Tp a, const Scalar_<_Tp>& b) +{ + _Tp s = a/(b[0]*b[0] + b[1]*b[1] + b[2]*b[2] + b[3]*b[3]); + return b.conj()*s; +} + +template static inline +Scalar_<_Tp> operator / (const Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + return a*((_Tp)1/b); +} + +template static inline +Scalar_<_Tp>& operator /= (Scalar_<_Tp>& a, const Scalar_<_Tp>& b) +{ + a = a/b; + return a; +} + +//////////////////////////////// Range ///////////////////////////////// + +inline Range::Range() : start(0), end(0) {} +inline Range::Range(int _start, int _end) : start(_start), end(_end) {} 
+inline Range::Range(const CvSlice& slice) : start(slice.start_index), end(slice.end_index) +{ + if( start == 0 && end == CV_WHOLE_SEQ_END_INDEX ) + *this = Range::all(); +} + +inline int Range::size() const { return end - start; } +inline bool Range::empty() const { return start == end; } +inline Range Range::all() { return Range(INT_MIN, INT_MAX); } + +static inline bool operator == (const Range& r1, const Range& r2) +{ return r1.start == r2.start && r1.end == r2.end; } + +static inline bool operator != (const Range& r1, const Range& r2) +{ return !(r1 == r2); } + +static inline bool operator !(const Range& r) +{ return r.start == r.end; } + +static inline Range operator & (const Range& r1, const Range& r2) +{ + Range r(std::max(r1.start, r2.start), std::min(r1.end, r2.end)); + r.end = std::max(r.end, r.start); + return r; +} + +static inline Range& operator &= (Range& r1, const Range& r2) +{ + r1 = r1 & r2; + return r1; +} + +static inline Range operator + (const Range& r1, int delta) +{ + return Range(r1.start + delta, r1.end + delta); +} + +static inline Range operator + (int delta, const Range& r1) +{ + return Range(r1.start + delta, r1.end + delta); +} + +static inline Range operator - (const Range& r1, int delta) +{ + return r1 + (-delta); +} + +inline Range::operator CvSlice() const +{ return *this != Range::all() ? cvSlice(start, end) : CV_WHOLE_SEQ; } + + + +//////////////////////////////// Vector //////////////////////////////// + +// template vector class. 
It is similar to STL's vector, +// with a few important differences: +// 1) it can be created on top of user-allocated data w/o copying it +// 2) vector b = a means copying the header, +// not the underlying data (use clone() to make a deep copy) +template class CV_EXPORTS Vector +{ +public: + typedef _Tp value_type; + typedef _Tp* iterator; + typedef const _Tp* const_iterator; + typedef _Tp& reference; + typedef const _Tp& const_reference; + + struct CV_EXPORTS Hdr + { + Hdr() : data(0), datastart(0), refcount(0), size(0), capacity(0) {}; + _Tp* data; + _Tp* datastart; + int* refcount; + size_t size; + size_t capacity; + }; + + Vector() {} + Vector(size_t _size) { resize(_size); } + Vector(size_t _size, const _Tp& val) + { + resize(_size); + for(size_t i = 0; i < _size; i++) + hdr.data[i] = val; + } + Vector(_Tp* _data, size_t _size, bool _copyData=false) + { set(_data, _size, _copyData); } + + template Vector(const Vec<_Tp, n>& vec) + { set((_Tp*)&vec.val[0], n, true); } + + Vector(const std::vector<_Tp>& vec, bool _copyData=false) + { set((_Tp*)&vec[0], vec.size(), _copyData); } + + Vector(const Vector& d) { *this = d; } + + Vector(const Vector& d, const Range& r_) + { + Range r = r_ == Range::all() ? Range(0, d.size()) : r_; + /*if( r == Range::all() ) + r = Range(0, d.size());*/ + if( r.size() > 0 && r.start >= 0 && r.end <= d.size() ) + { + if( d.hdr.refcount ) + CV_XADD(d.hdr.refcount, 1); + hdr.refcount = d.hdr.refcount; + hdr.datastart = d.hdr.datastart; + hdr.data = d.hdr.data + r.start; + hdr.capacity = hdr.size = r.size(); + } + } + + Vector<_Tp>& operator = (const Vector& d) + { + if( this != &d ) + { + if( d.hdr.refcount ) + CV_XADD(d.hdr.refcount, 1); + release(); + hdr = d.hdr; + } + return *this; + } + + ~Vector() { release(); } + + Vector<_Tp> clone() const + { return hdr.data ? 
Vector<_Tp>(hdr.data, hdr.size, true) : Vector<_Tp>(); } + + void copyTo(Vector<_Tp>& vec) const + { + size_t i, sz = size(); + vec.resize(sz); + const _Tp* src = hdr.data; + _Tp* dst = vec.hdr.data; + for( i = 0; i < sz; i++ ) + dst[i] = src[i]; + } + + void copyTo(std::vector<_Tp>& vec) const + { + size_t i, sz = size(); + vec.resize(sz); + const _Tp* src = hdr.data; + _Tp* dst = sz ? &vec[0] : 0; + for( i = 0; i < sz; i++ ) + dst[i] = src[i]; + } + + operator CvMat() const + { return cvMat((int)size(), 1, type(), (void*)hdr.data); } + + _Tp& operator [] (size_t i) { CV_DbgAssert( i < size() ); return hdr.data[i]; } + const _Tp& operator [] (size_t i) const { CV_DbgAssert( i < size() ); return hdr.data[i]; } + Vector operator() (const Range& r) const { return Vector(*this, r); } + _Tp& back() { CV_DbgAssert(!empty()); return hdr.data[hdr.size-1]; } + const _Tp& back() const { CV_DbgAssert(!empty()); return hdr.data[hdr.size-1]; } + _Tp& front() { CV_DbgAssert(!empty()); return hdr.data[0]; } + const _Tp& front() const { CV_DbgAssert(!empty()); return hdr.data[0]; } + + _Tp* begin() { return hdr.data; } + _Tp* end() { return hdr.data + hdr.size; } + const _Tp* begin() const { return hdr.data; } + const _Tp* end() const { return hdr.data + hdr.size; } + + void addref() { if( hdr.refcount ) CV_XADD(hdr.refcount, 1); } + void release() + { + if( hdr.refcount && CV_XADD(hdr.refcount, -1) == 1 ) + { + delete[] hdr.datastart; + delete hdr.refcount; + } + hdr = Hdr(); + } + + void set(_Tp* _data, size_t _size, bool _copyData=false) + { + if( !_copyData ) + { + release(); + hdr.data = hdr.datastart = _data; + hdr.size = hdr.capacity = _size; + hdr.refcount = 0; + } + else + { + reserve(_size); + for( size_t i = 0; i < _size; i++ ) + hdr.data[i] = _data[i]; + hdr.size = _size; + } + } + + void reserve(size_t newCapacity) + { + _Tp* newData; + int* newRefcount; + size_t i, oldSize = hdr.size; + if( (!hdr.refcount || *hdr.refcount == 1) && hdr.capacity >= newCapacity ) + 
return; + newCapacity = std::max(newCapacity, oldSize); + newData = new _Tp[newCapacity]; + newRefcount = new int(1); + for( i = 0; i < oldSize; i++ ) + newData[i] = hdr.data[i]; + release(); + hdr.data = hdr.datastart = newData; + hdr.capacity = newCapacity; + hdr.size = oldSize; + hdr.refcount = newRefcount; + } + + void resize(size_t newSize) + { + size_t i; + newSize = std::max(newSize, (size_t)0); + if( (!hdr.refcount || *hdr.refcount == 1) && hdr.size == newSize ) + return; + if( newSize > hdr.capacity ) + reserve(std::max(newSize, std::max((size_t)4, hdr.capacity*2))); + for( i = hdr.size; i < newSize; i++ ) + hdr.data[i] = _Tp(); + hdr.size = newSize; + } + + Vector<_Tp>& push_back(const _Tp& elem) + { + if( hdr.size == hdr.capacity ) + reserve( std::max((size_t)4, hdr.capacity*2) ); + hdr.data[hdr.size++] = elem; + return *this; + } + + Vector<_Tp>& pop_back() + { + if( hdr.size > 0 ) + --hdr.size; + return *this; + } + + size_t size() const { return hdr.size; } + size_t capacity() const { return hdr.capacity; } + bool empty() const { return hdr.size == 0; } + void clear() { resize(0); } + int type() const { return DataType<_Tp>::type; } + +protected: + Hdr hdr; +}; + + +template inline typename DataType<_Tp>::work_type +dot(const Vector<_Tp>& v1, const Vector<_Tp>& v2) +{ + typedef typename DataType<_Tp>::work_type _Tw; + size_t i, n = v1.size(); + assert(v1.size() == v2.size()); + + _Tw s = 0; + const _Tp *ptr1 = &v1[0], *ptr2 = &v2[0]; + for( i = 0; i <= n - 4; i += 4 ) + s += (_Tw)ptr1[i]*ptr2[i] + (_Tw)ptr1[i+1]*ptr2[i+1] + + (_Tw)ptr1[i+2]*ptr2[i+2] + (_Tw)ptr1[i+3]*ptr2[i+3]; + for( ; i < n; i++ ) + s += (_Tw)ptr1[i]*ptr2[i]; + return s; +} + +// Multiply-with-Carry RNG +inline RNG::RNG() { state = 0xffffffff; } +inline RNG::RNG(uint64 _state) { state = _state ? 
_state : 0xffffffff; } +inline unsigned RNG::next() +{ + state = (uint64)(unsigned)state*CV_RNG_COEFF + (unsigned)(state >> 32); + return (unsigned)state; +} + +inline RNG::operator uchar() { return (uchar)next(); } +inline RNG::operator schar() { return (schar)next(); } +inline RNG::operator ushort() { return (ushort)next(); } +inline RNG::operator short() { return (short)next(); } +inline RNG::operator unsigned() { return next(); } +inline unsigned RNG::operator ()(unsigned N) {return (unsigned)uniform(0,N);} +inline unsigned RNG::operator ()() {return next();} +inline RNG::operator int() { return (int)next(); } +// * (2^32-1)^-1 +inline RNG::operator float() { return next()*2.3283064365386962890625e-10f; } +inline RNG::operator double() +{ + unsigned t = next(); + return (((uint64)t << 32) | next())*5.4210108624275221700372640043497e-20; +} +inline int RNG::uniform(int a, int b) { return a == b ? a : next()%(b - a) + a; } +inline float RNG::uniform(float a, float b) { return ((float)*this)*(b - a) + a; } +inline double RNG::uniform(double a, double b) { return ((double)*this)*(b - a) + a; } + +inline TermCriteria::TermCriteria() : type(0), maxCount(0), epsilon(0) {} +inline TermCriteria::TermCriteria(int _type, int _maxCount, double _epsilon) + : type(_type), maxCount(_maxCount), epsilon(_epsilon) {} +inline TermCriteria::TermCriteria(const CvTermCriteria& criteria) + : type(criteria.type), maxCount(criteria.max_iter), epsilon(criteria.epsilon) {} +inline TermCriteria::operator CvTermCriteria() const +{ return cvTermCriteria(type, maxCount, epsilon); } + +inline uchar* LineIterator::operator *() { return ptr; } +inline LineIterator& LineIterator::operator ++() +{ + int mask = err < 0 ? 
-1 : 0; + err += minusDelta + (plusDelta & mask); + ptr += minusStep + (plusStep & mask); + return *this; +} +inline LineIterator LineIterator::operator ++(int) +{ + LineIterator it = *this; + ++(*this); + return it; +} +inline Point LineIterator::pos() const +{ + Point p; + p.y = (int)((ptr - ptr0)/step); + p.x = (int)(((ptr - ptr0) - p.y*step)/elemSize); + return p; +} + +/////////////////////////////// AutoBuffer //////////////////////////////////////// + +template inline AutoBuffer<_Tp, fixed_size>::AutoBuffer() +{ + ptr = buf; + size = fixed_size; +} + +template inline AutoBuffer<_Tp, fixed_size>::AutoBuffer(size_t _size) +{ + ptr = buf; + size = fixed_size; + allocate(_size); +} + +template inline AutoBuffer<_Tp, fixed_size>::~AutoBuffer() +{ deallocate(); } + +template inline void AutoBuffer<_Tp, fixed_size>::allocate(size_t _size) +{ + if(_size <= size) + return; + deallocate(); + if(_size > fixed_size) + { + ptr = cv::allocate<_Tp>(_size); + size = _size; + } +} + +template inline void AutoBuffer<_Tp, fixed_size>::deallocate() +{ + if( ptr != buf ) + { + cv::deallocate<_Tp>(ptr, size); + ptr = buf; + size = fixed_size; + } +} + +template inline AutoBuffer<_Tp, fixed_size>::operator _Tp* () +{ return ptr; } + +template inline AutoBuffer<_Tp, fixed_size>::operator const _Tp* () const +{ return ptr; } + + +/////////////////////////////////// Ptr //////////////////////////////////////// + +template inline Ptr<_Tp>::Ptr() : obj(0), refcount(0) {} +template inline Ptr<_Tp>::Ptr(_Tp* _obj) : obj(_obj) +{ + if(obj) + { + refcount = (int*)fastMalloc(sizeof(*refcount)); + *refcount = 1; + } + else + refcount = 0; +} + +template inline void Ptr<_Tp>::addref() +{ if( refcount ) CV_XADD(refcount, 1); } + +template inline void Ptr<_Tp>::release() +{ + if( refcount && CV_XADD(refcount, -1) == 1 ) + { + delete_obj(); + fastFree(refcount); + } + refcount = 0; + obj = 0; +} + +template inline void Ptr<_Tp>::delete_obj() +{ + if( obj ) delete obj; +} + +template inline 
Ptr<_Tp>::~Ptr() { release(); } + +template inline Ptr<_Tp>::Ptr(const Ptr<_Tp>& ptr) +{ + obj = ptr.obj; + refcount = ptr.refcount; + addref(); +} + +template inline Ptr<_Tp>& Ptr<_Tp>::operator = (const Ptr<_Tp>& ptr) +{ + int* _refcount = ptr.refcount; + if( _refcount ) + CV_XADD(_refcount, 1); + release(); + obj = ptr.obj; + refcount = _refcount; + return *this; +} + +template inline _Tp* Ptr<_Tp>::operator -> () { return obj; } +template inline const _Tp* Ptr<_Tp>::operator -> () const { return obj; } + +template inline Ptr<_Tp>::operator _Tp* () { return obj; } +template inline Ptr<_Tp>::operator const _Tp*() const { return obj; } + +template inline bool Ptr<_Tp>::empty() const { return obj == 0; } + +//// specializied implementations of Ptr::delete_obj() for classic OpenCV types + +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); +template<> CV_EXPORTS void Ptr::delete_obj(); + +//////////////////////////////////////// XML & YAML I/O //////////////////////////////////// + +CV_EXPORTS_W void write( FileStorage& fs, const string& name, int value ); +CV_EXPORTS_W void write( FileStorage& fs, const string& name, float value ); +CV_EXPORTS_W void write( FileStorage& fs, const string& name, double value ); +CV_EXPORTS_W void write( FileStorage& fs, const string& name, const string& value ); + +template inline void write(FileStorage& fs, const _Tp& value) +{ write(fs, string(), value); } + +CV_EXPORTS void writeScalar( FileStorage& fs, int value ); +CV_EXPORTS void writeScalar( FileStorage& fs, float value ); +CV_EXPORTS void writeScalar( FileStorage& fs, double value ); +CV_EXPORTS void writeScalar( FileStorage& fs, const string& value ); + +template<> inline void write( FileStorage& fs, const int& value ) +{ + writeScalar(fs, value); +} + +template<> inline void write( FileStorage& 
fs, const float& value ) +{ + writeScalar(fs, value); +} + +template<> inline void write( FileStorage& fs, const double& value ) +{ + writeScalar(fs, value); +} + +template<> inline void write( FileStorage& fs, const string& value ) +{ + writeScalar(fs, value); +} + +template inline void write(FileStorage& fs, const Point_<_Tp>& pt ) +{ + write(fs, pt.x); + write(fs, pt.y); +} + +template inline void write(FileStorage& fs, const Point3_<_Tp>& pt ) +{ + write(fs, pt.x); + write(fs, pt.y); + write(fs, pt.z); +} + +template inline void write(FileStorage& fs, const Size_<_Tp>& sz ) +{ + write(fs, sz.width); + write(fs, sz.height); +} + +template inline void write(FileStorage& fs, const Complex<_Tp>& c ) +{ + write(fs, c.re); + write(fs, c.im); +} + +template inline void write(FileStorage& fs, const Rect_<_Tp>& r ) +{ + write(fs, r.x); + write(fs, r.y); + write(fs, r.width); + write(fs, r.height); +} + +template inline void write(FileStorage& fs, const Vec<_Tp, cn>& v ) +{ + for(int i = 0; i < cn; i++) + write(fs, v.val[i]); +} + +template inline void write(FileStorage& fs, const Scalar_<_Tp>& s ) +{ + write(fs, s.val[0]); + write(fs, s.val[1]); + write(fs, s.val[2]); + write(fs, s.val[3]); +} + +inline void write(FileStorage& fs, const Range& r ) +{ + write(fs, r.start); + write(fs, r.end); +} + +class CV_EXPORTS WriteStructContext +{ +public: + WriteStructContext(FileStorage& _fs, const string& name, + int flags, const string& typeName=string()); + ~WriteStructContext(); + FileStorage* fs; +}; + +template inline void write(FileStorage& fs, const string& name, const Point_<_Tp>& pt ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, pt.x); + write(fs, pt.y); +} + +template inline void write(FileStorage& fs, const string& name, const Point3_<_Tp>& pt ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, pt.x); + write(fs, pt.y); + write(fs, pt.z); +} + +template inline void write(FileStorage& fs, const string& name, 
const Size_<_Tp>& sz ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, sz.width); + write(fs, sz.height); +} + +template inline void write(FileStorage& fs, const string& name, const Complex<_Tp>& c ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, c.re); + write(fs, c.im); +} + +template inline void write(FileStorage& fs, const string& name, const Rect_<_Tp>& r ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, r.x); + write(fs, r.y); + write(fs, r.width); + write(fs, r.height); +} + +template inline void write(FileStorage& fs, const string& name, const Vec<_Tp, cn>& v ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + for(int i = 0; i < cn; i++) + write(fs, v.val[i]); +} + +template inline void write(FileStorage& fs, const string& name, const Scalar_<_Tp>& s ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, s.val[0]); + write(fs, s.val[1]); + write(fs, s.val[2]); + write(fs, s.val[3]); +} + +inline void write(FileStorage& fs, const string& name, const Range& r ) +{ + WriteStructContext ws(fs, name, CV_NODE_SEQ+CV_NODE_FLOW); + write(fs, r.start); + write(fs, r.end); +} + +template class CV_EXPORTS VecWriterProxy +{ +public: + VecWriterProxy( FileStorage* _fs ) : fs(_fs) {} + void operator()(const vector<_Tp>& vec) const + { + size_t i, count = vec.size(); + for( i = 0; i < count; i++ ) + write( *fs, vec[i] ); + } + FileStorage* fs; +}; + +template class CV_EXPORTS VecWriterProxy<_Tp,1> +{ +public: + VecWriterProxy( FileStorage* _fs ) : fs(_fs) {} + void operator()(const vector<_Tp>& vec) const + { + int _fmt = DataType<_Tp>::fmt; + char fmt[] = { (char)((_fmt>>8)+'1'), (char)_fmt, '\0' }; + fs->writeRaw( string(fmt), (uchar*)&vec[0], vec.size()*sizeof(_Tp) ); + } + FileStorage* fs; +}; + + +template static inline void write( FileStorage& fs, const vector<_Tp>& vec ) +{ + VecWriterProxy<_Tp, DataType<_Tp>::fmt != 0> w(&fs); + w(vec); +} + 
+template static inline FileStorage& +operator << ( FileStorage& fs, const vector<_Tp>& vec ) +{ + VecWriterProxy<_Tp, DataType<_Tp>::generic_type == 0> w(&fs); + w(vec); + return fs; +} + +CV_EXPORTS_W void write( FileStorage& fs, const string& name, const Mat& value ); +CV_EXPORTS void write( FileStorage& fs, const string& name, const SparseMat& value ); + +template static inline FileStorage& operator << (FileStorage& fs, const _Tp& value) +{ + if( !fs.isOpened() ) + return fs; + if( fs.state == FileStorage::NAME_EXPECTED + FileStorage::INSIDE_MAP ) + CV_Error( CV_StsError, "No element name has been given" ); + write( fs, fs.elname, value ); + if( fs.state & FileStorage::INSIDE_MAP ) + fs.state = FileStorage::NAME_EXPECTED + FileStorage::INSIDE_MAP; + return fs; +} + +CV_EXPORTS FileStorage& operator << (FileStorage& fs, const string& str); + +static inline FileStorage& operator << (FileStorage& fs, const char* str) +{ return (fs << string(str)); } + +inline FileNode::FileNode() : fs(0), node(0) {} +inline FileNode::FileNode(const CvFileStorage* _fs, const CvFileNode* _node) + : fs(_fs), node(_node) {} + +inline FileNode::FileNode(const FileNode& _node) : fs(_node.fs), node(_node.node) {} + +inline int FileNode::type() const { return !node ? NONE : (node->tag & TYPE_MASK); } +inline bool FileNode::empty() const { return node == 0; } +inline bool FileNode::isNone() const { return type() == NONE; } +inline bool FileNode::isSeq() const { return type() == SEQ; } +inline bool FileNode::isMap() const { return type() == MAP; } +inline bool FileNode::isInt() const { return type() == INT; } +inline bool FileNode::isReal() const { return type() == REAL; } +inline bool FileNode::isString() const { return type() == STR; } +inline bool FileNode::isNamed() const { return !node ? false : (node->tag & NAMED) != 0; } +inline size_t FileNode::size() const +{ + int t = type(); + return t == MAP ? ((CvSet*)node->data.map)->active_count : + t == SEQ ? 
node->data.seq->total : node != 0; +} + +inline CvFileNode* FileNode::operator *() { return (CvFileNode*)node; } +inline const CvFileNode* FileNode::operator* () const { return node; } + +static inline void read(const FileNode& node, int& value, int default_value) +{ + value = !node.node ? default_value : + CV_NODE_IS_INT(node.node->tag) ? node.node->data.i : + CV_NODE_IS_REAL(node.node->tag) ? cvRound(node.node->data.f) : 0x7fffffff; +} + +static inline void read(const FileNode& node, bool& value, bool default_value) +{ + int temp; read(node, temp, (int)default_value); + value = temp != 0; +} + +static inline void read(const FileNode& node, uchar& value, uchar default_value) +{ + int temp; read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline void read(const FileNode& node, schar& value, schar default_value) +{ + int temp; read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline void read(const FileNode& node, ushort& value, ushort default_value) +{ + int temp; read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline void read(const FileNode& node, short& value, short default_value) +{ + int temp; read(node, temp, (int)default_value); + value = saturate_cast(temp); +} + +static inline void read(const FileNode& node, float& value, float default_value) +{ + value = !node.node ? default_value : + CV_NODE_IS_INT(node.node->tag) ? (float)node.node->data.i : + CV_NODE_IS_REAL(node.node->tag) ? (float)node.node->data.f : 1e30f; +} + +static inline void read(const FileNode& node, double& value, double default_value) +{ + value = !node.node ? default_value : + CV_NODE_IS_INT(node.node->tag) ? (double)node.node->data.i : + CV_NODE_IS_REAL(node.node->tag) ? node.node->data.f : 1e300; +} + +static inline void read(const FileNode& node, string& value, const string& default_value) +{ + value = !node.node ? default_value : CV_NODE_IS_STRING(node.node->tag) ? 
string(node.node->data.str.ptr) : string(""); +} + +CV_EXPORTS_W void read(const FileNode& node, Mat& mat, const Mat& default_mat=Mat() ); +CV_EXPORTS void read(const FileNode& node, SparseMat& mat, const SparseMat& default_mat=SparseMat() ); + +inline FileNode::operator int() const +{ + int value; + read(*this, value, 0); + return value; +} +inline FileNode::operator float() const +{ + float value; + read(*this, value, 0.f); + return value; +} +inline FileNode::operator double() const +{ + double value; + read(*this, value, 0.); + return value; +} +inline FileNode::operator string() const +{ + string value; + read(*this, value, value); + return value; +} + +inline void FileNode::readRaw( const string& fmt, uchar* vec, size_t len ) const +{ + begin().readRaw( fmt, vec, len ); +} + +template class CV_EXPORTS VecReaderProxy +{ +public: + VecReaderProxy( FileNodeIterator* _it ) : it(_it) {} + void operator()(vector<_Tp>& vec, size_t count) const + { + count = std::min(count, it->remaining); + vec.resize(count); + for( size_t i = 0; i < count; i++, ++(*it) ) + read(**it, vec[i], _Tp()); + } + FileNodeIterator* it; +}; + +template class CV_EXPORTS VecReaderProxy<_Tp,1> +{ +public: + VecReaderProxy( FileNodeIterator* _it ) : it(_it) {} + void operator()(vector<_Tp>& vec, size_t count) const + { + size_t remaining = it->remaining, cn = DataType<_Tp>::channels; + int _fmt = DataType<_Tp>::fmt; + char fmt[] = { (char)((_fmt>>8)+'1'), (char)_fmt, '\0' }; + size_t remaining1 = remaining/cn; + count = count < remaining1 ? 
count : remaining1; + vec.resize(count); + it->readRaw( string(fmt), (uchar*)&vec[0], count*sizeof(_Tp) ); + } + FileNodeIterator* it; +}; + +template static inline void +read( FileNodeIterator& it, vector<_Tp>& vec, size_t maxCount=(size_t)INT_MAX ) +{ + VecReaderProxy<_Tp, DataType<_Tp>::fmt != 0> r(&it); + r(vec, maxCount); +} + +template static inline void +read( FileNode& node, vector<_Tp>& vec, const vector<_Tp>& default_value=vector<_Tp>() ) +{ + read( node.begin(), vec ); +} + +inline FileNodeIterator FileNode::begin() const +{ + return FileNodeIterator(fs, node); +} + +inline FileNodeIterator FileNode::end() const +{ + return FileNodeIterator(fs, node, size()); +} + +inline FileNode FileNodeIterator::operator *() const +{ return FileNode(fs, (const CvFileNode*)reader.ptr); } + +inline FileNode FileNodeIterator::operator ->() const +{ return FileNode(fs, (const CvFileNode*)reader.ptr); } + +template static inline FileNodeIterator& operator >> (FileNodeIterator& it, _Tp& value) +{ read( *it, value, _Tp()); return ++it; } + +template static inline +FileNodeIterator& operator >> (FileNodeIterator& it, vector<_Tp>& vec) +{ + VecReaderProxy<_Tp, DataType<_Tp>::fmt != 0> r(&it); + r(vec, (size_t)INT_MAX); + return it; +} + +template static inline void operator >> (const FileNode& n, _Tp& value) +{ read( n, value, _Tp()); } + +template static inline void operator >> (const FileNode& n, vector<_Tp>& vec) +{ FileNodeIterator it = n.begin(); it >> vec; } + +static inline bool operator == (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return it1.fs == it2.fs && it1.container == it2.container && + it1.reader.ptr == it2.reader.ptr && it1.remaining == it2.remaining; +} + +static inline bool operator != (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return !(it1 == it2); +} + +static inline ptrdiff_t operator - (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return it2.remaining - it1.remaining; +} + +static inline bool 
operator < (const FileNodeIterator& it1, const FileNodeIterator& it2) +{ + return it1.remaining > it2.remaining; +} + +inline FileNode FileStorage::getFirstTopLevelNode() const +{ + FileNode r = root(); + FileNodeIterator it = r.begin(); + return it != r.end() ? *it : FileNode(); +} + +//////////////////////////////////////// Various algorithms //////////////////////////////////// + +template static inline _Tp gcd(_Tp a, _Tp b) +{ + if( a < b ) + std::swap(a, b); + while( b > 0 ) + { + _Tp r = a % b; + a = b; + b = r; + } + return a; +} + +/****************************************************************************************\ + + Generic implementation of QuickSort algorithm + Use it as: vector<_Tp> a; ... sort(a,); + + The current implementation was derived from *BSD system qsort(): + + * Copyright (c) 1992, 1993 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + +\****************************************************************************************/ + +template void sort( vector<_Tp>& vec, _LT LT=_LT() ) +{ + int isort_thresh = 7; + int sp = 0; + + struct + { + _Tp *lb; + _Tp *ub; + } stack[48]; + + size_t total = vec.size(); + + if( total <= 1 ) + return; + + _Tp* arr = &vec[0]; + stack[0].lb = arr; + stack[0].ub = arr + (total - 1); + + while( sp >= 0 ) + { + _Tp* left = stack[sp].lb; + _Tp* right = stack[sp--].ub; + + for(;;) + { + int i, n = (int)(right - left) + 1, m; + _Tp* ptr; + _Tp* ptr2; + + if( n <= isort_thresh ) + { + insert_sort: + for( ptr = left + 1; ptr <= right; ptr++ ) + { + for( ptr2 = ptr; ptr2 > left && LT(ptr2[0],ptr2[-1]); ptr2--) + std::swap( ptr2[0], ptr2[-1] ); + } + break; + } + else + { + _Tp* left0; + _Tp* left1; + _Tp* right0; + _Tp* right1; + _Tp* pivot; + _Tp* a; + _Tp* b; + _Tp* c; + int swap_cnt = 0; + + left0 = left; + right0 = right; + pivot = left + (n/2); + + if( n > 40 ) + { + int d = n / 8; + a = left, b = left + d, c = left + 2*d; + left = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); + + a = pivot - d, b = pivot, c = pivot + d; + pivot = LT(*a, *b) ? 
(LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); + + a = right - 2*d, b = right - d, c = right; + right = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); + } + + a = left, b = pivot, c = right; + pivot = LT(*a, *b) ? (LT(*b, *c) ? b : (LT(*a, *c) ? c : a)) + : (LT(*c, *b) ? b : (LT(*a, *c) ? a : c)); + if( pivot != left0 ) + { + std::swap( *pivot, *left0 ); + pivot = left0; + } + left = left1 = left0 + 1; + right = right1 = right0; + + for(;;) + { + while( left <= right && !LT(*pivot, *left) ) + { + if( !LT(*left, *pivot) ) + { + if( left > left1 ) + std::swap( *left1, *left ); + swap_cnt = 1; + left1++; + } + left++; + } + + while( left <= right && !LT(*right, *pivot) ) + { + if( !LT(*pivot, *right) ) + { + if( right < right1 ) + std::swap( *right1, *right ); + swap_cnt = 1; + right1--; + } + right--; + } + + if( left > right ) + break; + std::swap( *left, *right ); + swap_cnt = 1; + left++; + right--; + } + + if( swap_cnt == 0 ) + { + left = left0, right = right0; + goto insert_sort; + } + + n = std::min( (int)(left1 - left0), (int)(left - left1) ); + for( i = 0; i < n; i++ ) + std::swap( left0[i], left[i-n] ); + + n = std::min( (int)(right0 - right1), (int)(right1 - right) ); + for( i = 0; i < n; i++ ) + std::swap( left[i], right0[i-n+1] ); + n = (int)(left - left1); + m = (int)(right1 - right); + if( n > 1 ) + { + if( m > 1 ) + { + if( n > m ) + { + stack[++sp].lb = left0; + stack[sp].ub = left0 + n - 1; + left = right0 - m + 1, right = right0; + } + else + { + stack[++sp].lb = right0 - m + 1; + stack[sp].ub = right0; + left = left0, right = left0 + n - 1; + } + } + else + left = left0, right = left0 + n - 1; + } + else if( m > 1 ) + left = right0 - m + 1, right = right0; + else + break; + } + } + } +} + +template class CV_EXPORTS LessThan +{ +public: + bool operator()(const _Tp& a, const _Tp& b) const { return a < b; } +}; + +template class CV_EXPORTS GreaterEq +{ +public: + 
bool operator()(const _Tp& a, const _Tp& b) const { return a >= b; } +}; + +template class CV_EXPORTS LessThanIdx +{ +public: + LessThanIdx( const _Tp* _arr ) : arr(_arr) {} + bool operator()(int a, int b) const { return arr[a] < arr[b]; } + const _Tp* arr; +}; + +template class CV_EXPORTS GreaterEqIdx +{ +public: + GreaterEqIdx( const _Tp* _arr ) : arr(_arr) {} + bool operator()(int a, int b) const { return arr[a] >= arr[b]; } + const _Tp* arr; +}; + + +// This function splits the input sequence or set into one or more equivalence classes and +// returns the vector of labels - 0-based class indexes for each element. +// predicate(a,b) returns true if the two sequence elements certainly belong to the same class. +// +// The algorithm is described in "Introduction to Algorithms" +// by Cormen, Leiserson and Rivest, the chapter "Data structures for disjoint sets" +template int +partition( const vector<_Tp>& _vec, vector& labels, + _EqPredicate predicate=_EqPredicate()) +{ + int i, j, N = (int)_vec.size(); + const _Tp* vec = &_vec[0]; + + const int PARENT=0; + const int RANK=1; + + vector _nodes(N*2); + int (*nodes)[2] = (int(*)[2])&_nodes[0]; + + // The first O(N) pass: create N single-vertex trees + for(i = 0; i < N; i++) + { + nodes[i][PARENT]=-1; + nodes[i][RANK] = 0; + } + + // The main O(N^2) pass: merge connected components + for( i = 0; i < N; i++ ) + { + int root = i; + + // find root + while( nodes[root][PARENT] >= 0 ) + root = nodes[root][PARENT]; + + for( j = 0; j < N; j++ ) + { + if( i == j || !predicate(vec[i], vec[j])) + continue; + int root2 = j; + + while( nodes[root2][PARENT] >= 0 ) + root2 = nodes[root2][PARENT]; + + if( root2 != root ) + { + // unite both trees + int rank = nodes[root][RANK], rank2 = nodes[root2][RANK]; + if( rank > rank2 ) + nodes[root2][PARENT] = root; + else + { + nodes[root][PARENT] = root2; + nodes[root2][RANK] += rank == rank2; + root = root2; + } + assert( nodes[root][PARENT] < 0 ); + + int k = j, parent; + + // compress the 
path from node2 to root + while( (parent = nodes[k][PARENT]) >= 0 ) + { + nodes[k][PARENT] = root; + k = parent; + } + + // compress the path from node to root + k = i; + while( (parent = nodes[k][PARENT]) >= 0 ) + { + nodes[k][PARENT] = root; + k = parent; + } + } + } + } + + // Final O(N) pass: enumerate classes + labels.resize(N); + int nclasses = 0; + + for( i = 0; i < N; i++ ) + { + int root = i; + while( nodes[root][PARENT] >= 0 ) + root = nodes[root][PARENT]; + // re-use the rank as the class label + if( nodes[root][RANK] >= 0 ) + nodes[root][RANK] = ~nclasses++; + labels[i] = ~nodes[root][RANK]; + } + + return nclasses; +} + + +////////////////////////////////////////////////////////////////////////////// + +// bridge C++ => C Seq API +CV_EXPORTS schar* seqPush( CvSeq* seq, const void* element=0); +CV_EXPORTS schar* seqPushFront( CvSeq* seq, const void* element=0); +CV_EXPORTS void seqPop( CvSeq* seq, void* element=0); +CV_EXPORTS void seqPopFront( CvSeq* seq, void* element=0); +CV_EXPORTS void seqPopMulti( CvSeq* seq, void* elements, + int count, int in_front=0 ); +CV_EXPORTS void seqRemove( CvSeq* seq, int index ); +CV_EXPORTS void clearSeq( CvSeq* seq ); +CV_EXPORTS schar* getSeqElem( const CvSeq* seq, int index ); +CV_EXPORTS void seqRemoveSlice( CvSeq* seq, CvSlice slice ); +CV_EXPORTS void seqInsertSlice( CvSeq* seq, int before_index, const CvArr* from_arr ); + +template inline Seq<_Tp>::Seq() : seq(0) {} +template inline Seq<_Tp>::Seq( const CvSeq* _seq ) : seq((CvSeq*)_seq) +{ + CV_Assert(!_seq || _seq->elem_size == sizeof(_Tp)); +} + +template inline Seq<_Tp>::Seq( MemStorage& storage, + int headerSize ) +{ + CV_Assert(headerSize >= (int)sizeof(CvSeq)); + seq = cvCreateSeq(DataType<_Tp>::type, headerSize, sizeof(_Tp), storage); +} + +template inline _Tp& Seq<_Tp>::operator [](int idx) +{ return *(_Tp*)getSeqElem(seq, idx); } + +template inline const _Tp& Seq<_Tp>::operator [](int idx) const +{ return *(_Tp*)getSeqElem(seq, idx); } + +template 
inline SeqIterator<_Tp> Seq<_Tp>::begin() const +{ return SeqIterator<_Tp>(*this); } + +template inline SeqIterator<_Tp> Seq<_Tp>::end() const +{ return SeqIterator<_Tp>(*this, true); } + +template inline size_t Seq<_Tp>::size() const +{ return seq ? seq->total : 0; } + +template inline int Seq<_Tp>::type() const +{ return seq ? CV_MAT_TYPE(seq->flags) : 0; } + +template inline int Seq<_Tp>::depth() const +{ return seq ? CV_MAT_DEPTH(seq->flags) : 0; } + +template inline int Seq<_Tp>::channels() const +{ return seq ? CV_MAT_CN(seq->flags) : 0; } + +template inline size_t Seq<_Tp>::elemSize() const +{ return seq ? seq->elem_size : 0; } + +template inline size_t Seq<_Tp>::index(const _Tp& elem) const +{ return cvSeqElemIdx(seq, &elem); } + +template inline void Seq<_Tp>::push_back(const _Tp& elem) +{ cvSeqPush(seq, &elem); } + +template inline void Seq<_Tp>::push_front(const _Tp& elem) +{ cvSeqPushFront(seq, &elem); } + +template inline void Seq<_Tp>::push_back(const _Tp* elem, size_t count) +{ cvSeqPushMulti(seq, elem, (int)count, 0); } + +template inline void Seq<_Tp>::push_front(const _Tp* elem, size_t count) +{ cvSeqPushMulti(seq, elem, (int)count, 1); } + +template inline _Tp& Seq<_Tp>::back() +{ return *(_Tp*)getSeqElem(seq, -1); } + +template inline const _Tp& Seq<_Tp>::back() const +{ return *(const _Tp*)getSeqElem(seq, -1); } + +template inline _Tp& Seq<_Tp>::front() +{ return *(_Tp*)getSeqElem(seq, 0); } + +template inline const _Tp& Seq<_Tp>::front() const +{ return *(const _Tp*)getSeqElem(seq, 0); } + +template inline bool Seq<_Tp>::empty() const +{ return !seq || seq->total == 0; } + +template inline void Seq<_Tp>::clear() +{ if(seq) clearSeq(seq); } + +template inline void Seq<_Tp>::pop_back() +{ seqPop(seq); } + +template inline void Seq<_Tp>::pop_front() +{ seqPopFront(seq); } + +template inline void Seq<_Tp>::pop_back(_Tp* elem, size_t count) +{ seqPopMulti(seq, elem, (int)count, 0); } + +template inline void Seq<_Tp>::pop_front(_Tp* elem, size_t 
count) +{ seqPopMulti(seq, elem, (int)count, 1); } + +template inline void Seq<_Tp>::insert(int idx, const _Tp& elem) +{ seqInsert(seq, idx, &elem); } + +template inline void Seq<_Tp>::insert(int idx, const _Tp* elems, size_t count) +{ + CvMat m = cvMat(1, count, DataType<_Tp>::type, elems); + seqInsertSlice(seq, idx, &m); +} + +template inline void Seq<_Tp>::remove(int idx) +{ seqRemove(seq, idx); } + +template inline void Seq<_Tp>::remove(const Range& r) +{ seqRemoveSlice(seq, r); } + +template inline void Seq<_Tp>::copyTo(vector<_Tp>& vec, const Range& range) const +{ + size_t len = !seq ? 0 : range == Range::all() ? seq->total : range.end - range.start; + vec.resize(len); + if( seq && len ) + cvCvtSeqToArray(seq, &vec[0], range); +} + +template inline Seq<_Tp>::operator vector<_Tp>() const +{ + vector<_Tp> vec; + copyTo(vec); + return vec; +} + +template inline SeqIterator<_Tp>::SeqIterator() +{ memset(this, 0, sizeof(*this)); } + +template inline SeqIterator<_Tp>::SeqIterator(const Seq<_Tp>& seq, bool seekEnd) +{ + cvStartReadSeq(seq.seq, this); + index = seekEnd ? 
seq.seq->total : 0; +} + +template inline void SeqIterator<_Tp>::seek(size_t pos) +{ + cvSetSeqReaderPos(this, (int)pos, false); + index = pos; +} + +template inline size_t SeqIterator<_Tp>::tell() const +{ return index; } + +template inline _Tp& SeqIterator<_Tp>::operator *() +{ return *(_Tp*)ptr; } + +template inline const _Tp& SeqIterator<_Tp>::operator *() const +{ return *(const _Tp*)ptr; } + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator ++() +{ + CV_NEXT_SEQ_ELEM(sizeof(_Tp), *this); + if( ++index >= seq->total*2 ) + index = 0; + return *this; +} + +template inline SeqIterator<_Tp> SeqIterator<_Tp>::operator ++(int) const +{ + SeqIterator<_Tp> it = *this; + ++*this; + return it; +} + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator --() +{ + CV_PREV_SEQ_ELEM(sizeof(_Tp), *this); + if( --index < 0 ) + index = seq->total*2-1; + return *this; +} + +template inline SeqIterator<_Tp> SeqIterator<_Tp>::operator --(int) const +{ + SeqIterator<_Tp> it = *this; + --*this; + return it; +} + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator +=(int delta) +{ + cvSetSeqReaderPos(this, delta, 1); + index += delta; + int n = seq->total*2; + if( index < 0 ) + index += n; + if( index >= n ) + index -= n; + return *this; +} + +template inline SeqIterator<_Tp>& SeqIterator<_Tp>::operator -=(int delta) +{ + return (*this += -delta); +} + +template inline ptrdiff_t operator - (const SeqIterator<_Tp>& a, + const SeqIterator<_Tp>& b) +{ + ptrdiff_t delta = a.index - b.index, n = a.seq->total; + if( std::abs(static_cast(delta)) > n ) + delta += delta < 0 ? 
n : -n; + return delta; +} + +template inline bool operator == (const SeqIterator<_Tp>& a, + const SeqIterator<_Tp>& b) +{ + return a.seq == b.seq && a.index == b.index; +} + +template inline bool operator != (const SeqIterator<_Tp>& a, + const SeqIterator<_Tp>& b) +{ + return !(a == b); +} + + +template struct CV_EXPORTS RTTIImpl +{ +public: + static int isInstance(const void* ptr) + { + static _ClsName dummy; + union + { + const void* p; + const void** pp; + } a, b; + a.p = &dummy; + b.p = ptr; + return *a.pp == *b.pp; + } + static void release(void** dbptr) + { + if(dbptr && *dbptr) + { + delete (_ClsName*)*dbptr; + *dbptr = 0; + } + } + static void* read(CvFileStorage* fs, CvFileNode* n) + { + FileNode fn(fs, n); + _ClsName* obj = new _ClsName; + if(obj->read(fn)) + return obj; + delete obj; + return 0; + } + + static void write(CvFileStorage* _fs, const char* name, const void* ptr, CvAttrList) + { + if(ptr && _fs) + { + FileStorage fs(_fs); + fs.fs.addref(); + ((const _ClsName*)ptr)->write(fs, string(name)); + } + } + + static void* clone(const void* ptr) + { + if(!ptr) + return 0; + return new _ClsName(*(const _ClsName*)ptr); + } +}; + + +class CV_EXPORTS Formatter +{ +public: + virtual ~Formatter() {} + virtual void write(std::ostream& out, const Mat& m, const int* params=0, int nparams=0) const = 0; + virtual void write(std::ostream& out, const void* data, int nelems, int type, + const int* params=0, int nparams=0) const = 0; + static const Formatter* get(const char* fmt=""); + static const Formatter* setDefault(const Formatter* fmt); +}; + + +struct CV_EXPORTS Formatted +{ + Formatted(const Mat& m, const Formatter* fmt, + const vector& params); + Formatted(const Mat& m, const Formatter* fmt, + const int* params=0); + Mat mtx; + const Formatter* fmt; + vector params; +}; + + +/** Writes a point to an output stream in Matlab notation + */ +template inline std::ostream& operator<<(std::ostream& out, const Point_<_Tp>& p) +{ + out << "[" << p.x << ", " << p.y 
<< "]"; + return out; +} + +/** Writes a point to an output stream in Matlab notation + */ +template inline std::ostream& operator<<(std::ostream& out, const Point3_<_Tp>& p) +{ + out << "[" << p.x << ", " << p.y << ", " << p.z << "]"; + return out; +} + +static inline Formatted format(const Mat& mtx, const char* fmt, + const vector& params=vector()) +{ + return Formatted(mtx, Formatter::get(fmt), params); +} + +template static inline Formatted format(const vector >& vec, + const char* fmt, const vector& params=vector()) +{ + return Formatted(Mat(vec), Formatter::get(fmt), params); +} + +template static inline Formatted format(const vector >& vec, + const char* fmt, const vector& params=vector()) +{ + return Formatted(Mat(vec), Formatter::get(fmt), params); +} + +/** \brief prints Mat to the output stream in Matlab notation + * use like + @verbatim + Mat my_mat = Mat::eye(3,3,CV_32F); + std::cout << my_mat; + @endverbatim + */ +static inline std::ostream& operator << (std::ostream& out, const Mat& mtx) +{ + Formatter::get()->write(out, mtx); + return out; +} + +/** \brief prints Mat to the output stream allows in the specified notation (see format) + * use like + @verbatim + Mat my_mat = Mat::eye(3,3,CV_32F); + std::cout << my_mat; + @endverbatim + */ +static inline std::ostream& operator << (std::ostream& out, const Formatted& fmtd) +{ + fmtd.fmt->write(out, fmtd.mtx); + return out; +} + + +template static inline std::ostream& operator << (std::ostream& out, + const vector >& vec) +{ + Formatter::get()->write(out, Mat(vec)); + return out; +} + + +template static inline std::ostream& operator << (std::ostream& out, + const vector >& vec) +{ + Formatter::get()->write(out, Mat(vec)); + return out; +} + +/*template struct AlgorithmParamType {}; +template<> struct AlgorithmParamType { enum { type = CV_PARAM_TYPE_INT }; }; +template<> struct AlgorithmParamType { enum { type = CV_PARAM_TYPE_REAL }; }; +template<> struct AlgorithmParamType { enum { type = 
CV_PARAM_TYPE_STRING }; }; +template<> struct AlgorithmParamType { enum { type = CV_PARAM_TYPE_MAT }; }; + +template _Tp Algorithm::get(int paramId) const +{ + _Tp value = _Tp(); + get_(paramId, AlgorithmParamType<_Tp>::type, &value); + return value; +} + +template bool Algorithm::set(int paramId, const _Tp& value) +{ + set_(paramId, AlgorithmParamType<_Tp>::type, &value); + return value; +} + +template _Tp Algorithm::paramDefaultValue(int paramId) const +{ + _Tp value = _Tp(); + paramDefaultValue_(paramId, AlgorithmParamType<_Tp>::type, &value); + return value; +} + +template bool Algorithm::paramRange(int paramId, _Tp& minVal, _Tp& maxVal) const +{ + return paramRange_(paramId, AlgorithmParamType<_Tp>::type, &minVal, &maxVal); +} + +template void Algorithm::addParam(int propId, _Tp& value, bool readOnly, const string& name, + const string& help, const _Tp& defaultValue, + _Tp (Algorithm::*getter)(), bool (Algorithm::*setter)(const _Tp&)) +{ + addParam_(propId, AlgorithmParamType<_Tp>::type, &value, readOnly, name, help, &defaultValue, + (void*)getter, (void*)setter); +} + +template void Algorithm::setParamRange(int propId, const _Tp& minVal, const _Tp& maxVal) +{ + setParamRange_(propId, AlgorithmParamType<_Tp>::type, &minVal, &maxVal); +}*/ + +} + +#endif // __cplusplus +#endif diff --git a/opencv2/core/types_c.h b/opencv2/core/types_c.h new file mode 100644 index 0000000..68164fa --- /dev/null +++ b/opencv2/core/types_c.h @@ -0,0 +1,1875 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. 
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_CORE_TYPES_H__ +#define __OPENCV_CORE_TYPES_H__ + +#if !defined _CRT_SECURE_NO_DEPRECATE && _MSC_VER > 1300 +#define _CRT_SECURE_NO_DEPRECATE /* to avoid multiple Visual Studio 2005 warnings */ +#endif + + +#ifndef SKIP_INCLUDES + #include + #include + #include + #include + +#if !defined _MSC_VER && !defined __BORLANDC__ + #include +#endif + + #if defined __ICL + #define CV_ICC __ICL + #elif defined __ICC + #define CV_ICC __ICC + #elif defined __ECL + #define CV_ICC __ECL + #elif defined __ECC + #define CV_ICC __ECC + #elif defined __INTEL_COMPILER + #define CV_ICC __INTEL_COMPILER + #endif + + #if (_MSC_VER >= 1400 && defined _M_X64) || (__GNUC__ >= 4 && defined __x86_64__) + #if defined WIN32 + #include + #endif + #include + #endif + + #if defined __BORLANDC__ + #include + #else + #include + #endif + + #ifdef HAVE_IPL + #ifndef __IPL_H__ + #if defined WIN32 || defined _WIN32 + #include + #else + #include + #endif + #endif + #elif defined __IPL_H__ + #define HAVE_IPL + #endif +#endif // SKIP_INCLUDES + +#if defined WIN32 || defined _WIN32 + #define CV_CDECL __cdecl + #define CV_STDCALL __stdcall +#else + #define CV_CDECL + #define CV_STDCALL +#endif + +#ifndef CV_EXTERN_C + #ifdef __cplusplus + #define CV_EXTERN_C extern "C" + #define CV_DEFAULT(val) = val + #else + #define CV_EXTERN_C + #define CV_DEFAULT(val) + #endif +#endif + +#ifndef CV_EXTERN_C_FUNCPTR + #ifdef __cplusplus + #define CV_EXTERN_C_FUNCPTR(x) extern "C" { typedef x; } + #else + #define CV_EXTERN_C_FUNCPTR(x) typedef x + #endif +#endif + +#ifndef CV_INLINE +#if defined __cplusplus + #define CV_INLINE inline +#elif (defined WIN32 || defined _WIN32 || defined WINCE) && !defined __GNUC__ + #define CV_INLINE __inline +#else + #define CV_INLINE static +#endif +#endif /* CV_INLINE */ + +#if (defined WIN32 || defined _WIN32 || defined WINCE) && defined CVAPI_EXPORTS + #define CV_EXPORTS __declspec(dllexport) +#else + #define CV_EXPORTS +#endif + +#ifndef CVAPI + #define 
CVAPI(rettype) CV_EXTERN_C CV_EXPORTS rettype CV_CDECL +#endif + +#if defined _MSC_VER || defined __BORLANDC__ +typedef __int64 int64; +typedef unsigned __int64 uint64; +#define CV_BIG_INT(n) n##I64 +#define CV_BIG_UINT(n) n##UI64 +#else +typedef int64_t int64; +typedef uint64_t uint64; +#define CV_BIG_INT(n) n##LL +#define CV_BIG_UINT(n) n##ULL +#endif + +#ifndef HAVE_IPL +typedef unsigned char uchar; +typedef unsigned short ushort; +#endif + +typedef signed char schar; + +/* special informative macros for wrapper generators */ +#define CV_CARRAY(counter) +#define CV_CUSTOM_CARRAY(args) +#define CV_EXPORTS_W CV_EXPORTS +#define CV_EXPORTS_W_SIMPLE CV_EXPORTS +#define CV_EXPORTS_AS(synonym) CV_EXPORTS +#define CV_EXPORTS_W_MAP CV_EXPORTS +#define CV_IN_OUT +#define CV_OUT +#define CV_PROP +#define CV_PROP_RW +#define CV_WRAP +#define CV_WRAP_AS(synonym) +#define CV_WRAP_DEFAULT(value) + +/* CvArr* is used to pass arbitrary + * array-like data structures + * into functions where the particular + * array type is recognized at runtime: + */ +typedef void CvArr; + +typedef union Cv32suf +{ + int i; + unsigned u; + float f; +} +Cv32suf; + +typedef union Cv64suf +{ + int64 i; + uint64 u; + double f; +} +Cv64suf; + +typedef int CVStatus; + +enum { + CV_StsOk= 0, /* everithing is ok */ + CV_StsBackTrace= -1, /* pseudo error for back trace */ + CV_StsError= -2, /* unknown /unspecified error */ + CV_StsInternal= -3, /* internal error (bad state) */ + CV_StsNoMem= -4, /* insufficient memory */ + CV_StsBadArg= -5, /* function arg/param is bad */ + CV_StsBadFunc= -6, /* unsupported function */ + CV_StsNoConv= -7, /* iter. 
didn't converge */ + CV_StsAutoTrace= -8, /* tracing */ + CV_HeaderIsNull= -9, /* image header is NULL */ + CV_BadImageSize= -10, /* image size is invalid */ + CV_BadOffset= -11, /* offset is invalid */ + CV_BadDataPtr= -12, /**/ + CV_BadStep= -13, /**/ + CV_BadModelOrChSeq= -14, /**/ + CV_BadNumChannels= -15, /**/ + CV_BadNumChannel1U= -16, /**/ + CV_BadDepth= -17, /**/ + CV_BadAlphaChannel= -18, /**/ + CV_BadOrder= -19, /**/ + CV_BadOrigin= -20, /**/ + CV_BadAlign= -21, /**/ + CV_BadCallBack= -22, /**/ + CV_BadTileSize= -23, /**/ + CV_BadCOI= -24, /**/ + CV_BadROISize= -25, /**/ + CV_MaskIsTiled= -26, /**/ + CV_StsNullPtr= -27, /* null pointer */ + CV_StsVecLengthErr= -28, /* incorrect vector length */ + CV_StsFilterStructContentErr= -29, /* incorr. filter structure content */ + CV_StsKernelStructContentErr= -30, /* incorr. transform kernel content */ + CV_StsFilterOffsetErr= -31, /* incorrect filter ofset value */ + CV_StsBadSize= -201, /* the input/output structure size is incorrect */ + CV_StsDivByZero= -202, /* division by zero */ + CV_StsInplaceNotSupported= -203, /* in-place operation is not supported */ + CV_StsObjectNotFound= -204, /* request can't be completed */ + CV_StsUnmatchedFormats= -205, /* formats of input/output arrays differ */ + CV_StsBadFlag= -206, /* flag is wrong or not supported */ + CV_StsBadPoint= -207, /* bad CvPoint */ + CV_StsBadMask= -208, /* bad format of mask (neither 8uC1 nor 8sC1)*/ + CV_StsUnmatchedSizes= -209, /* sizes of input/output structures do not match */ + CV_StsUnsupportedFormat= -210, /* the data format/type is not supported by the function*/ + CV_StsOutOfRange= -211, /* some of parameters are out of range */ + CV_StsParseError= -212, /* invalid syntax/structure of the parsed file */ + CV_StsNotImplemented= -213, /* the requested function/feature is not implemented */ + CV_StsBadMemBlock= -214, /* an allocated block has been corrupted */ + CV_StsAssert= -215, /* assertion failed */ + CV_GpuNotSupported= -216, + 
CV_GpuApiCallError= -217, + CV_GpuNppCallError= -218, + CV_GpuCufftCallError= -219 +}; + +/****************************************************************************************\ +* Common macros and inline functions * +\****************************************************************************************/ + +#define CV_PI 3.1415926535897932384626433832795 +#define CV_LOG2 0.69314718055994530941723212145818 + +#define CV_SWAP(a,b,t) ((t) = (a), (a) = (b), (b) = (t)) + +#ifndef MIN +#define MIN(a,b) ((a) > (b) ? (b) : (a)) +#endif + +#ifndef MAX +#define MAX(a,b) ((a) < (b) ? (b) : (a)) +#endif + +/* min & max without jumps */ +#define CV_IMIN(a, b) ((a) ^ (((a)^(b)) & (((a) < (b)) - 1))) + +#define CV_IMAX(a, b) ((a) ^ (((a)^(b)) & (((a) > (b)) - 1))) + +/* absolute value without jumps */ +#ifndef __cplusplus +#define CV_IABS(a) (((a) ^ ((a) < 0 ? -1 : 0)) - ((a) < 0 ? -1 : 0)) +#else +#define CV_IABS(a) abs(a) +#endif +#define CV_CMP(a,b) (((a) > (b)) - ((a) < (b))) +#define CV_SIGN(a) CV_CMP((a),0) + +CV_INLINE int cvRound( double value ) +{ +#if (defined _MSC_VER && defined _M_X64) || (defined __GNUC__ && defined __x86_64__ && !defined __APPLE__) + __m128d t = _mm_set_sd( value ); + return _mm_cvtsd_si32(t); +#elif defined _MSC_VER && defined _M_IX86 + int t; + __asm + { + fld value; + fistp t; + } + return t; +#elif defined HAVE_LRINT || defined CV_ICC || defined __GNUC__ + return (int)lrint(value); +#else + // while this is not IEEE754-compliant rounding, it's usually a good enough approximation + return (int)(value + (value >= 0 ? 
0.5 : -0.5)); +#endif +} + + +CV_INLINE int cvFloor( double value ) +{ +#ifdef __GNUC__ + int i = (int)value; + return i - (i > value); +#elif defined _MSC_VER && defined _M_X64 + __m128d t = _mm_set_sd( value ); + int i = _mm_cvtsd_si32(t); + return i - _mm_movemask_pd(_mm_cmplt_sd(t, _mm_cvtsi32_sd(t,i))); +#else + int i = cvRound(value); + Cv32suf diff; + diff.f = (float)(value - i); + return i - (diff.i < 0); +#endif +} + + +CV_INLINE int cvCeil( double value ) +{ +#ifdef __GNUC__ + int i = (int)value; + return i + (i < value); +#elif defined _MSC_VER && defined _M_X64 + __m128d t = _mm_set_sd( value ); + int i = _mm_cvtsd_si32(t); + return i + _mm_movemask_pd(_mm_cmplt_sd(_mm_cvtsi32_sd(t,i), t)); +#else + int i = cvRound(value); + Cv32suf diff; + diff.f = (float)(i - value); + return i + (diff.i < 0); +#endif +} + +#define cvInvSqrt(value) ((float)(1./sqrt(value))) +#define cvSqrt(value) ((float)sqrt(value)) + +CV_INLINE int cvIsNaN( double value ) +{ +#if 1/*defined _MSC_VER || defined __BORLANDC__ + return _isnan(value); +#elif defined __GNUC__ + return isnan(value); +#else*/ + Cv64suf ieee754; + ieee754.f = value; + return ((unsigned)(ieee754.u >> 32) & 0x7fffffff) + + ((unsigned)ieee754.u != 0) > 0x7ff00000; +#endif +} + + +CV_INLINE int cvIsInf( double value ) +{ +#if 1/*defined _MSC_VER || defined __BORLANDC__ + return !_finite(value); +#elif defined __GNUC__ + return isinf(value); +#else*/ + Cv64suf ieee754; + ieee754.f = value; + return ((unsigned)(ieee754.u >> 32) & 0x7fffffff) == 0x7ff00000 && + (unsigned)ieee754.u == 0; +#endif +} + + +/*************** Random number generation *******************/ + +typedef uint64 CvRNG; + +#define CV_RNG_COEFF 4164903690U + +CV_INLINE CvRNG cvRNG( int64 seed CV_DEFAULT(-1)) +{ + CvRNG rng = seed ? 
(uint64)seed : (uint64)(int64)-1; + return rng; +} + +/* Return random 32-bit unsigned integer: */ +CV_INLINE unsigned cvRandInt( CvRNG* rng ) +{ + uint64 temp = *rng; + temp = (uint64)(unsigned)temp*CV_RNG_COEFF + (temp >> 32); + *rng = temp; + return (unsigned)temp; +} + +/* Returns random floating-point number between 0 and 1: */ +CV_INLINE double cvRandReal( CvRNG* rng ) +{ + return cvRandInt(rng)*2.3283064365386962890625e-10 /* 2^-32 */; +} + +/****************************************************************************************\ +* Image type (IplImage) * +\****************************************************************************************/ + +#ifndef HAVE_IPL + +/* + * The following definitions (until #endif) + * is an extract from IPL headers. + * Copyright (c) 1995 Intel Corporation. + */ +#define IPL_DEPTH_SIGN 0x80000000 + +#define IPL_DEPTH_1U 1 +#define IPL_DEPTH_8U 8 +#define IPL_DEPTH_16U 16 +#define IPL_DEPTH_32F 32 + +#define IPL_DEPTH_8S (IPL_DEPTH_SIGN| 8) +#define IPL_DEPTH_16S (IPL_DEPTH_SIGN|16) +#define IPL_DEPTH_32S (IPL_DEPTH_SIGN|32) + +#define IPL_DATA_ORDER_PIXEL 0 +#define IPL_DATA_ORDER_PLANE 1 + +#define IPL_ORIGIN_TL 0 +#define IPL_ORIGIN_BL 1 + +#define IPL_ALIGN_4BYTES 4 +#define IPL_ALIGN_8BYTES 8 +#define IPL_ALIGN_16BYTES 16 +#define IPL_ALIGN_32BYTES 32 + +#define IPL_ALIGN_DWORD IPL_ALIGN_4BYTES +#define IPL_ALIGN_QWORD IPL_ALIGN_8BYTES + +#define IPL_BORDER_CONSTANT 0 +#define IPL_BORDER_REPLICATE 1 +#define IPL_BORDER_REFLECT 2 +#define IPL_BORDER_WRAP 3 + +typedef struct _IplImage +{ + int nSize; /* sizeof(IplImage) */ + int ID; /* version (=0)*/ + int nChannels; /* Most of OpenCV functions support 1,2,3 or 4 channels */ + int alphaChannel; /* Ignored by OpenCV */ + int depth; /* Pixel depth in bits: IPL_DEPTH_8U, IPL_DEPTH_8S, IPL_DEPTH_16S, + IPL_DEPTH_32S, IPL_DEPTH_32F and IPL_DEPTH_64F are supported. 
*/ + char colorModel[4]; /* Ignored by OpenCV */ + char channelSeq[4]; /* ditto */ + int dataOrder; /* 0 - interleaved color channels, 1 - separate color channels. + cvCreateImage can only create interleaved images */ + int origin; /* 0 - top-left origin, + 1 - bottom-left origin (Windows bitmaps style). */ + int align; /* Alignment of image rows (4 or 8). + OpenCV ignores it and uses widthStep instead. */ + int width; /* Image width in pixels. */ + int height; /* Image height in pixels. */ + struct _IplROI *roi; /* Image ROI. If NULL, the whole image is selected. */ + struct _IplImage *maskROI; /* Must be NULL. */ + void *imageId; /* " " */ + struct _IplTileInfo *tileInfo; /* " " */ + int imageSize; /* Image data size in bytes + (==image->height*image->widthStep + in case of interleaved data)*/ + char *imageData; /* Pointer to aligned image data. */ + int widthStep; /* Size of aligned image row in bytes. */ + int BorderMode[4]; /* Ignored by OpenCV. */ + int BorderConst[4]; /* Ditto. */ + char *imageDataOrigin; /* Pointer to very origin of image data + (not necessarily aligned) - + needed for correct deallocation */ +} +IplImage; + +typedef struct _IplTileInfo IplTileInfo; + +typedef struct _IplROI +{ + int coi; /* 0 - no COI (all channels are selected), 1 - 0th channel is selected ...*/ + int xOffset; + int yOffset; + int width; + int height; +} +IplROI; + +typedef struct _IplConvKernel +{ + int nCols; + int nRows; + int anchorX; + int anchorY; + int *values; + int nShiftR; +} +IplConvKernel; + +typedef struct _IplConvKernelFP +{ + int nCols; + int nRows; + int anchorX; + int anchorY; + float *values; +} +IplConvKernelFP; + +#define IPL_IMAGE_HEADER 1 +#define IPL_IMAGE_DATA 2 +#define IPL_IMAGE_ROI 4 + +#endif/*HAVE_IPL*/ + +/* extra border mode */ +#define IPL_BORDER_REFLECT_101 4 +#define IPL_BORDER_TRANSPARENT 5 + +#define IPL_IMAGE_MAGIC_VAL ((int)sizeof(IplImage)) +#define CV_TYPE_NAME_IMAGE "opencv-image" + +#define CV_IS_IMAGE_HDR(img) \ + ((img) != NULL 
&& ((const IplImage*)(img))->nSize == sizeof(IplImage)) + +#define CV_IS_IMAGE(img) \ + (CV_IS_IMAGE_HDR(img) && ((IplImage*)img)->imageData != NULL) + +/* for storing double-precision + floating point data in IplImage's */ +#define IPL_DEPTH_64F 64 + +/* get reference to pixel at (col,row), + for multi-channel images (col) should be multiplied by number of channels */ +#define CV_IMAGE_ELEM( image, elemtype, row, col ) \ + (((elemtype*)((image)->imageData + (image)->widthStep*(row)))[(col)]) + +/****************************************************************************************\ +* Matrix type (CvMat) * +\****************************************************************************************/ + +#define CV_CN_MAX 512 +#define CV_CN_SHIFT 3 +#define CV_DEPTH_MAX (1 << CV_CN_SHIFT) + +#define CV_8U 0 +#define CV_8S 1 +#define CV_16U 2 +#define CV_16S 3 +#define CV_32S 4 +#define CV_32F 5 +#define CV_64F 6 +#define CV_USRTYPE1 7 + +#define CV_MAT_DEPTH_MASK (CV_DEPTH_MAX - 1) +#define CV_MAT_DEPTH(flags) ((flags) & CV_MAT_DEPTH_MASK) + +#define CV_MAKETYPE(depth,cn) (CV_MAT_DEPTH(depth) + (((cn)-1) << CV_CN_SHIFT)) +#define CV_MAKE_TYPE CV_MAKETYPE + +#define CV_8UC1 CV_MAKETYPE(CV_8U,1) +#define CV_8UC2 CV_MAKETYPE(CV_8U,2) +#define CV_8UC3 CV_MAKETYPE(CV_8U,3) +#define CV_8UC4 CV_MAKETYPE(CV_8U,4) +#define CV_8UC(n) CV_MAKETYPE(CV_8U,(n)) + +#define CV_8SC1 CV_MAKETYPE(CV_8S,1) +#define CV_8SC2 CV_MAKETYPE(CV_8S,2) +#define CV_8SC3 CV_MAKETYPE(CV_8S,3) +#define CV_8SC4 CV_MAKETYPE(CV_8S,4) +#define CV_8SC(n) CV_MAKETYPE(CV_8S,(n)) + +#define CV_16UC1 CV_MAKETYPE(CV_16U,1) +#define CV_16UC2 CV_MAKETYPE(CV_16U,2) +#define CV_16UC3 CV_MAKETYPE(CV_16U,3) +#define CV_16UC4 CV_MAKETYPE(CV_16U,4) +#define CV_16UC(n) CV_MAKETYPE(CV_16U,(n)) + +#define CV_16SC1 CV_MAKETYPE(CV_16S,1) +#define CV_16SC2 CV_MAKETYPE(CV_16S,2) +#define CV_16SC3 CV_MAKETYPE(CV_16S,3) +#define CV_16SC4 CV_MAKETYPE(CV_16S,4) +#define CV_16SC(n) CV_MAKETYPE(CV_16S,(n)) + +#define CV_32SC1 
CV_MAKETYPE(CV_32S,1) +#define CV_32SC2 CV_MAKETYPE(CV_32S,2) +#define CV_32SC3 CV_MAKETYPE(CV_32S,3) +#define CV_32SC4 CV_MAKETYPE(CV_32S,4) +#define CV_32SC(n) CV_MAKETYPE(CV_32S,(n)) + +#define CV_32FC1 CV_MAKETYPE(CV_32F,1) +#define CV_32FC2 CV_MAKETYPE(CV_32F,2) +#define CV_32FC3 CV_MAKETYPE(CV_32F,3) +#define CV_32FC4 CV_MAKETYPE(CV_32F,4) +#define CV_32FC(n) CV_MAKETYPE(CV_32F,(n)) + +#define CV_64FC1 CV_MAKETYPE(CV_64F,1) +#define CV_64FC2 CV_MAKETYPE(CV_64F,2) +#define CV_64FC3 CV_MAKETYPE(CV_64F,3) +#define CV_64FC4 CV_MAKETYPE(CV_64F,4) +#define CV_64FC(n) CV_MAKETYPE(CV_64F,(n)) + +#define CV_AUTO_STEP 0x7fffffff +#define CV_WHOLE_ARR cvSlice( 0, 0x3fffffff ) + +#define CV_MAT_CN_MASK ((CV_CN_MAX - 1) << CV_CN_SHIFT) +#define CV_MAT_CN(flags) ((((flags) & CV_MAT_CN_MASK) >> CV_CN_SHIFT) + 1) +#define CV_MAT_TYPE_MASK (CV_DEPTH_MAX*CV_CN_MAX - 1) +#define CV_MAT_TYPE(flags) ((flags) & CV_MAT_TYPE_MASK) +#define CV_MAT_CONT_FLAG_SHIFT 14 +#define CV_MAT_CONT_FLAG (1 << CV_MAT_CONT_FLAG_SHIFT) +#define CV_IS_MAT_CONT(flags) ((flags) & CV_MAT_CONT_FLAG) +#define CV_IS_CONT_MAT CV_IS_MAT_CONT +#define CV_SUBMAT_FLAG_SHIFT 15 +#define CV_SUBMAT_FLAG (1 << CV_SUBMAT_FLAG_SHIFT) +#define CV_IS_SUBMAT(flags) ((flags) & CV_MAT_SUBMAT_FLAG) + +#define CV_MAGIC_MASK 0xFFFF0000 +#define CV_MAT_MAGIC_VAL 0x42420000 +#define CV_TYPE_NAME_MAT "opencv-matrix" + +typedef struct CvMat +{ + int type; + int step; + + /* for internal use only */ + int* refcount; + int hdr_refcount; + + union + { + uchar* ptr; + short* s; + int* i; + float* fl; + double* db; + } data; + +#ifdef __cplusplus + union + { + int rows; + int height; + }; + + union + { + int cols; + int width; + }; +#else + int rows; + int cols; +#endif + +} +CvMat; + + +#define CV_IS_MAT_HDR(mat) \ + ((mat) != NULL && \ + (((const CvMat*)(mat))->type & CV_MAGIC_MASK) == CV_MAT_MAGIC_VAL && \ + ((const CvMat*)(mat))->cols > 0 && ((const CvMat*)(mat))->rows > 0) + +#define CV_IS_MAT_HDR_Z(mat) \ + ((mat) != NULL && \ 
+ (((const CvMat*)(mat))->type & CV_MAGIC_MASK) == CV_MAT_MAGIC_VAL && \ + ((const CvMat*)(mat))->cols >= 0 && ((const CvMat*)(mat))->rows >= 0) + +#define CV_IS_MAT(mat) \ + (CV_IS_MAT_HDR(mat) && ((const CvMat*)(mat))->data.ptr != NULL) + +#define CV_IS_MASK_ARR(mat) \ + (((mat)->type & (CV_MAT_TYPE_MASK & ~CV_8SC1)) == 0) + +#define CV_ARE_TYPES_EQ(mat1, mat2) \ + ((((mat1)->type ^ (mat2)->type) & CV_MAT_TYPE_MASK) == 0) + +#define CV_ARE_CNS_EQ(mat1, mat2) \ + ((((mat1)->type ^ (mat2)->type) & CV_MAT_CN_MASK) == 0) + +#define CV_ARE_DEPTHS_EQ(mat1, mat2) \ + ((((mat1)->type ^ (mat2)->type) & CV_MAT_DEPTH_MASK) == 0) + +#define CV_ARE_SIZES_EQ(mat1, mat2) \ + ((mat1)->rows == (mat2)->rows && (mat1)->cols == (mat2)->cols) + +#define CV_IS_MAT_CONST(mat) \ + (((mat)->rows|(mat)->cols) == 1) + +/* Size of each channel item, + 0x124489 = 1000 0100 0100 0010 0010 0001 0001 ~ array of sizeof(arr_type_elem) */ +#define CV_ELEM_SIZE1(type) \ + ((((sizeof(size_t)<<28)|0x8442211) >> CV_MAT_DEPTH(type)*4) & 15) + +/* 0x3a50 = 11 10 10 01 01 00 00 ~ array of log2(sizeof(arr_type_elem)) */ +#define CV_ELEM_SIZE(type) \ + (CV_MAT_CN(type) << ((((sizeof(size_t)/4+1)*16384|0x3a50) >> CV_MAT_DEPTH(type)*2) & 3)) + +#define IPL2CV_DEPTH(depth) \ + ((((CV_8U)+(CV_16U<<4)+(CV_32F<<8)+(CV_64F<<16)+(CV_8S<<20)+ \ + (CV_16S<<24)+(CV_32S<<28)) >> ((((depth) & 0xF0) >> 2) + \ + (((depth) & IPL_DEPTH_SIGN) ? 20 : 0))) & 15) + +/* Inline constructor. No data is allocated internally!!! 
+ * (Use together with cvCreateData, or use cvCreateMat instead to + * get a matrix with allocated data): + */ +CV_INLINE CvMat cvMat( int rows, int cols, int type, void* data CV_DEFAULT(NULL)) +{ + CvMat m; + + assert( (unsigned)CV_MAT_DEPTH(type) <= CV_64F ); + type = CV_MAT_TYPE(type); + m.type = CV_MAT_MAGIC_VAL | CV_MAT_CONT_FLAG | type; + m.cols = cols; + m.rows = rows; + m.step = m.cols*CV_ELEM_SIZE(type); + m.data.ptr = (uchar*)data; + m.refcount = NULL; + m.hdr_refcount = 0; + + return m; +} + + +#define CV_MAT_ELEM_PTR_FAST( mat, row, col, pix_size ) \ + (assert( (unsigned)(row) < (unsigned)(mat).rows && \ + (unsigned)(col) < (unsigned)(mat).cols ), \ + (mat).data.ptr + (size_t)(mat).step*(row) + (pix_size)*(col)) + +#define CV_MAT_ELEM_PTR( mat, row, col ) \ + CV_MAT_ELEM_PTR_FAST( mat, row, col, CV_ELEM_SIZE((mat).type) ) + +#define CV_MAT_ELEM( mat, elemtype, row, col ) \ + (*(elemtype*)CV_MAT_ELEM_PTR_FAST( mat, row, col, sizeof(elemtype))) + + +CV_INLINE double cvmGet( const CvMat* mat, int row, int col ) +{ + int type; + + type = CV_MAT_TYPE(mat->type); + assert( (unsigned)row < (unsigned)mat->rows && + (unsigned)col < (unsigned)mat->cols ); + + if( type == CV_32FC1 ) + return ((float*)(mat->data.ptr + (size_t)mat->step*row))[col]; + else + { + assert( type == CV_64FC1 ); + return ((double*)(mat->data.ptr + (size_t)mat->step*row))[col]; + } +} + + +CV_INLINE void cvmSet( CvMat* mat, int row, int col, double value ) +{ + int type; + type = CV_MAT_TYPE(mat->type); + assert( (unsigned)row < (unsigned)mat->rows && + (unsigned)col < (unsigned)mat->cols ); + + if( type == CV_32FC1 ) + ((float*)(mat->data.ptr + (size_t)mat->step*row))[col] = (float)value; + else + { + assert( type == CV_64FC1 ); + ((double*)(mat->data.ptr + (size_t)mat->step*row))[col] = (double)value; + } +} + + +CV_INLINE int cvIplDepth( int type ) +{ + int depth = CV_MAT_DEPTH(type); + return CV_ELEM_SIZE1(depth)*8 | (depth == CV_8S || depth == CV_16S || + depth == CV_32S ? 
IPL_DEPTH_SIGN : 0); +} + + +/****************************************************************************************\ +* Multi-dimensional dense array (CvMatND) * +\****************************************************************************************/ + +#define CV_MATND_MAGIC_VAL 0x42430000 +#define CV_TYPE_NAME_MATND "opencv-nd-matrix" + +#define CV_MAX_DIM 32 +#define CV_MAX_DIM_HEAP 1024 + +typedef struct CvMatND +{ + int type; + int dims; + + int* refcount; + int hdr_refcount; + + union + { + uchar* ptr; + float* fl; + double* db; + int* i; + short* s; + } data; + + struct + { + int size; + int step; + } + dim[CV_MAX_DIM]; +} +CvMatND; + +#define CV_IS_MATND_HDR(mat) \ + ((mat) != NULL && (((const CvMatND*)(mat))->type & CV_MAGIC_MASK) == CV_MATND_MAGIC_VAL) + +#define CV_IS_MATND(mat) \ + (CV_IS_MATND_HDR(mat) && ((const CvMatND*)(mat))->data.ptr != NULL) + + +/****************************************************************************************\ +* Multi-dimensional sparse array (CvSparseMat) * +\****************************************************************************************/ + +#define CV_SPARSE_MAT_MAGIC_VAL 0x42440000 +#define CV_TYPE_NAME_SPARSE_MAT "opencv-sparse-matrix" + +struct CvSet; + +typedef struct CvSparseMat +{ + int type; + int dims; + int* refcount; + int hdr_refcount; + + struct CvSet* heap; + void** hashtable; + int hashsize; + int valoffset; + int idxoffset; + int size[CV_MAX_DIM]; +} +CvSparseMat; + +#define CV_IS_SPARSE_MAT_HDR(mat) \ + ((mat) != NULL && \ + (((const CvSparseMat*)(mat))->type & CV_MAGIC_MASK) == CV_SPARSE_MAT_MAGIC_VAL) + +#define CV_IS_SPARSE_MAT(mat) \ + CV_IS_SPARSE_MAT_HDR(mat) + +/**************** iteration through a sparse array *****************/ + +typedef struct CvSparseNode +{ + unsigned hashval; + struct CvSparseNode* next; +} +CvSparseNode; + +typedef struct CvSparseMatIterator +{ + CvSparseMat* mat; + CvSparseNode* node; + int curidx; +} +CvSparseMatIterator; + +#define CV_NODE_VAL(mat,node) 
((void*)((uchar*)(node) + (mat)->valoffset)) +#define CV_NODE_IDX(mat,node) ((int*)((uchar*)(node) + (mat)->idxoffset)) + +/****************************************************************************************\ +* Histogram * +\****************************************************************************************/ + +typedef int CvHistType; + +#define CV_HIST_MAGIC_VAL 0x42450000 +#define CV_HIST_UNIFORM_FLAG (1 << 10) + +/* indicates whether bin ranges are set already or not */ +#define CV_HIST_RANGES_FLAG (1 << 11) + +#define CV_HIST_ARRAY 0 +#define CV_HIST_SPARSE 1 +#define CV_HIST_TREE CV_HIST_SPARSE + +/* should be used as a parameter only, + it turns to CV_HIST_UNIFORM_FLAG of hist->type */ +#define CV_HIST_UNIFORM 1 + +typedef struct CvHistogram +{ + int type; + CvArr* bins; + float thresh[CV_MAX_DIM][2]; /* For uniform histograms. */ + float** thresh2; /* For non-uniform histograms. */ + CvMatND mat; /* Embedded matrix header for array histograms. */ +} +CvHistogram; + +#define CV_IS_HIST( hist ) \ + ((hist) != NULL && \ + (((CvHistogram*)(hist))->type & CV_MAGIC_MASK) == CV_HIST_MAGIC_VAL && \ + (hist)->bins != NULL) + +#define CV_IS_UNIFORM_HIST( hist ) \ + (((hist)->type & CV_HIST_UNIFORM_FLAG) != 0) + +#define CV_IS_SPARSE_HIST( hist ) \ + CV_IS_SPARSE_MAT((hist)->bins) + +#define CV_HIST_HAS_RANGES( hist ) \ + (((hist)->type & CV_HIST_RANGES_FLAG) != 0) + +/****************************************************************************************\ +* Other supplementary data type definitions * +\****************************************************************************************/ + +/*************************************** CvRect *****************************************/ + +typedef struct CvRect +{ + int x; + int y; + int width; + int height; +} +CvRect; + +CV_INLINE CvRect cvRect( int x, int y, int width, int height ) +{ + CvRect r; + + r.x = x; + r.y = y; + r.width = width; + r.height = height; + + return r; +} + + +CV_INLINE IplROI 
cvRectToROI( CvRect rect, int coi ) +{ + IplROI roi; + roi.xOffset = rect.x; + roi.yOffset = rect.y; + roi.width = rect.width; + roi.height = rect.height; + roi.coi = coi; + + return roi; +} + + +CV_INLINE CvRect cvROIToRect( IplROI roi ) +{ + return cvRect( roi.xOffset, roi.yOffset, roi.width, roi.height ); +} + +/*********************************** CvTermCriteria *************************************/ + +#define CV_TERMCRIT_ITER 1 +#define CV_TERMCRIT_NUMBER CV_TERMCRIT_ITER +#define CV_TERMCRIT_EPS 2 + +typedef struct CvTermCriteria +{ + int type; /* may be combination of + CV_TERMCRIT_ITER + CV_TERMCRIT_EPS */ + int max_iter; + double epsilon; +} +CvTermCriteria; + +CV_INLINE CvTermCriteria cvTermCriteria( int type, int max_iter, double epsilon ) +{ + CvTermCriteria t; + + t.type = type; + t.max_iter = max_iter; + t.epsilon = (float)epsilon; + + return t; +} + + +/******************************* CvPoint and variants ***********************************/ + +typedef struct CvPoint +{ + int x; + int y; +} +CvPoint; + + +CV_INLINE CvPoint cvPoint( int x, int y ) +{ + CvPoint p; + + p.x = x; + p.y = y; + + return p; +} + + +typedef struct CvPoint2D32f +{ + float x; + float y; +} +CvPoint2D32f; + + +CV_INLINE CvPoint2D32f cvPoint2D32f( double x, double y ) +{ + CvPoint2D32f p; + + p.x = (float)x; + p.y = (float)y; + + return p; +} + + +CV_INLINE CvPoint2D32f cvPointTo32f( CvPoint point ) +{ + return cvPoint2D32f( (float)point.x, (float)point.y ); +} + + +CV_INLINE CvPoint cvPointFrom32f( CvPoint2D32f point ) +{ + CvPoint ipt; + ipt.x = cvRound(point.x); + ipt.y = cvRound(point.y); + + return ipt; +} + + +typedef struct CvPoint3D32f +{ + float x; + float y; + float z; +} +CvPoint3D32f; + + +CV_INLINE CvPoint3D32f cvPoint3D32f( double x, double y, double z ) +{ + CvPoint3D32f p; + + p.x = (float)x; + p.y = (float)y; + p.z = (float)z; + + return p; +} + + +typedef struct CvPoint2D64f +{ + double x; + double y; +} +CvPoint2D64f; + + +CV_INLINE CvPoint2D64f cvPoint2D64f( 
double x, double y ) +{ + CvPoint2D64f p; + + p.x = x; + p.y = y; + + return p; +} + + +typedef struct CvPoint3D64f +{ + double x; + double y; + double z; +} +CvPoint3D64f; + + +CV_INLINE CvPoint3D64f cvPoint3D64f( double x, double y, double z ) +{ + CvPoint3D64f p; + + p.x = x; + p.y = y; + p.z = z; + + return p; +} + + +/******************************** CvSize's & CvBox **************************************/ + +typedef struct +{ + int width; + int height; +} +CvSize; + +CV_INLINE CvSize cvSize( int width, int height ) +{ + CvSize s; + + s.width = width; + s.height = height; + + return s; +} + +typedef struct CvSize2D32f +{ + float width; + float height; +} +CvSize2D32f; + + +CV_INLINE CvSize2D32f cvSize2D32f( double width, double height ) +{ + CvSize2D32f s; + + s.width = (float)width; + s.height = (float)height; + + return s; +} + +typedef struct CvBox2D +{ + CvPoint2D32f center; /* Center of the box. */ + CvSize2D32f size; /* Box width and length. */ + float angle; /* Angle between the horizontal axis */ + /* and the first side (i.e. 
length) in degrees */ +} +CvBox2D; + + +/* Line iterator state: */ +typedef struct CvLineIterator +{ + /* Pointer to the current point: */ + uchar* ptr; + + /* Bresenham algorithm state: */ + int err; + int plus_delta; + int minus_delta; + int plus_step; + int minus_step; +} +CvLineIterator; + + + +/************************************* CvSlice ******************************************/ + +typedef struct CvSlice +{ + int start_index, end_index; +} +CvSlice; + +CV_INLINE CvSlice cvSlice( int start, int end ) +{ + CvSlice slice; + slice.start_index = start; + slice.end_index = end; + + return slice; +} + +#define CV_WHOLE_SEQ_END_INDEX 0x3fffffff +#define CV_WHOLE_SEQ cvSlice(0, CV_WHOLE_SEQ_END_INDEX) + + +/************************************* CvScalar *****************************************/ + +typedef struct CvScalar +{ + double val[4]; +} +CvScalar; + +CV_INLINE CvScalar cvScalar( double val0, double val1 CV_DEFAULT(0), + double val2 CV_DEFAULT(0), double val3 CV_DEFAULT(0)) +{ + CvScalar scalar; + scalar.val[0] = val0; scalar.val[1] = val1; + scalar.val[2] = val2; scalar.val[3] = val3; + return scalar; +} + + +CV_INLINE CvScalar cvRealScalar( double val0 ) +{ + CvScalar scalar; + scalar.val[0] = val0; + scalar.val[1] = scalar.val[2] = scalar.val[3] = 0; + return scalar; +} + +CV_INLINE CvScalar cvScalarAll( double val0123 ) +{ + CvScalar scalar; + scalar.val[0] = val0123; + scalar.val[1] = val0123; + scalar.val[2] = val0123; + scalar.val[3] = val0123; + return scalar; +} + +/****************************************************************************************\ +* Dynamic Data structures * +\****************************************************************************************/ + +/******************************** Memory storage ****************************************/ + +typedef struct CvMemBlock +{ + struct CvMemBlock* prev; + struct CvMemBlock* next; +} +CvMemBlock; + +#define CV_STORAGE_MAGIC_VAL 0x42890000 + +typedef struct CvMemStorage +{ + int 
signature; + CvMemBlock* bottom; /* First allocated block. */ + CvMemBlock* top; /* Current memory block - top of the stack. */ + struct CvMemStorage* parent; /* We get new blocks from parent as needed. */ + int block_size; /* Block size. */ + int free_space; /* Remaining free space in current block. */ +} +CvMemStorage; + +#define CV_IS_STORAGE(storage) \ + ((storage) != NULL && \ + (((CvMemStorage*)(storage))->signature & CV_MAGIC_MASK) == CV_STORAGE_MAGIC_VAL) + + +typedef struct CvMemStoragePos +{ + CvMemBlock* top; + int free_space; +} +CvMemStoragePos; + + +/*********************************** Sequence *******************************************/ + +typedef struct CvSeqBlock +{ + struct CvSeqBlock* prev; /* Previous sequence block. */ + struct CvSeqBlock* next; /* Next sequence block. */ + int start_index; /* Index of the first element in the block + */ + /* sequence->first->start_index. */ + int count; /* Number of elements in the block. */ + schar* data; /* Pointer to the first element of the block. */ +} +CvSeqBlock; + + +#define CV_TREE_NODE_FIELDS(node_type) \ + int flags; /* Miscellaneous flags. */ \ + int header_size; /* Size of sequence header. */ \ + struct node_type* h_prev; /* Previous sequence. */ \ + struct node_type* h_next; /* Next sequence. */ \ + struct node_type* v_prev; /* 2nd previous sequence. */ \ + struct node_type* v_next /* 2nd next sequence. */ + +/* + Read/Write sequence. + Elements can be dynamically inserted to or deleted from the sequence. +*/ +#define CV_SEQUENCE_FIELDS() \ + CV_TREE_NODE_FIELDS(CvSeq); \ + int total; /* Total number of elements. */ \ + int elem_size; /* Size of sequence element in bytes. */ \ + schar* block_max; /* Maximal bound of the last block. */ \ + schar* ptr; /* Current write pointer. */ \ + int delta_elems; /* Grow seq this many at a time. */ \ + CvMemStorage* storage; /* Where the seq is stored. */ \ + CvSeqBlock* free_blocks; /* Free blocks list. 
*/ \ + CvSeqBlock* first; /* Pointer to the first sequence block. */ + +typedef struct CvSeq +{ + CV_SEQUENCE_FIELDS() +} +CvSeq; + +#define CV_TYPE_NAME_SEQ "opencv-sequence" +#define CV_TYPE_NAME_SEQ_TREE "opencv-sequence-tree" + +/*************************************** Set ********************************************/ +/* + Set. + Order is not preserved. There can be gaps between sequence elements. + After the element has been inserted it stays in the same place all the time. + The MSB(most-significant or sign bit) of the first field (flags) is 0 iff the element exists. +*/ +#define CV_SET_ELEM_FIELDS(elem_type) \ + int flags; \ + struct elem_type* next_free; + +typedef struct CvSetElem +{ + CV_SET_ELEM_FIELDS(CvSetElem) +} +CvSetElem; + +#define CV_SET_FIELDS() \ + CV_SEQUENCE_FIELDS() \ + CvSetElem* free_elems; \ + int active_count; + +typedef struct CvSet +{ + CV_SET_FIELDS() +} +CvSet; + + +#define CV_SET_ELEM_IDX_MASK ((1 << 26) - 1) +#define CV_SET_ELEM_FREE_FLAG (1 << (sizeof(int)*8-1)) + +/* Checks whether the element pointed by ptr belongs to a set or not */ +#define CV_IS_SET_ELEM( ptr ) (((CvSetElem*)(ptr))->flags >= 0) + +/************************************* Graph ********************************************/ + +/* + We represent a graph as a set of vertices. + Vertices contain their adjacency lists (more exactly, pointers to first incoming or + outcoming edge (or 0 if isolated vertex)). Edges are stored in another set. + There is a singly-linked list of incoming/outcoming edges for each vertex. + + Each edge consists of + + o Two pointers to the starting and ending vertices + (vtx[0] and vtx[1] respectively). + + A graph may be oriented or not. In the latter case, edges between + vertex i to vertex j are not distinguished during search operations. 
+ + o Two pointers to next edges for the starting and ending vertices, where + next[0] points to the next edge in the vtx[0] adjacency list and + next[1] points to the next edge in the vtx[1] adjacency list. +*/ +#define CV_GRAPH_EDGE_FIELDS() \ + int flags; \ + float weight; \ + struct CvGraphEdge* next[2]; \ + struct CvGraphVtx* vtx[2]; + + +#define CV_GRAPH_VERTEX_FIELDS() \ + int flags; \ + struct CvGraphEdge* first; + + +typedef struct CvGraphEdge +{ + CV_GRAPH_EDGE_FIELDS() +} +CvGraphEdge; + +typedef struct CvGraphVtx +{ + CV_GRAPH_VERTEX_FIELDS() +} +CvGraphVtx; + +typedef struct CvGraphVtx2D +{ + CV_GRAPH_VERTEX_FIELDS() + CvPoint2D32f* ptr; +} +CvGraphVtx2D; + +/* + Graph is "derived" from the set (this is set a of vertices) + and includes another set (edges) +*/ +#define CV_GRAPH_FIELDS() \ + CV_SET_FIELDS() \ + CvSet* edges; + +typedef struct CvGraph +{ + CV_GRAPH_FIELDS() +} +CvGraph; + +#define CV_TYPE_NAME_GRAPH "opencv-graph" + +/*********************************** Chain/Countour *************************************/ + +typedef struct CvChain +{ + CV_SEQUENCE_FIELDS() + CvPoint origin; +} +CvChain; + +#define CV_CONTOUR_FIELDS() \ + CV_SEQUENCE_FIELDS() \ + CvRect rect; \ + int color; \ + int reserved[3]; + +typedef struct CvContour +{ + CV_CONTOUR_FIELDS() +} +CvContour; + +typedef CvContour CvPoint2DSeq; + +/****************************************************************************************\ +* Sequence types * +\****************************************************************************************/ + +#define CV_SEQ_MAGIC_VAL 0x42990000 + +#define CV_IS_SEQ(seq) \ + ((seq) != NULL && (((CvSeq*)(seq))->flags & CV_MAGIC_MASK) == CV_SEQ_MAGIC_VAL) + +#define CV_SET_MAGIC_VAL 0x42980000 +#define CV_IS_SET(set) \ + ((set) != NULL && (((CvSeq*)(set))->flags & CV_MAGIC_MASK) == CV_SET_MAGIC_VAL) + +#define CV_SEQ_ELTYPE_BITS 12 +#define CV_SEQ_ELTYPE_MASK ((1 << CV_SEQ_ELTYPE_BITS) - 1) + +#define CV_SEQ_ELTYPE_POINT CV_32SC2 /* (x,y) */ 
+#define CV_SEQ_ELTYPE_CODE CV_8UC1 /* freeman code: 0..7 */ +#define CV_SEQ_ELTYPE_GENERIC 0 +#define CV_SEQ_ELTYPE_PTR CV_USRTYPE1 +#define CV_SEQ_ELTYPE_PPOINT CV_SEQ_ELTYPE_PTR /* &(x,y) */ +#define CV_SEQ_ELTYPE_INDEX CV_32SC1 /* #(x,y) */ +#define CV_SEQ_ELTYPE_GRAPH_EDGE 0 /* &next_o, &next_d, &vtx_o, &vtx_d */ +#define CV_SEQ_ELTYPE_GRAPH_VERTEX 0 /* first_edge, &(x,y) */ +#define CV_SEQ_ELTYPE_TRIAN_ATR 0 /* vertex of the binary tree */ +#define CV_SEQ_ELTYPE_CONNECTED_COMP 0 /* connected component */ +#define CV_SEQ_ELTYPE_POINT3D CV_32FC3 /* (x,y,z) */ + +#define CV_SEQ_KIND_BITS 2 +#define CV_SEQ_KIND_MASK (((1 << CV_SEQ_KIND_BITS) - 1)<flags & CV_SEQ_ELTYPE_MASK) +#define CV_SEQ_KIND( seq ) ((seq)->flags & CV_SEQ_KIND_MASK ) + +/* flag checking */ +#define CV_IS_SEQ_INDEX( seq ) ((CV_SEQ_ELTYPE(seq) == CV_SEQ_ELTYPE_INDEX) && \ + (CV_SEQ_KIND(seq) == CV_SEQ_KIND_GENERIC)) + +#define CV_IS_SEQ_CURVE( seq ) (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE) +#define CV_IS_SEQ_CLOSED( seq ) (((seq)->flags & CV_SEQ_FLAG_CLOSED) != 0) +#define CV_IS_SEQ_CONVEX( seq ) 0 +#define CV_IS_SEQ_HOLE( seq ) (((seq)->flags & CV_SEQ_FLAG_HOLE) != 0) +#define CV_IS_SEQ_SIMPLE( seq ) 1 + +/* type checking macros */ +#define CV_IS_SEQ_POINT_SET( seq ) \ + ((CV_SEQ_ELTYPE(seq) == CV_32SC2 || CV_SEQ_ELTYPE(seq) == CV_32FC2)) + +#define CV_IS_SEQ_POINT_SUBSET( seq ) \ + (CV_IS_SEQ_INDEX( seq ) || CV_SEQ_ELTYPE(seq) == CV_SEQ_ELTYPE_PPOINT) + +#define CV_IS_SEQ_POLYLINE( seq ) \ + (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE && CV_IS_SEQ_POINT_SET(seq)) + +#define CV_IS_SEQ_POLYGON( seq ) \ + (CV_IS_SEQ_POLYLINE(seq) && CV_IS_SEQ_CLOSED(seq)) + +#define CV_IS_SEQ_CHAIN( seq ) \ + (CV_SEQ_KIND(seq) == CV_SEQ_KIND_CURVE && (seq)->elem_size == 1) + +#define CV_IS_SEQ_CONTOUR( seq ) \ + (CV_IS_SEQ_CLOSED(seq) && (CV_IS_SEQ_POLYLINE(seq) || CV_IS_SEQ_CHAIN(seq))) + +#define CV_IS_SEQ_CHAIN_CONTOUR( seq ) \ + (CV_IS_SEQ_CHAIN( seq ) && CV_IS_SEQ_CLOSED( seq )) + +#define CV_IS_SEQ_POLYGON_TREE( 
seq ) \ + (CV_SEQ_ELTYPE (seq) == CV_SEQ_ELTYPE_TRIAN_ATR && \ + CV_SEQ_KIND( seq ) == CV_SEQ_KIND_BIN_TREE ) + +#define CV_IS_GRAPH( seq ) \ + (CV_IS_SET(seq) && CV_SEQ_KIND((CvSet*)(seq)) == CV_SEQ_KIND_GRAPH) + +#define CV_IS_GRAPH_ORIENTED( seq ) \ + (((seq)->flags & CV_GRAPH_FLAG_ORIENTED) != 0) + +#define CV_IS_SUBDIV2D( seq ) \ + (CV_IS_SET(seq) && CV_SEQ_KIND((CvSet*)(seq)) == CV_SEQ_KIND_SUBDIV2D) + +/****************************************************************************************/ +/* Sequence writer & reader */ +/****************************************************************************************/ + +#define CV_SEQ_WRITER_FIELDS() \ + int header_size; \ + CvSeq* seq; /* the sequence written */ \ + CvSeqBlock* block; /* current block */ \ + schar* ptr; /* pointer to free space */ \ + schar* block_min; /* pointer to the beginning of block*/\ + schar* block_max; /* pointer to the end of block */ + +typedef struct CvSeqWriter +{ + CV_SEQ_WRITER_FIELDS() +} +CvSeqWriter; + + +#define CV_SEQ_READER_FIELDS() \ + int header_size; \ + CvSeq* seq; /* sequence, beign read */ \ + CvSeqBlock* block; /* current block */ \ + schar* ptr; /* pointer to element be read next */ \ + schar* block_min; /* pointer to the beginning of block */\ + schar* block_max; /* pointer to the end of block */ \ + int delta_index;/* = seq->first->start_index */ \ + schar* prev_elem; /* pointer to previous element */ + + +typedef struct CvSeqReader +{ + CV_SEQ_READER_FIELDS() +} +CvSeqReader; + +/****************************************************************************************/ +/* Operations on sequences */ +/****************************************************************************************/ + +#define CV_SEQ_ELEM( seq, elem_type, index ) \ +/* assert gives some guarantee that parameter is valid */ \ +( assert(sizeof((seq)->first[0]) == sizeof(CvSeqBlock) && \ + (seq)->elem_size == sizeof(elem_type)), \ + (elem_type*)((seq)->first && (unsigned)index < \ + 
(unsigned)((seq)->first->count) ? \ + (seq)->first->data + (index) * sizeof(elem_type) : \ + cvGetSeqElem( (CvSeq*)(seq), (index) ))) +#define CV_GET_SEQ_ELEM( elem_type, seq, index ) CV_SEQ_ELEM( (seq), elem_type, (index) ) + +/* Add element to sequence: */ +#define CV_WRITE_SEQ_ELEM_VAR( elem_ptr, writer ) \ +{ \ + if( (writer).ptr >= (writer).block_max ) \ + { \ + cvCreateSeqBlock( &writer); \ + } \ + memcpy((writer).ptr, elem_ptr, (writer).seq->elem_size);\ + (writer).ptr += (writer).seq->elem_size; \ +} + +#define CV_WRITE_SEQ_ELEM( elem, writer ) \ +{ \ + assert( (writer).seq->elem_size == sizeof(elem)); \ + if( (writer).ptr >= (writer).block_max ) \ + { \ + cvCreateSeqBlock( &writer); \ + } \ + assert( (writer).ptr <= (writer).block_max - sizeof(elem));\ + memcpy((writer).ptr, &(elem), sizeof(elem)); \ + (writer).ptr += sizeof(elem); \ +} + + +/* Move reader position forward: */ +#define CV_NEXT_SEQ_ELEM( elem_size, reader ) \ +{ \ + if( ((reader).ptr += (elem_size)) >= (reader).block_max ) \ + { \ + cvChangeSeqBlock( &(reader), 1 ); \ + } \ +} + + +/* Move reader position backward: */ +#define CV_PREV_SEQ_ELEM( elem_size, reader ) \ +{ \ + if( ((reader).ptr -= (elem_size)) < (reader).block_min ) \ + { \ + cvChangeSeqBlock( &(reader), -1 ); \ + } \ +} + +/* Read element and move read position forward: */ +#define CV_READ_SEQ_ELEM( elem, reader ) \ +{ \ + assert( (reader).seq->elem_size == sizeof(elem)); \ + memcpy( &(elem), (reader).ptr, sizeof((elem))); \ + CV_NEXT_SEQ_ELEM( sizeof(elem), reader ) \ +} + +/* Read element and move read position backward: */ +#define CV_REV_READ_SEQ_ELEM( elem, reader ) \ +{ \ + assert( (reader).seq->elem_size == sizeof(elem)); \ + memcpy(&(elem), (reader).ptr, sizeof((elem))); \ + CV_PREV_SEQ_ELEM( sizeof(elem), reader ) \ +} + + +#define CV_READ_CHAIN_POINT( _pt, reader ) \ +{ \ + (_pt) = (reader).pt; \ + if( (reader).ptr ) \ + { \ + CV_READ_SEQ_ELEM( (reader).code, (reader)); \ + assert( ((reader).code & ~7) == 0 ); \ + 
(reader).pt.x += (reader).deltas[(int)(reader).code][0]; \ + (reader).pt.y += (reader).deltas[(int)(reader).code][1]; \ + } \ +} + +#define CV_CURRENT_POINT( reader ) (*((CvPoint*)((reader).ptr))) +#define CV_PREV_POINT( reader ) (*((CvPoint*)((reader).prev_elem))) + +#define CV_READ_EDGE( pt1, pt2, reader ) \ +{ \ + assert( sizeof(pt1) == sizeof(CvPoint) && \ + sizeof(pt2) == sizeof(CvPoint) && \ + reader.seq->elem_size == sizeof(CvPoint)); \ + (pt1) = CV_PREV_POINT( reader ); \ + (pt2) = CV_CURRENT_POINT( reader ); \ + (reader).prev_elem = (reader).ptr; \ + CV_NEXT_SEQ_ELEM( sizeof(CvPoint), (reader)); \ +} + +/************ Graph macros ************/ + +/* Return next graph edge for given vertex: */ +#define CV_NEXT_GRAPH_EDGE( edge, vertex ) \ + (assert((edge)->vtx[0] == (vertex) || (edge)->vtx[1] == (vertex)), \ + (edge)->next[(edge)->vtx[1] == (vertex)]) + + + +/****************************************************************************************\ +* Data structures for persistence (a.k.a serialization) functionality * +\****************************************************************************************/ + +/* "black box" file storage */ +typedef struct CvFileStorage CvFileStorage; + +/* Storage flags: */ +#define CV_STORAGE_READ 0 +#define CV_STORAGE_WRITE 1 +#define CV_STORAGE_WRITE_TEXT CV_STORAGE_WRITE +#define CV_STORAGE_WRITE_BINARY CV_STORAGE_WRITE +#define CV_STORAGE_APPEND 2 + +/* List of attributes: */ +typedef struct CvAttrList +{ + const char** attr; /* NULL-terminated array of (attribute_name,attribute_value) pairs. */ + struct CvAttrList* next; /* Pointer to next chunk of the attributes list. 
*/ +} +CvAttrList; + +CV_INLINE CvAttrList cvAttrList( const char** attr CV_DEFAULT(NULL), + CvAttrList* next CV_DEFAULT(NULL) ) +{ + CvAttrList l; + l.attr = attr; + l.next = next; + + return l; +} + +struct CvTypeInfo; + +#define CV_NODE_NONE 0 +#define CV_NODE_INT 1 +#define CV_NODE_INTEGER CV_NODE_INT +#define CV_NODE_REAL 2 +#define CV_NODE_FLOAT CV_NODE_REAL +#define CV_NODE_STR 3 +#define CV_NODE_STRING CV_NODE_STR +#define CV_NODE_REF 4 /* not used */ +#define CV_NODE_SEQ 5 +#define CV_NODE_MAP 6 +#define CV_NODE_TYPE_MASK 7 + +#define CV_NODE_TYPE(flags) ((flags) & CV_NODE_TYPE_MASK) + +/* file node flags */ +#define CV_NODE_FLOW 8 /* Used only for writing structures in YAML format. */ +#define CV_NODE_USER 16 +#define CV_NODE_EMPTY 32 +#define CV_NODE_NAMED 64 + +#define CV_NODE_IS_INT(flags) (CV_NODE_TYPE(flags) == CV_NODE_INT) +#define CV_NODE_IS_REAL(flags) (CV_NODE_TYPE(flags) == CV_NODE_REAL) +#define CV_NODE_IS_STRING(flags) (CV_NODE_TYPE(flags) == CV_NODE_STRING) +#define CV_NODE_IS_SEQ(flags) (CV_NODE_TYPE(flags) == CV_NODE_SEQ) +#define CV_NODE_IS_MAP(flags) (CV_NODE_TYPE(flags) == CV_NODE_MAP) +#define CV_NODE_IS_COLLECTION(flags) (CV_NODE_TYPE(flags) >= CV_NODE_SEQ) +#define CV_NODE_IS_FLOW(flags) (((flags) & CV_NODE_FLOW) != 0) +#define CV_NODE_IS_EMPTY(flags) (((flags) & CV_NODE_EMPTY) != 0) +#define CV_NODE_IS_USER(flags) (((flags) & CV_NODE_USER) != 0) +#define CV_NODE_HAS_NAME(flags) (((flags) & CV_NODE_NAMED) != 0) + +#define CV_NODE_SEQ_SIMPLE 256 +#define CV_NODE_SEQ_IS_SIMPLE(seq) (((seq)->flags & CV_NODE_SEQ_SIMPLE) != 0) + +typedef struct CvString +{ + int len; + char* ptr; +} +CvString; + +/* All the keys (names) of elements in the readed file storage + are stored in the hash to speed up the lookup operations: */ +typedef struct CvStringHashNode +{ + unsigned hashval; + CvString str; + struct CvStringHashNode* next; +} +CvStringHashNode; + +typedef struct CvGenericHash CvFileNodeHash; + +/* Basic element of the file storage - scalar 
or collection: */ +typedef struct CvFileNode +{ + int tag; + struct CvTypeInfo* info; /* type information + (only for user-defined object, for others it is 0) */ + union + { + double f; /* scalar floating-point number */ + int i; /* scalar integer number */ + CvString str; /* text string */ + CvSeq* seq; /* sequence (ordered collection of file nodes) */ + CvFileNodeHash* map; /* map (collection of named file nodes) */ + } data; +} +CvFileNode; + +#ifdef __cplusplus +extern "C" { +#endif +typedef int (CV_CDECL *CvIsInstanceFunc)( const void* struct_ptr ); +typedef void (CV_CDECL *CvReleaseFunc)( void** struct_dblptr ); +typedef void* (CV_CDECL *CvReadFunc)( CvFileStorage* storage, CvFileNode* node ); +typedef void (CV_CDECL *CvWriteFunc)( CvFileStorage* storage, const char* name, + const void* struct_ptr, CvAttrList attributes ); +typedef void* (CV_CDECL *CvCloneFunc)( const void* struct_ptr ); +#ifdef __cplusplus +} +#endif + +typedef struct CvTypeInfo +{ + int flags; + int header_size; + struct CvTypeInfo* prev; + struct CvTypeInfo* next; + const char* type_name; + CvIsInstanceFunc is_instance; + CvReleaseFunc release; + CvReadFunc read; + CvWriteFunc write; + CvCloneFunc clone; +} +CvTypeInfo; + + +/**** System data types ******/ + +typedef struct CvPluginFuncInfo +{ + void** func_addr; + void* default_func_addr; + const char* func_names; + int search_modules; + int loaded_from; +} +CvPluginFuncInfo; + +typedef struct CvModuleInfo +{ + struct CvModuleInfo* next; + const char* name; + const char* version; + CvPluginFuncInfo* func_tab; +} +CvModuleInfo; + +enum { CV_PARAM_TYPE_INT=0, CV_PARAM_TYPE_REAL=1, CV_PARAM_TYPE_STRING=2, CV_PARAM_TYPE_MAT=3 }; + +#endif /*_CXCORE_TYPES_H_*/ + +/* End of file. 
*/ diff --git a/opencv2/core/version.hpp b/opencv2/core/version.hpp new file mode 100644 index 0000000..0cd25ca --- /dev/null +++ b/opencv2/core/version.hpp @@ -0,0 +1,58 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// Intel License Agreement +// For Open Source Computer Vision Library +// +// Copyright( C) 2000, Intel Corporation, all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. 
+// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +//(including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort(including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +/* + definition of the current version of OpenCV + Usefull to test in user programs +*/ + +#ifndef __OPENCV_VERSION_HPP__ +#define __OPENCV_VERSION_HPP__ + +#define CV_MAJOR_VERSION 2 +#define CV_MINOR_VERSION 3 +#define CV_SUBMINOR_VERSION 1 + +#define CVAUX_STR_EXP(__A) #__A +#define CVAUX_STR(__A) CVAUX_STR_EXP(__A) +#define CV_VERSION CVAUX_STR(CV_MAJOR_VERSION) "." CVAUX_STR(CV_MINOR_VERSION) "." CVAUX_STR(CV_SUBMINOR_VERSION) + +#endif diff --git a/opencv2/core/wimage.hpp b/opencv2/core/wimage.hpp new file mode 100644 index 0000000..579c009 --- /dev/null +++ b/opencv2/core/wimage.hpp @@ -0,0 +1,621 @@ +/////////////////////////////////////////////////////////////////////////////// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to +// this license. If you do not agree to this license, do not download, +// install, copy or use the software. +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2008, Google, all rights reserved. +// Third party copyrights are property of their respective owners. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of Intel Corporation or contributors may not be used to endorse +// or promote products derived from this software without specific +// prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" +// and any express or implied warranties, including, but not limited to, the +// implied warranties of merchantability and fitness for a particular purpose +// are disclaimed. In no event shall the Intel Corporation or contributors be +// liable for any direct, indirect, incidental, special, exemplary, or +// consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. + + +///////////////////////////////////////////////////////////////////////////////// +// +// Image class which provides a thin layer around an IplImage. The goals +// of the class design are: +// 1. All the data has explicit ownership to avoid memory leaks +// 2. No hidden allocations or copies for performance. +// 3. Easy access to OpenCV methods (which will access IPP if available) +// 4. Can easily treat external data as an image +// 5. 
Easy to create images which are subsets of other images +// 6. Fast pixel access which can take advantage of number of channels +// if known at compile time. +// +// The WImage class is the image class which provides the data accessors. +// The 'W' comes from the fact that it is also a wrapper around the popular +// but inconvenient IplImage class. A WImage can be constructed either using a +// WImageBuffer class which allocates and frees the data, +// or using a WImageView class which constructs a subimage or a view into +// external data. The view class does no memory management. Each class +// actually has two versions, one when the number of channels is known at +// compile time and one when it isn't. Using the one with the number of +// channels specified can provide some compile time optimizations by using the +// fact that the number of channels is a constant. +// +// We use the convention (c,r) to refer to column c and row r with (0,0) being +// the upper left corner. This is similar to standard Euclidean coordinates +// with the first coordinate varying in the horizontal direction and the second +// coordinate varying in the vertical direction. +// Thus (c,r) is usually in the domain [0, width) X [0, height) +// +// Example usage: +// WImageBuffer3_b im(5,7); // Make a 5X7 3 channel image of type uchar +// WImageView3_b sub_im(im, 2,2, 3,3); // 3X3 submatrix +// vector vec(10, 3.0f); +// WImageView1_f user_im(&vec[0], 2, 5); // 2X5 image w/ supplied data +// +// im.SetZero(); // same as cvSetZero(im.Ipl()) +// *im(2, 3) = 15; // Modify the element at column 2, row 3 +// MySetRand(&sub_im); +// +// // Copy the second row into the first. This can be done with no memory +// // allocation and will use SSE if IPP is available. 
+// int w = im.Width(); +// im.View(0,0, w,1).CopyFrom(im.View(0,1, w,1)); +// +// // Doesn't care about source of data since using WImage +// void MySetRand(WImage_b* im) { // Works with any number of channels +// for (int r = 0; r < im->Height(); ++r) { +// float* row = im->Row(r); +// for (int c = 0; c < im->Width(); ++c) { +// for (int ch = 0; ch < im->Channels(); ++ch, ++row) { +// *row = uchar(rand() & 255); +// } +// } +// } +// } +// +// Functions that are not part of the basic image allocation, viewing, and +// access should come from OpenCV, except some useful functions that are not +// part of OpenCV can be found in wimage_util.h +#ifndef __OPENCV_CORE_WIMAGE_HPP__ +#define __OPENCV_CORE_WIMAGE_HPP__ + +#include "opencv2/core/core_c.h" + +#ifdef __cplusplus + +namespace cv { + +template class WImage; +template class WImageBuffer; +template class WImageView; + +template class WImageC; +template class WImageBufferC; +template class WImageViewC; + +// Commonly used typedefs. +typedef WImage WImage_b; +typedef WImageView WImageView_b; +typedef WImageBuffer WImageBuffer_b; + +typedef WImageC WImage1_b; +typedef WImageViewC WImageView1_b; +typedef WImageBufferC WImageBuffer1_b; + +typedef WImageC WImage3_b; +typedef WImageViewC WImageView3_b; +typedef WImageBufferC WImageBuffer3_b; + +typedef WImage WImage_f; +typedef WImageView WImageView_f; +typedef WImageBuffer WImageBuffer_f; + +typedef WImageC WImage1_f; +typedef WImageViewC WImageView1_f; +typedef WImageBufferC WImageBuffer1_f; + +typedef WImageC WImage3_f; +typedef WImageViewC WImageView3_f; +typedef WImageBufferC WImageBuffer3_f; + +// There isn't a standard for signed and unsigned short so be more +// explicit in the typename for these cases. 
+typedef WImage WImage_16s; +typedef WImageView WImageView_16s; +typedef WImageBuffer WImageBuffer_16s; + +typedef WImageC WImage1_16s; +typedef WImageViewC WImageView1_16s; +typedef WImageBufferC WImageBuffer1_16s; + +typedef WImageC WImage3_16s; +typedef WImageViewC WImageView3_16s; +typedef WImageBufferC WImageBuffer3_16s; + +typedef WImage WImage_16u; +typedef WImageView WImageView_16u; +typedef WImageBuffer WImageBuffer_16u; + +typedef WImageC WImage1_16u; +typedef WImageViewC WImageView1_16u; +typedef WImageBufferC WImageBuffer1_16u; + +typedef WImageC WImage3_16u; +typedef WImageViewC WImageView3_16u; +typedef WImageBufferC WImageBuffer3_16u; + +// +// WImage definitions +// +// This WImage class gives access to the data it refers to. It can be +// constructed either by allocating the data with a WImageBuffer class or +// using the WImageView class to refer to a subimage or outside data. +template +class WImage +{ +public: + typedef T BaseType; + + // WImage is an abstract class with no other virtual methods so make the + // destructor virtual. + virtual ~WImage() = 0; + + // Accessors + IplImage* Ipl() {return image_; } + const IplImage* Ipl() const {return image_; } + T* ImageData() { return reinterpret_cast(image_->imageData); } + const T* ImageData() const { + return reinterpret_cast(image_->imageData); + } + + int Width() const {return image_->width; } + int Height() const {return image_->height; } + + // WidthStep is the number of bytes to go to the pixel with the next y coord + int WidthStep() const {return image_->widthStep; } + + int Channels() const {return image_->nChannels; } + int ChannelSize() const {return sizeof(T); } // number of bytes per channel + + // Number of bytes per pixel + int PixelSize() const {return Channels() * ChannelSize(); } + + // Return depth type (e.g. IPL_DEPTH_8U, IPL_DEPTH_32F) which is the number + // of bits per channel and with the signed bit set. + // This is known at compile time using specializations. 
+ int Depth() const; + + inline const T* Row(int r) const { + return reinterpret_cast(image_->imageData + r*image_->widthStep); + } + + inline T* Row(int r) { + return reinterpret_cast(image_->imageData + r*image_->widthStep); + } + + // Pixel accessors which returns a pointer to the start of the channel + inline T* operator() (int c, int r) { + return reinterpret_cast(image_->imageData + r*image_->widthStep) + + c*Channels(); + } + + inline const T* operator() (int c, int r) const { + return reinterpret_cast(image_->imageData + r*image_->widthStep) + + c*Channels(); + } + + // Copy the contents from another image which is just a convenience to cvCopy + void CopyFrom(const WImage& src) { cvCopy(src.Ipl(), image_); } + + // Set contents to zero which is just a convenient to cvSetZero + void SetZero() { cvSetZero(image_); } + + // Construct a view into a region of this image + WImageView View(int c, int r, int width, int height); + +protected: + // Disallow copy and assignment + WImage(const WImage&); + void operator=(const WImage&); + + explicit WImage(IplImage* img) : image_(img) { + assert(!img || img->depth == Depth()); + } + + void SetIpl(IplImage* image) { + assert(!image || image->depth == Depth()); + image_ = image; + } + + IplImage* image_; +}; + + + +// Image class when both the pixel type and number of channels +// are known at compile time. This wrapper will speed up some of the operations +// like accessing individual pixels using the () operator. 
+template +class WImageC : public WImage +{ +public: + typedef typename WImage::BaseType BaseType; + enum { kChannels = C }; + + explicit WImageC(IplImage* img) : WImage(img) { + assert(!img || img->nChannels == Channels()); + } + + // Construct a view into a region of this image + WImageViewC View(int c, int r, int width, int height); + + // Copy the contents from another image which is just a convenience to cvCopy + void CopyFrom(const WImageC& src) { + cvCopy(src.Ipl(), WImage::image_); + } + + // WImageC is an abstract class with no other virtual methods so make the + // destructor virtual. + virtual ~WImageC() = 0; + + int Channels() const {return C; } + +protected: + // Disallow copy and assignment + WImageC(const WImageC&); + void operator=(const WImageC&); + + void SetIpl(IplImage* image) { + assert(!image || image->depth == WImage::Depth()); + WImage::SetIpl(image); + } +}; + +// +// WImageBuffer definitions +// +// Image class which owns the data, so it can be allocated and is always +// freed. It cannot be copied but can be explicity cloned. +// +template +class WImageBuffer : public WImage +{ +public: + typedef typename WImage::BaseType BaseType; + + // Default constructor which creates an object that can be + WImageBuffer() : WImage(0) {} + + WImageBuffer(int width, int height, int nchannels) : WImage(0) { + Allocate(width, height, nchannels); + } + + // Constructor which takes ownership of a given IplImage so releases + // the image on destruction. + explicit WImageBuffer(IplImage* img) : WImage(img) {} + + // Allocate an image. Does nothing if current size is the same as + // the new size. + void Allocate(int width, int height, int nchannels); + + // Set the data to point to an image, releasing the old data + void SetIpl(IplImage* img) { + ReleaseImage(); + WImage::SetIpl(img); + } + + // Clone an image which reallocates the image if of a different dimension. 
+ void CloneFrom(const WImage& src) { + Allocate(src.Width(), src.Height(), src.Channels()); + CopyFrom(src); + } + + ~WImageBuffer() { + ReleaseImage(); + } + + // Release the image if it isn't null. + void ReleaseImage() { + if (WImage::image_) { + IplImage* image = WImage::image_; + cvReleaseImage(&image); + WImage::SetIpl(0); + } + } + + bool IsNull() const {return WImage::image_ == NULL; } + +private: + // Disallow copy and assignment + WImageBuffer(const WImageBuffer&); + void operator=(const WImageBuffer&); +}; + +// Like a WImageBuffer class but when the number of channels is known +// at compile time. +template +class WImageBufferC : public WImageC +{ +public: + typedef typename WImage::BaseType BaseType; + enum { kChannels = C }; + + // Default constructor which creates an object that can be + WImageBufferC() : WImageC(0) {} + + WImageBufferC(int width, int height) : WImageC(0) { + Allocate(width, height); + } + + // Constructor which takes ownership of a given IplImage so releases + // the image on destruction. + explicit WImageBufferC(IplImage* img) : WImageC(img) {} + + // Allocate an image. Does nothing if current size is the same as + // the new size. + void Allocate(int width, int height); + + // Set the data to point to an image, releasing the old data + void SetIpl(IplImage* img) { + ReleaseImage(); + WImageC::SetIpl(img); + } + + // Clone an image which reallocates the image if of a different dimension. + void CloneFrom(const WImageC& src) { + Allocate(src.Width(), src.Height()); + CopyFrom(src); + } + + ~WImageBufferC() { + ReleaseImage(); + } + + // Release the image if it isn't null. 
+ void ReleaseImage() { + if (WImage::image_) { + IplImage* image = WImage::image_; + cvReleaseImage(&image); + WImageC::SetIpl(0); + } + } + + bool IsNull() const {return WImage::image_ == NULL; } + +private: + // Disallow copy and assignment + WImageBufferC(const WImageBufferC&); + void operator=(const WImageBufferC&); +}; + +// +// WImageView definitions +// +// View into an image class which allows treating a subimage as an image +// or treating external data as an image +// +template +class WImageView : public WImage +{ +public: + typedef typename WImage::BaseType BaseType; + + // Construct a subimage. No checks are done that the subimage lies + // completely inside the original image. + WImageView(WImage* img, int c, int r, int width, int height); + + // Refer to external data. + // If not given width_step assumed to be same as width. + WImageView(T* data, int width, int height, int channels, int width_step = -1); + + // Refer to external data. This does NOT take ownership + // of the supplied IplImage. + WImageView(IplImage* img) : WImage(img) {} + + // Copy constructor + WImageView(const WImage& img) : WImage(0) { + header_ = *(img.Ipl()); + WImage::SetIpl(&header_); + } + + WImageView& operator=(const WImage& img) { + header_ = *(img.Ipl()); + WImage::SetIpl(&header_); + return *this; + } + +protected: + IplImage header_; +}; + + +template +class WImageViewC : public WImageC +{ +public: + typedef typename WImage::BaseType BaseType; + enum { kChannels = C }; + + // Default constructor needed for vectors of views. + WImageViewC(); + + virtual ~WImageViewC() {} + + // Construct a subimage. No checks are done that the subimage lies + // completely inside the original image. + WImageViewC(WImageC* img, + int c, int r, int width, int height); + + // Refer to external data + WImageViewC(T* data, int width, int height, int width_step = -1); + + // Refer to external data. This does NOT take ownership + // of the supplied IplImage. 
+ WImageViewC(IplImage* img) : WImageC(img) {} + + // Copy constructor which does a shallow copy to allow multiple views + // of same data. gcc-4.1.1 gets confused if both versions of + // the constructor and assignment operator are not provided. + WImageViewC(const WImageC& img) : WImageC(0) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + } + WImageViewC(const WImageViewC& img) : WImageC(0) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + } + + WImageViewC& operator=(const WImageC& img) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + return *this; + } + WImageViewC& operator=(const WImageViewC& img) { + header_ = *(img.Ipl()); + WImageC::SetIpl(&header_); + return *this; + } + +protected: + IplImage header_; +}; + + +// Specializations for depth +template<> +inline int WImage::Depth() const {return IPL_DEPTH_8U; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_8S; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_16S; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_16U; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_32S; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_32F; } +template<> +inline int WImage::Depth() const {return IPL_DEPTH_64F; } + +// +// Pure virtual destructors still need to be defined. 
+// +template inline WImage::~WImage() {} +template inline WImageC::~WImageC() {} + +// +// Allocate ImageData +// +template +inline void WImageBuffer::Allocate(int width, int height, int nchannels) +{ + if (IsNull() || WImage::Width() != width || + WImage::Height() != height || WImage::Channels() != nchannels) { + ReleaseImage(); + WImage::image_ = cvCreateImage(cvSize(width, height), + WImage::Depth(), nchannels); + } +} + +template +inline void WImageBufferC::Allocate(int width, int height) +{ + if (IsNull() || WImage::Width() != width || WImage::Height() != height) { + ReleaseImage(); + WImageC::SetIpl(cvCreateImage(cvSize(width, height),WImage::Depth(), C)); + } +} + +// +// ImageView methods +// +template +WImageView::WImageView(WImage* img, int c, int r, int width, int height) + : WImage(0) +{ + header_ = *(img->Ipl()); + header_.imageData = reinterpret_cast((*img)(c, r)); + header_.width = width; + header_.height = height; + WImage::SetIpl(&header_); +} + +template +WImageView::WImageView(T* data, int width, int height, int nchannels, int width_step) + : WImage(0) +{ + cvInitImageHeader(&header_, cvSize(width, height), WImage::Depth(), nchannels); + header_.imageData = reinterpret_cast(data); + if (width_step > 0) { + header_.widthStep = width_step; + } + WImage::SetIpl(&header_); +} + +template +WImageViewC::WImageViewC(WImageC* img, int c, int r, int width, int height) + : WImageC(0) +{ + header_ = *(img->Ipl()); + header_.imageData = reinterpret_cast((*img)(c, r)); + header_.width = width; + header_.height = height; + WImageC::SetIpl(&header_); +} + +template +WImageViewC::WImageViewC() : WImageC(0) { + cvInitImageHeader(&header_, cvSize(0, 0), WImage::Depth(), C); + header_.imageData = reinterpret_cast(0); + WImageC::SetIpl(&header_); +} + +template +WImageViewC::WImageViewC(T* data, int width, int height, int width_step) + : WImageC(0) +{ + cvInitImageHeader(&header_, cvSize(width, height), WImage::Depth(), C); + header_.imageData = 
reinterpret_cast(data); + if (width_step > 0) { + header_.widthStep = width_step; + } + WImageC::SetIpl(&header_); +} + +// Construct a view into a region of an image +template +WImageView WImage::View(int c, int r, int width, int height) { + return WImageView(this, c, r, width, height); +} + +template +WImageViewC WImageC::View(int c, int r, int width, int height) { + return WImageViewC(this, c, r, width, height); +} + +} // end of namespace + +#endif // __cplusplus + +#endif diff --git a/opencv2/imgproc/imgproc.hpp b/opencv2/imgproc/imgproc.hpp new file mode 100644 index 0000000..bd79955 --- /dev/null +++ b/opencv2/imgproc/imgproc.hpp @@ -0,0 +1,1139 @@ +/*! \file imgproc.hpp + \brief The Image Processing + */ + +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. 
+// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_IMGPROC_HPP__ +#define __OPENCV_IMGPROC_HPP__ + +#include "opencv2/core/core.hpp" +#include "opencv2/imgproc/types_c.h" + +#ifdef __cplusplus + +/*! \namespace cv + Namespace where all the C++ OpenCV functionality resides + */ +namespace cv +{ + +//! various border interpolation methods +enum { BORDER_REPLICATE=IPL_BORDER_REPLICATE, BORDER_CONSTANT=IPL_BORDER_CONSTANT, + BORDER_REFLECT=IPL_BORDER_REFLECT, BORDER_WRAP=IPL_BORDER_WRAP, + BORDER_REFLECT_101=IPL_BORDER_REFLECT_101, BORDER_REFLECT101=BORDER_REFLECT_101, + BORDER_TRANSPARENT=IPL_BORDER_TRANSPARENT, + BORDER_DEFAULT=BORDER_REFLECT_101, BORDER_ISOLATED=16 }; + +//! 1D interpolation function: returns coordinate of the "donor" pixel for the specified location p. +CV_EXPORTS_W int borderInterpolate( int p, int len, int borderType ); + +/*! + The Base Class for 1D or Row-wise Filters + + This is the base class for linear or non-linear filters that process 1D data. 
+ In particular, such filters are used for the "horizontal" filtering parts in separable filters.
+
+ Several functions in OpenCV return Ptr for the specific types of filters,
+ and those pointers can be used directly or within cv::FilterEngine.
+*/
+class CV_EXPORTS BaseRowFilter
+{
+public:
+ //! the default constructor
+ BaseRowFilter();
+ //! the destructor
+ virtual ~BaseRowFilter();
+ //! the filtering operator. Must be overridden in the derived classes. The horizontal border interpolation is done outside of the class.
+ virtual void operator()(const uchar* src, uchar* dst,
+ int width, int cn) = 0;
+ int ksize, anchor;
+};
+
+
+/*!
+ The Base Class for Column-wise Filters
+
+ This is the base class for linear or non-linear filters that process columns of 2D arrays.
+ Such filters are used for the "vertical" filtering parts in separable filters.
+
+ Several functions in OpenCV return Ptr for the specific types of filters,
+ and those pointers can be used directly or within cv::FilterEngine.
+
+ Unlike cv::BaseRowFilter, cv::BaseColumnFilter may have some context information,
+ i.e. box filter keeps the sliding sum of elements. To reset the state BaseColumnFilter::reset()
+ must be called (e.g. the method is called by cv::FilterEngine)
+ */
+class CV_EXPORTS BaseColumnFilter
+{
+public:
+ //! the default constructor
+ BaseColumnFilter();
+ //! the destructor
+ virtual ~BaseColumnFilter();
+ //! the filtering operator. Must be overridden in the derived classes. The vertical border interpolation is done outside of the class.
+ virtual void operator()(const uchar** src, uchar* dst, int dststep,
+ int dstcount, int width) = 0;
+ //! resets the internal buffers, if any
+ virtual void reset();
+ int ksize, anchor;
+};
+
+/*!
+ The Base Class for Non-Separable 2D Filters.
+
+ This is the base class for linear or non-linear 2D filters.
+ + Several functions in OpenCV return Ptr for the specific types of filters, + and those pointers can be used directly or within cv::FilterEngine. + + Similar to cv::BaseColumnFilter, the class may have some context information, + that should be reset using BaseFilter::reset() method before processing the new array. +*/ +class CV_EXPORTS BaseFilter +{ +public: + //! the default constructor + BaseFilter(); + //! the destructor + virtual ~BaseFilter(); + //! the filtering operator. The horizontal and the vertical border interpolation is done outside of the class. + virtual void operator()(const uchar** src, uchar* dst, int dststep, + int dstcount, int width, int cn) = 0; + //! resets the internal buffers, if any + virtual void reset(); + Size ksize; + Point anchor; +}; + +/*! + The Main Class for Image Filtering. + + The class can be used to apply an arbitrary filtering operation to an image. + It contains all the necessary intermediate buffers, it computes extrapolated values + of the "virtual" pixels outside of the image etc. + Pointers to the initialized cv::FilterEngine instances + are returned by various OpenCV functions, such as cv::createSeparableLinearFilter(), + cv::createLinearFilter(), cv::createGaussianFilter(), cv::createDerivFilter(), + cv::createBoxFilter() and cv::createMorphologyFilter(). + + Using the class you can process large images by parts and build complex pipelines + that include filtering as some of the stages. If all you need is to apply some pre-defined + filtering operation, you may use cv::filter2D(), cv::erode(), cv::dilate() etc. + functions that create FilterEngine internally. + + Here is the example on how to use the class to implement Laplacian operator, which is the sum of + second-order derivatives. More complex variant for different types is implemented in cv::Laplacian(). 
+
+ \code
+ void laplace_f(const Mat& src, Mat& dst)
+ {
+ CV_Assert( src.type() == CV_32F );
+ // make sure the destination array has the proper size and type
+ dst.create(src.size(), src.type());
+
+ // get the derivative and smooth kernels for d2I/dx2.
+ // for d2I/dy2 we could use the same kernels, just swapped
+ Mat kd, ks;
+ getSobelKernels( kd, ks, 2, 0, ksize, false, ktype );
+
+ // let's process 10 source rows at once
+ int DELTA = std::min(10, src.rows);
+ Ptr Fxx = createSeparableLinearFilter(src.type(),
+ dst.type(), kd, ks, Point(-1,-1), 0, borderType, borderType, Scalar() );
+ Ptr Fyy = createSeparableLinearFilter(src.type(),
+ dst.type(), ks, kd, Point(-1,-1), 0, borderType, borderType, Scalar() );
+
+ int y = Fxx->start(src), dsty = 0, dy = 0;
+ Fyy->start(src);
+ const uchar* sptr = src.data + y*src.step;
+
+ // allocate the buffers for the spatial image derivatives;
+ // the buffers need to have more than DELTA rows, because at the
+ // last iteration the output may take max(kd.rows-1,ks.rows-1)
+ // rows more than the input.
+ Mat Ixx( DELTA + kd.rows - 1, src.cols, dst.type() );
+ Mat Iyy( DELTA + kd.rows - 1, src.cols, dst.type() );
+
+ // inside the loop we always pass DELTA rows to the filter
+ // (note that the "proceed" method takes care of possible overflow, since
+ // it was given the actual image height in the "start" method)
+ // on output we can get:
+ // * < DELTA rows (the initial buffer accumulation stage)
+ // * = DELTA rows (settled state in the middle)
+ // * > DELTA rows (then the input image is over, but we generate
+ // "virtual" rows using the border mode and filter them)
+ // this variable number of output rows is dy.
+ // dsty is the current output row.
+ // sptr is the pointer to the first input row in the portion to process
+ for( ; dsty < dst.rows; sptr += DELTA*src.step, dsty += dy )
+ {
+ Fxx->proceed( sptr, (int)src.step, DELTA, Ixx.data, (int)Ixx.step );
+ dy = Fyy->proceed( sptr, (int)src.step, DELTA, Iyy.data, (int)Iyy.step );
+ if( dy > 0 )
+ {
+ Mat dstripe = dst.rowRange(dsty, dsty + dy);
+ add(Ixx.rowRange(0, dy), Iyy.rowRange(0, dy), dstripe);
+ }
+ }
+ }
+ \endcode
+*/
+class CV_EXPORTS FilterEngine
+{
+public:
+ //! the default constructor
+ FilterEngine();
+ //! the full constructor. Either _filter2D or both _rowFilter and _columnFilter must be non-empty.
+ FilterEngine(const Ptr& _filter2D,
+ const Ptr& _rowFilter,
+ const Ptr& _columnFilter,
+ int srcType, int dstType, int bufType,
+ int _rowBorderType=BORDER_REPLICATE,
+ int _columnBorderType=-1,
+ const Scalar& _borderValue=Scalar());
+ //! the destructor
+ virtual ~FilterEngine();
+ //! reinitializes the engine. The previously assigned filters are released.
+ void init(const Ptr& _filter2D,
+ const Ptr& _rowFilter,
+ const Ptr& _columnFilter,
+ int srcType, int dstType, int bufType,
+ int _rowBorderType=BORDER_REPLICATE, int _columnBorderType=-1,
+ const Scalar& _borderValue=Scalar());
+ //! starts filtering of the specified ROI of an image of size wholeSize.
+ virtual int start(Size wholeSize, Rect roi, int maxBufRows=-1);
+ //! starts filtering of the specified ROI of the specified image.
+ virtual int start(const Mat& src, const Rect& srcRoi=Rect(0,0,-1,-1),
+ bool isolated=false, int maxBufRows=-1);
+ //! processes the next srcCount rows of the image.
+ virtual int proceed(const uchar* src, int srcStep, int srcCount,
+ uchar* dst, int dstStep);
+ //! applies filter to the specified ROI of the image. if srcRoi=(0,0,-1,-1), the whole image is filtered.
+ virtual void apply( const Mat& src, Mat& dst,
+ const Rect& srcRoi=Rect(0,0,-1,-1),
+ Point dstOfs=Point(0,0),
+ bool isolated=false);
+ //! returns true if the filter is separable
+ bool isSeparable() const { return (const BaseFilter*)filter2D == 0; }
+ //! returns the number of remaining input/output rows
+ int remainingInputRows() const;
+ int remainingOutputRows() const;
+
+ int srcType, dstType, bufType;
+ Size ksize;
+ Point anchor;
+ int maxWidth;
+ Size wholeSize;
+ Rect roi;
+ int dx1, dx2;
+ int rowBorderType, columnBorderType;
+ vector borderTab;
+ int borderElemSize;
+ vector ringBuf;
+ vector srcRow;
+ vector constBorderValue;
+ vector constBorderRow;
+ int bufStep, startY, startY0, endY, rowCount, dstY;
+ vector rows;
+
+ Ptr filter2D;
+ Ptr rowFilter;
+ Ptr columnFilter;
+};
+
+//! type of the kernel
+enum { KERNEL_GENERAL=0, KERNEL_SYMMETRICAL=1, KERNEL_ASYMMETRICAL=2,
+ KERNEL_SMOOTH=4, KERNEL_INTEGER=8 };
+
+//! returns type (one of KERNEL_*) of 1D or 2D kernel specified by its coefficients.
+CV_EXPORTS int getKernelType(InputArray kernel, Point anchor);
+
+//! returns the primitive row filter with the specified kernel
+CV_EXPORTS Ptr getLinearRowFilter(int srcType, int bufType,
+ InputArray kernel, int anchor,
+ int symmetryType);
+
+//! returns the primitive column filter with the specified kernel
+CV_EXPORTS Ptr getLinearColumnFilter(int bufType, int dstType,
+ InputArray kernel, int anchor,
+ int symmetryType, double delta=0,
+ int bits=0);
+
+//! returns 2D filter with the specified kernel
+CV_EXPORTS Ptr getLinearFilter(int srcType, int dstType,
+ InputArray kernel,
+ Point anchor=Point(-1,-1),
+ double delta=0, int bits=0);
+
+//! returns the separable linear filter engine
+CV_EXPORTS Ptr createSeparableLinearFilter(int srcType, int dstType,
+ InputArray rowKernel, InputArray columnKernel,
+ Point _anchor=Point(-1,-1), double delta=0,
+ int _rowBorderType=BORDER_DEFAULT,
+ int _columnBorderType=-1,
+ const Scalar& _borderValue=Scalar());
+
+//!
returns the non-separable linear filter engine +CV_EXPORTS Ptr createLinearFilter(int srcType, int dstType, + InputArray kernel, Point _anchor=Point(-1,-1), + double delta=0, int _rowBorderType=BORDER_DEFAULT, + int _columnBorderType=-1, const Scalar& _borderValue=Scalar()); + +//! returns the Gaussian kernel with the specified parameters +CV_EXPORTS_W Mat getGaussianKernel( int ksize, double sigma, int ktype=CV_64F ); + +//! returns the Gaussian filter engine +CV_EXPORTS Ptr createGaussianFilter( int type, Size ksize, + double sigma1, double sigma2=0, + int borderType=BORDER_DEFAULT); +//! initializes kernels of the generalized Sobel operator +CV_EXPORTS_W void getDerivKernels( OutputArray kx, OutputArray ky, + int dx, int dy, int ksize, + bool normalize=false, int ktype=CV_32F ); +//! returns filter engine for the generalized Sobel operator +CV_EXPORTS Ptr createDerivFilter( int srcType, int dstType, + int dx, int dy, int ksize, + int borderType=BORDER_DEFAULT ); +//! returns horizontal 1D box filter +CV_EXPORTS Ptr getRowSumFilter(int srcType, int sumType, + int ksize, int anchor=-1); +//! returns vertical 1D box filter +CV_EXPORTS Ptr getColumnSumFilter( int sumType, int dstType, + int ksize, int anchor=-1, + double scale=1); +//! returns box filter engine +CV_EXPORTS Ptr createBoxFilter( int srcType, int dstType, Size ksize, + Point anchor=Point(-1,-1), + bool normalize=true, + int borderType=BORDER_DEFAULT); +//! type of morphological operation +enum { MORPH_ERODE=CV_MOP_ERODE, MORPH_DILATE=CV_MOP_DILATE, + MORPH_OPEN=CV_MOP_OPEN, MORPH_CLOSE=CV_MOP_CLOSE, + MORPH_GRADIENT=CV_MOP_GRADIENT, MORPH_TOPHAT=CV_MOP_TOPHAT, + MORPH_BLACKHAT=CV_MOP_BLACKHAT }; + +//! returns horizontal 1D morphological filter +CV_EXPORTS Ptr getMorphologyRowFilter(int op, int type, int ksize, int anchor=-1); +//! returns vertical 1D morphological filter +CV_EXPORTS Ptr getMorphologyColumnFilter(int op, int type, int ksize, int anchor=-1); +//! 
returns 2D morphological filter +CV_EXPORTS Ptr getMorphologyFilter(int op, int type, InputArray kernel, + Point anchor=Point(-1,-1)); + +//! returns "magic" border value for erosion and dilation. It is automatically transformed to Scalar::all(-DBL_MAX) for dilation. +static inline Scalar morphologyDefaultBorderValue() { return Scalar::all(DBL_MAX); } + +//! returns morphological filter engine. Only MORPH_ERODE and MORPH_DILATE are supported. +CV_EXPORTS Ptr createMorphologyFilter(int op, int type, InputArray kernel, + Point anchor=Point(-1,-1), int _rowBorderType=BORDER_CONSTANT, + int _columnBorderType=-1, + const Scalar& _borderValue=morphologyDefaultBorderValue()); + +//! shape of the structuring element +enum { MORPH_RECT=0, MORPH_CROSS=1, MORPH_ELLIPSE=2 }; +//! returns structuring element of the specified shape and size +CV_EXPORTS_W Mat getStructuringElement(int shape, Size ksize, Point anchor=Point(-1,-1)); + +template<> CV_EXPORTS void Ptr::delete_obj(); + +//! copies 2D array to a larger destination array with extrapolation of the outer part of src using the specified border mode +CV_EXPORTS_W void copyMakeBorder( InputArray src, OutputArray dst, + int top, int bottom, int left, int right, + int borderType, const Scalar& value=Scalar() ); + +//! smooths the image using median filter. +CV_EXPORTS_W void medianBlur( InputArray src, OutputArray dst, int ksize ); +//! smooths the image using Gaussian filter. +CV_EXPORTS_W void GaussianBlur( InputArray src, + OutputArray dst, Size ksize, + double sigma1, double sigma2=0, + int borderType=BORDER_DEFAULT ); +//! smooths the image using bilateral filter +CV_EXPORTS_W void bilateralFilter( InputArray src, OutputArray dst, int d, + double sigmaColor, double sigmaSpace, + int borderType=BORDER_DEFAULT ); +//! smooths the image using the box filter. 
Each pixel is processed in O(1) time +CV_EXPORTS_W void boxFilter( InputArray src, OutputArray dst, int ddepth, + Size ksize, Point anchor=Point(-1,-1), + bool normalize=true, + int borderType=BORDER_DEFAULT ); +//! a synonym for normalized box filter +CV_EXPORTS_W void blur( InputArray src, OutputArray dst, + Size ksize, Point anchor=Point(-1,-1), + int borderType=BORDER_DEFAULT ); + +//! applies non-separable 2D linear filter to the image +CV_EXPORTS_W void filter2D( InputArray src, OutputArray dst, int ddepth, + InputArray kernel, Point anchor=Point(-1,-1), + double delta=0, int borderType=BORDER_DEFAULT ); + +//! applies separable 2D linear filter to the image +CV_EXPORTS_W void sepFilter2D( InputArray src, OutputArray dst, int ddepth, + InputArray kernelX, InputArray kernelY, + Point anchor=Point(-1,-1), + double delta=0, int borderType=BORDER_DEFAULT ); + +//! applies generalized Sobel operator to the image +CV_EXPORTS_W void Sobel( InputArray src, OutputArray dst, int ddepth, + int dx, int dy, int ksize=3, + double scale=1, double delta=0, + int borderType=BORDER_DEFAULT ); + +//! applies the vertical or horizontal Scharr operator to the image +CV_EXPORTS_W void Scharr( InputArray src, OutputArray dst, int ddepth, + int dx, int dy, double scale=1, double delta=0, + int borderType=BORDER_DEFAULT ); + +//! applies Laplacian operator to the image +CV_EXPORTS_W void Laplacian( InputArray src, OutputArray dst, int ddepth, + int ksize=1, double scale=1, double delta=0, + int borderType=BORDER_DEFAULT ); + +//! applies Canny edge detector and produces the edge map. +CV_EXPORTS_W void Canny( InputArray image, OutputArray edges, + double threshold1, double threshold2, + int apertureSize=3, bool L2gradient=false ); + +//! computes minimum eigen value of 2x2 derivative covariation matrix at each pixel - the cornerness criteria +CV_EXPORTS_W void cornerMinEigenVal( InputArray src, OutputArray dst, + int blockSize, int ksize=3, + int borderType=BORDER_DEFAULT ); + +//! 
computes Harris cornerness criteria at each image pixel
+CV_EXPORTS_W void cornerHarris( InputArray src, OutputArray dst, int blockSize,
+ int ksize, double k,
+ int borderType=BORDER_DEFAULT );
+
+//! computes both eigenvalues and the eigenvectors of 2x2 derivative covariation matrix at each pixel. The output is stored as 6-channel matrix.
+CV_EXPORTS_W void cornerEigenValsAndVecs( InputArray src, OutputArray dst,
+ int blockSize, int ksize,
+ int borderType=BORDER_DEFAULT );
+
+//! computes another complex cornerness criteria at each pixel
+CV_EXPORTS_W void preCornerDetect( InputArray src, OutputArray dst, int ksize,
+ int borderType=BORDER_DEFAULT );
+
+//! adjusts the corner locations with sub-pixel accuracy to maximize the certain cornerness criteria
+CV_EXPORTS_W void cornerSubPix( InputArray image, InputOutputArray corners,
+ Size winSize, Size zeroZone,
+ TermCriteria criteria );
+
+//! finds the strong enough corners where the cornerMinEigenVal() or cornerHarris() report the local maxima
+CV_EXPORTS_W void goodFeaturesToTrack( InputArray image, OutputArray corners,
+ int maxCorners, double qualityLevel, double minDistance,
+ InputArray mask=noArray(), int blockSize=3,
+ bool useHarrisDetector=false, double k=0.04 );
+
+//! finds lines in the black-n-white image using the standard or pyramid Hough transform
+CV_EXPORTS_W void HoughLines( InputArray image, OutputArray lines,
+ double rho, double theta, int threshold,
+ double srn=0, double stn=0 );
+
+//! finds line segments in the black-n-white image using probabilistic Hough transform
+CV_EXPORTS_W void HoughLinesP( InputArray image, OutputArray lines,
+ double rho, double theta, int threshold,
+ double minLineLength=0, double maxLineGap=0 );
+
+//!
finds circles in the grayscale image using 2+1 gradient Hough transform +CV_EXPORTS_W void HoughCircles( InputArray image, OutputArray circles, + int method, double dp, double minDist, + double param1=100, double param2=100, + int minRadius=0, int maxRadius=0 ); + +//! erodes the image (applies the local minimum operator) +CV_EXPORTS_W void erode( InputArray src, OutputArray dst, InputArray kernel, + Point anchor=Point(-1,-1), int iterations=1, + int borderType=BORDER_CONSTANT, + const Scalar& borderValue=morphologyDefaultBorderValue() ); + +//! dilates the image (applies the local maximum operator) +CV_EXPORTS_W void dilate( InputArray src, OutputArray dst, InputArray kernel, + Point anchor=Point(-1,-1), int iterations=1, + int borderType=BORDER_CONSTANT, + const Scalar& borderValue=morphologyDefaultBorderValue() ); + +//! applies an advanced morphological operation to the image +CV_EXPORTS_W void morphologyEx( InputArray src, OutputArray dst, + int op, InputArray kernel, + Point anchor=Point(-1,-1), int iterations=1, + int borderType=BORDER_CONSTANT, + const Scalar& borderValue=morphologyDefaultBorderValue() ); + +//! interpolation algorithm +enum +{ + INTER_NEAREST=CV_INTER_NN, //!< nearest neighbor interpolation + INTER_LINEAR=CV_INTER_LINEAR, //!< bilinear interpolation + INTER_CUBIC=CV_INTER_CUBIC, //!< bicubic interpolation + INTER_AREA=CV_INTER_AREA, //!< area-based (or super) interpolation + INTER_LANCZOS4=CV_INTER_LANCZOS4, //!< Lanczos interpolation over 8x8 neighborhood + INTER_MAX=7, + WARP_INVERSE_MAP=CV_WARP_INVERSE_MAP +}; + +//! resizes the image +CV_EXPORTS_W void resize( InputArray src, OutputArray dst, + Size dsize, double fx=0, double fy=0, + int interpolation=INTER_LINEAR ); + +//! warps the image using affine transformation +CV_EXPORTS_W void warpAffine( InputArray src, OutputArray dst, + InputArray M, Size dsize, + int flags=INTER_LINEAR, + int borderMode=BORDER_CONSTANT, + const Scalar& borderValue=Scalar()); + +//! 
warps the image using perspective transformation +CV_EXPORTS_W void warpPerspective( InputArray src, OutputArray dst, + InputArray M, Size dsize, + int flags=INTER_LINEAR, + int borderMode=BORDER_CONSTANT, + const Scalar& borderValue=Scalar()); + +enum +{ + INTER_BITS=5, INTER_BITS2=INTER_BITS*2, + INTER_TAB_SIZE=(1< CV_EXPORTS void Ptr::delete_obj(); + +//! computes the joint dense histogram for a set of images. +CV_EXPORTS void calcHist( const Mat* images, int nimages, + const int* channels, InputArray mask, + OutputArray hist, int dims, const int* histSize, + const float** ranges, bool uniform=true, bool accumulate=false ); + +//! computes the joint sparse histogram for a set of images. +CV_EXPORTS void calcHist( const Mat* images, int nimages, + const int* channels, InputArray mask, + SparseMat& hist, int dims, + const int* histSize, const float** ranges, + bool uniform=true, bool accumulate=false ); + +CV_EXPORTS_W void calcHist( InputArrayOfArrays images, + const vector& channels, + InputArray mask, OutputArray hist, + const vector& histSize, + const vector& ranges, + bool accumulate=false ); + +//! computes back projection for the set of images +CV_EXPORTS void calcBackProject( const Mat* images, int nimages, + const int* channels, InputArray hist, + OutputArray backProject, const float** ranges, + double scale=1, bool uniform=true ); + +//! computes back projection for the set of images +CV_EXPORTS void calcBackProject( const Mat* images, int nimages, + const int* channels, const SparseMat& hist, + OutputArray backProject, const float** ranges, + double scale=1, bool uniform=true ); + +CV_EXPORTS_W void calcBackProject( InputArrayOfArrays images, const vector& channels, + InputArray hist, OutputArray dst, + const vector& ranges, + double scale ); + +//! compares two histograms stored in dense arrays +CV_EXPORTS_W double compareHist( InputArray H1, InputArray H2, int method ); + +//! 
compares two histograms stored in sparse arrays
+CV_EXPORTS double compareHist( const SparseMat& H1, const SparseMat& H2, int method );
+
+//! normalizes the grayscale image brightness and contrast by normalizing its histogram
+CV_EXPORTS_W void equalizeHist( InputArray src, OutputArray dst );
+
+CV_EXPORTS float EMD( InputArray signature1, InputArray signature2,
+ int distType, InputArray cost=noArray(),
+ float* lowerBound=0, OutputArray flow=noArray() );
+
+//! segments the image using watershed algorithm
+CV_EXPORTS_W void watershed( InputArray image, InputOutputArray markers );
+
+//! filters image using meanshift algorithm
+CV_EXPORTS_W void pyrMeanShiftFiltering( InputArray src, OutputArray dst,
+ double sp, double sr, int maxLevel=1,
+ TermCriteria termcrit=TermCriteria(
+ TermCriteria::MAX_ITER+TermCriteria::EPS,5,1) );
+
+//! class of the pixel in GrabCut algorithm
+enum
+{
+ GC_BGD = 0, //!< background
+ GC_FGD = 1, //!< foreground
+ GC_PR_BGD = 2, //!< most probably background
+ GC_PR_FGD = 3 //!< most probably foreground
+};
+
+//! GrabCut algorithm flags
+enum
+{
+ GC_INIT_WITH_RECT = 0,
+ GC_INIT_WITH_MASK = 1,
+ GC_EVAL = 2
+};
+
+//! segments the image using GrabCut algorithm
+CV_EXPORTS_W void grabCut( InputArray img, InputOutputArray mask, Rect rect,
+ InputOutputArray bgdModel, InputOutputArray fgdModel,
+ int iterCount, int mode = GC_EVAL );
+
+//! the inpainting algorithm
+enum
+{
+ INPAINT_NS=CV_INPAINT_NS, // Navier-Stokes algorithm
+ INPAINT_TELEA=CV_INPAINT_TELEA // A. Telea algorithm
+};
+
+//! restores the damaged image areas using one of the available inpainting algorithms
+CV_EXPORTS_W void inpaint( InputArray src, InputArray inpaintMask,
+ OutputArray dst, double inpaintRange, int flags );
+
+//! builds the discrete Voronoi diagram
+CV_EXPORTS_W void distanceTransform( InputArray src, OutputArray dst,
+ OutputArray labels, int distanceType, int maskSize );
+
+//!
computes the distance transform map +CV_EXPORTS void distanceTransform( InputArray src, OutputArray dst, + int distanceType, int maskSize ); + +enum { FLOODFILL_FIXED_RANGE = 1 << 16, FLOODFILL_MASK_ONLY = 1 << 17 }; + +//! fills the semi-uniform image region starting from the specified seed point +CV_EXPORTS int floodFill( InputOutputArray image, + Point seedPoint, Scalar newVal, CV_OUT Rect* rect=0, + Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), + int flags=4 ); + +//! fills the semi-uniform image region and/or the mask starting from the specified seed point +CV_EXPORTS_W int floodFill( InputOutputArray image, InputOutputArray mask, + Point seedPoint, Scalar newVal, CV_OUT Rect* rect=0, + Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), + int flags=4 ); + + +enum +{ + COLOR_BGR2BGRA =0, + COLOR_RGB2RGBA =COLOR_BGR2BGRA, + + COLOR_BGRA2BGR =1, + COLOR_RGBA2RGB =COLOR_BGRA2BGR, + + COLOR_BGR2RGBA =2, + COLOR_RGB2BGRA =COLOR_BGR2RGBA, + + COLOR_RGBA2BGR =3, + COLOR_BGRA2RGB =COLOR_RGBA2BGR, + + COLOR_BGR2RGB =4, + COLOR_RGB2BGR =COLOR_BGR2RGB, + + COLOR_BGRA2RGBA =5, + COLOR_RGBA2BGRA =COLOR_BGRA2RGBA, + + COLOR_BGR2GRAY =6, + COLOR_RGB2GRAY =7, + COLOR_GRAY2BGR =8, + COLOR_GRAY2RGB =COLOR_GRAY2BGR, + COLOR_GRAY2BGRA =9, + COLOR_GRAY2RGBA =COLOR_GRAY2BGRA, + COLOR_BGRA2GRAY =10, + COLOR_RGBA2GRAY =11, + + COLOR_BGR2BGR565 =12, + COLOR_RGB2BGR565 =13, + COLOR_BGR5652BGR =14, + COLOR_BGR5652RGB =15, + COLOR_BGRA2BGR565 =16, + COLOR_RGBA2BGR565 =17, + COLOR_BGR5652BGRA =18, + COLOR_BGR5652RGBA =19, + + COLOR_GRAY2BGR565 =20, + COLOR_BGR5652GRAY =21, + + COLOR_BGR2BGR555 =22, + COLOR_RGB2BGR555 =23, + COLOR_BGR5552BGR =24, + COLOR_BGR5552RGB =25, + COLOR_BGRA2BGR555 =26, + COLOR_RGBA2BGR555 =27, + COLOR_BGR5552BGRA =28, + COLOR_BGR5552RGBA =29, + + COLOR_GRAY2BGR555 =30, + COLOR_BGR5552GRAY =31, + + COLOR_BGR2XYZ =32, + COLOR_RGB2XYZ =33, + COLOR_XYZ2BGR =34, + COLOR_XYZ2RGB =35, + + COLOR_BGR2YCrCb =36, + COLOR_RGB2YCrCb =37, + COLOR_YCrCb2BGR =38, + COLOR_YCrCb2RGB 
=39, + + COLOR_BGR2HSV =40, + COLOR_RGB2HSV =41, + + COLOR_BGR2Lab =44, + COLOR_RGB2Lab =45, + + COLOR_BayerBG2BGR =46, + COLOR_BayerGB2BGR =47, + COLOR_BayerRG2BGR =48, + COLOR_BayerGR2BGR =49, + + COLOR_BayerBG2RGB =COLOR_BayerRG2BGR, + COLOR_BayerGB2RGB =COLOR_BayerGR2BGR, + COLOR_BayerRG2RGB =COLOR_BayerBG2BGR, + COLOR_BayerGR2RGB =COLOR_BayerGB2BGR, + + COLOR_BGR2Luv =50, + COLOR_RGB2Luv =51, + COLOR_BGR2HLS =52, + COLOR_RGB2HLS =53, + + COLOR_HSV2BGR =54, + COLOR_HSV2RGB =55, + + COLOR_Lab2BGR =56, + COLOR_Lab2RGB =57, + COLOR_Luv2BGR =58, + COLOR_Luv2RGB =59, + COLOR_HLS2BGR =60, + COLOR_HLS2RGB =61, + + COLOR_BayerBG2BGR_VNG =62, + COLOR_BayerGB2BGR_VNG =63, + COLOR_BayerRG2BGR_VNG =64, + COLOR_BayerGR2BGR_VNG =65, + + COLOR_BayerBG2RGB_VNG =COLOR_BayerRG2BGR_VNG, + COLOR_BayerGB2RGB_VNG =COLOR_BayerGR2BGR_VNG, + COLOR_BayerRG2RGB_VNG =COLOR_BayerBG2BGR_VNG, + COLOR_BayerGR2RGB_VNG =COLOR_BayerGB2BGR_VNG, + + COLOR_BGR2HSV_FULL = 66, + COLOR_RGB2HSV_FULL = 67, + COLOR_BGR2HLS_FULL = 68, + COLOR_RGB2HLS_FULL = 69, + + COLOR_HSV2BGR_FULL = 70, + COLOR_HSV2RGB_FULL = 71, + COLOR_HLS2BGR_FULL = 72, + COLOR_HLS2RGB_FULL = 73, + + COLOR_LBGR2Lab = 74, + COLOR_LRGB2Lab = 75, + COLOR_LBGR2Luv = 76, + COLOR_LRGB2Luv = 77, + + COLOR_Lab2LBGR = 78, + COLOR_Lab2LRGB = 79, + COLOR_Luv2LBGR = 80, + COLOR_Luv2LRGB = 81, + + COLOR_BGR2YUV = 82, + COLOR_RGB2YUV = 83, + COLOR_YUV2BGR = 84, + COLOR_YUV2RGB = 85, + + COLOR_BayerBG2GRAY = 86, + COLOR_BayerGB2GRAY = 87, + COLOR_BayerRG2GRAY = 88, + COLOR_BayerGR2GRAY = 89, + + COLOR_YUV420i2RGB = 90, + COLOR_YUV420i2BGR = 91, + COLOR_YUV420sp2RGB = 92, + COLOR_YUV420sp2BGR = 93, + + COLOR_COLORCVT_MAX =100 +}; + + +//! converts image from one color space to another +CV_EXPORTS_W void cvtColor( InputArray src, OutputArray dst, int code, int dstCn=0 ); + +//! raster image moments +class CV_EXPORTS_W_MAP Moments +{ +public: + //! the default constructor + Moments(); + //! 
the full constructor + Moments(double m00, double m10, double m01, double m20, double m11, + double m02, double m30, double m21, double m12, double m03 ); + //! the conversion from CvMoments + Moments( const CvMoments& moments ); + //! the conversion to CvMoments + operator CvMoments() const; + + //! spatial moments + CV_PROP_RW double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03; + //! central moments + CV_PROP_RW double mu20, mu11, mu02, mu30, mu21, mu12, mu03; + //! central normalized moments + CV_PROP_RW double nu20, nu11, nu02, nu30, nu21, nu12, nu03; +}; + +//! computes moments of the rasterized shape or a vector of points +CV_EXPORTS_W Moments moments( InputArray array, bool binaryImage=false ); + +//! computes 7 Hu invariants from the moments +CV_EXPORTS void HuMoments( const Moments& moments, double hu[7] ); +CV_EXPORTS_W void HuMoments( const Moments& m, CV_OUT OutputArray hu ); + +//! type of the template matching operation +enum { TM_SQDIFF=0, TM_SQDIFF_NORMED=1, TM_CCORR=2, TM_CCORR_NORMED=3, TM_CCOEFF=4, TM_CCOEFF_NORMED=5 }; + +//! computes the proximity map for the raster template and the image where the template is searched for +CV_EXPORTS_W void matchTemplate( InputArray image, InputArray templ, + OutputArray result, int method ); + +//! mode of the contour retrieval algorithm +enum +{ + RETR_EXTERNAL=CV_RETR_EXTERNAL, //!< retrieve only the most external (top-level) contours + RETR_LIST=CV_RETR_LIST, //!< retrieve all the contours without any hierarchical information + RETR_CCOMP=CV_RETR_CCOMP, //!< retrieve the connected components (that can possibly be nested) + RETR_TREE=CV_RETR_TREE //!< retrieve all the contours and the whole hierarchy +}; + +//! the contour approximation algorithm +enum +{ + CHAIN_APPROX_NONE=CV_CHAIN_APPROX_NONE, + CHAIN_APPROX_SIMPLE=CV_CHAIN_APPROX_SIMPLE, + CHAIN_APPROX_TC89_L1=CV_CHAIN_APPROX_TC89_L1, + CHAIN_APPROX_TC89_KCOS=CV_CHAIN_APPROX_TC89_KCOS +}; + +//! 
retrieves contours and the hierarchical information from black-n-white image. +CV_EXPORTS_W void findContours( InputOutputArray image, OutputArrayOfArrays contours, + OutputArray hierarchy, int mode, + int method, Point offset=Point()); + +//! retrieves contours from black-n-white image. +CV_EXPORTS void findContours( InputOutputArray image, OutputArrayOfArrays contours, + int mode, int method, Point offset=Point()); + +//! draws contours in the image +CV_EXPORTS_W void drawContours( InputOutputArray image, InputArrayOfArrays contours, + int contourIdx, const Scalar& color, + int thickness=1, int lineType=8, + InputArray hierarchy=noArray(), + int maxLevel=INT_MAX, Point offset=Point() ); + +//! approximates contour or a curve using Douglas-Peucker algorithm +CV_EXPORTS_W void approxPolyDP( InputArray curve, + OutputArray approxCurve, + double epsilon, bool closed ); + +//! computes the contour perimeter (closed=true) or a curve length +CV_EXPORTS_W double arcLength( InputArray curve, bool closed ); +//! computes the bounding rectangle for a contour +CV_EXPORTS_W Rect boundingRect( InputArray points ); +//! computes the contour area +CV_EXPORTS_W double contourArea( InputArray contour, bool oriented=false ); +//! computes the minimal rotated rectangle for a set of points +CV_EXPORTS_W RotatedRect minAreaRect( InputArray points ); +//! computes the minimal enclosing circle for a set of points +CV_EXPORTS_W void minEnclosingCircle( InputArray points, + CV_OUT Point2f& center, CV_OUT float& radius ); +//! matches two contours using one of the available algorithms +CV_EXPORTS_W double matchShapes( InputArray contour1, InputArray contour2, + int method, double parameter ); +//! computes convex hull for a set of 2D points. +CV_EXPORTS_W void convexHull( InputArray points, OutputArray hull, + bool clockwise=false, bool returnPoints=true ); + +//! returns true iff the contour is convex. 
Does not support contours with self-intersection +CV_EXPORTS_W bool isContourConvex( InputArray contour ); + +//! fits ellipse to the set of 2D points +CV_EXPORTS_W RotatedRect fitEllipse( InputArray points ); + +//! fits line to the set of 2D points using M-estimator algorithm +CV_EXPORTS_W void fitLine( InputArray points, OutputArray line, int distType, + double param, double reps, double aeps ); +//! checks if the point is inside the contour. Optionally computes the signed distance from the point to the contour boundary +CV_EXPORTS_W double pointPolygonTest( InputArray contour, Point2f pt, bool measureDist ); + + +class CV_EXPORTS_W Subdiv2D +{ +public: + enum + { + PTLOC_ERROR = -2, + PTLOC_OUTSIDE_RECT = -1, + PTLOC_INSIDE = 0, + PTLOC_VERTEX = 1, + PTLOC_ON_EDGE = 2 + }; + + enum + { + NEXT_AROUND_ORG = 0x00, + NEXT_AROUND_DST = 0x22, + PREV_AROUND_ORG = 0x11, + PREV_AROUND_DST = 0x33, + NEXT_AROUND_LEFT = 0x13, + NEXT_AROUND_RIGHT = 0x31, + PREV_AROUND_LEFT = 0x20, + PREV_AROUND_RIGHT = 0x02 + }; + + CV_WRAP Subdiv2D(); + CV_WRAP Subdiv2D(Rect rect); + CV_WRAP void initDelaunay(Rect rect); + + CV_WRAP int insert(Point2f pt); + CV_WRAP void insert(const vector& ptvec); + CV_WRAP int locate(Point2f pt, CV_OUT int& edge, CV_OUT int& vertex); + + CV_WRAP int findNearest(Point2f pt, CV_OUT Point2f* nearestPt=0); + CV_WRAP void getEdgeList(CV_OUT vector& edgeList) const; + CV_WRAP void getTriangleList(CV_OUT vector& triangleList) const; + CV_WRAP void getVoronoiFacetList(const vector& idx, CV_OUT vector >& facetList, + CV_OUT vector& facetCenters); + + CV_WRAP Point2f getVertex(int vertex, CV_OUT int* firstEdge=0) const; + + CV_WRAP int getEdge( int edge, int nextEdgeType ) const; + CV_WRAP int nextEdge(int edge) const; + CV_WRAP int rotateEdge(int edge, int rotate) const; + CV_WRAP int symEdge(int edge) const; + CV_WRAP int edgeOrg(int edge, CV_OUT Point2f* orgpt=0) const; + CV_WRAP int edgeDst(int edge, CV_OUT Point2f* dstpt=0) const; + +protected: + int 
newEdge(); + void deleteEdge(int edge); + int newPoint(Point2f pt, bool isvirtual, int firstEdge=0); + void deletePoint(int vtx); + void setEdgePoints( int edge, int orgPt, int dstPt ); + void splice( int edgeA, int edgeB ); + int connectEdges( int edgeA, int edgeB ); + void swapEdges( int edge ); + int isRightOf(Point2f pt, int edge) const; + void calcVoronoi(); + void clearVoronoi(); + void check() const; + + struct CV_EXPORTS Vertex + { + Vertex(); + Vertex(Point2f pt, bool _isvirtual, int _firstEdge=0); + bool isvirtual() const; + bool isfree() const; + int firstEdge; + int type; + Point2f pt; + }; + struct CV_EXPORTS QuadEdge + { + QuadEdge(); + QuadEdge(int edgeidx); + bool isfree() const; + int next[4]; + int pt[4]; + }; + + vector vtx; + vector qedges; + int freeQEdge; + int freePoint; + bool validGeometry; + + int recentEdge; + Point2f topLeft; + Point2f bottomRight; +}; + +} + +// 2009-01-12, Xavier Delacour + +struct lsh_hash { + int h1, h2; +}; + +struct CvLSHOperations +{ + virtual ~CvLSHOperations() {} + + virtual int vector_add(const void* data) = 0; + virtual void vector_remove(int i) = 0; + virtual const void* vector_lookup(int i) = 0; + virtual void vector_reserve(int n) = 0; + virtual unsigned int vector_count() = 0; + + virtual void hash_insert(lsh_hash h, int l, int i) = 0; + virtual void hash_remove(lsh_hash h, int l, int i) = 0; + virtual int hash_lookup(lsh_hash h, int l, int* ret_i, int ret_i_max) = 0; +}; + +#endif /* __cplusplus */ + +#endif + +/* End of file. */ diff --git a/opencv2/imgproc/imgproc_c.h b/opencv2/imgproc/imgproc_c.h new file mode 100644 index 0000000..b845e1c --- /dev/null +++ b/opencv2/imgproc/imgproc_c.h @@ -0,0 +1,783 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. 
+// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_IMGPROC_IMGPROC_C_H__ +#define __OPENCV_IMGPROC_IMGPROC_C_H__ + +#include "opencv2/core/core_c.h" +#include "opencv2/imgproc/types_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/*********************** Background statistics accumulation *****************************/ + +/* Adds image to accumulator */ +CVAPI(void) cvAcc( const CvArr* image, CvArr* sum, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Adds squared image to accumulator */ +CVAPI(void) cvSquareAcc( const CvArr* image, CvArr* sqsum, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Adds a product of two images to accumulator */ +CVAPI(void) cvMultiplyAcc( const CvArr* image1, const CvArr* image2, CvArr* acc, + const CvArr* mask CV_DEFAULT(NULL) ); + +/* Adds image to accumulator with weights: acc = acc*(1-alpha) + image*alpha */ +CVAPI(void) cvRunningAvg( const CvArr* image, CvArr* acc, double alpha, + const CvArr* mask CV_DEFAULT(NULL) ); + +/****************************************************************************************\ +* Image Processing * +\****************************************************************************************/ + +/* Copies source 2D array inside of the larger destination array and + makes a border of the specified type (IPL_BORDER_*) around the copied area. 
*/ +CVAPI(void) cvCopyMakeBorder( const CvArr* src, CvArr* dst, CvPoint offset, + int bordertype, CvScalar value CV_DEFAULT(cvScalarAll(0))); + +/* Smoothes array (removes noise) */ +CVAPI(void) cvSmooth( const CvArr* src, CvArr* dst, + int smoothtype CV_DEFAULT(CV_GAUSSIAN), + int size1 CV_DEFAULT(3), + int size2 CV_DEFAULT(0), + double sigma1 CV_DEFAULT(0), + double sigma2 CV_DEFAULT(0)); + +/* Convolves the image with the kernel */ +CVAPI(void) cvFilter2D( const CvArr* src, CvArr* dst, const CvMat* kernel, + CvPoint anchor CV_DEFAULT(cvPoint(-1,-1))); + +/* Finds integral image: SUM(X,Y) = sum(xnext[(edge + (int)type) & 3]; + return (edge & ~3) + ((edge + ((int)type >> 4)) & 3); +} + + +CV_INLINE CvSubdiv2DPoint* cvSubdiv2DEdgeOrg( CvSubdiv2DEdge edge ) +{ + CvQuadEdge2D* e = (CvQuadEdge2D*)(edge & ~3); + return (CvSubdiv2DPoint*)e->pt[edge & 3]; +} + + +CV_INLINE CvSubdiv2DPoint* cvSubdiv2DEdgeDst( CvSubdiv2DEdge edge ) +{ + CvQuadEdge2D* e = (CvQuadEdge2D*)(edge & ~3); + return (CvSubdiv2DPoint*)e->pt[(edge + 2) & 3]; +} + + +CV_INLINE double cvTriangleArea( CvPoint2D32f a, CvPoint2D32f b, CvPoint2D32f c ) +{ + return ((double)b.x - a.x) * ((double)c.y - a.y) - ((double)b.y - a.y) * ((double)c.x - a.x); +} + + +/****************************************************************************************\ +* Contour Processing and Shape Analysis * +\****************************************************************************************/ + +/* Approximates a single polygonal curve (contour) or + a tree of polygonal curves (contours) */ +CVAPI(CvSeq*) cvApproxPoly( const void* src_seq, + int header_size, CvMemStorage* storage, + int method, double parameter, + int parameter2 CV_DEFAULT(0)); + +/* Calculates perimeter of a contour or length of a part of contour */ +CVAPI(double) cvArcLength( const void* curve, + CvSlice slice CV_DEFAULT(CV_WHOLE_SEQ), + int is_closed CV_DEFAULT(-1)); + +CV_INLINE double cvContourPerimeter( const void* contour ) +{ + return 
cvArcLength( contour, CV_WHOLE_SEQ, 1 ); +} + + +/* Calculates contour boundning rectangle (update=1) or + just retrieves pre-calculated rectangle (update=0) */ +CVAPI(CvRect) cvBoundingRect( CvArr* points, int update CV_DEFAULT(0) ); + +/* Calculates area of a contour or contour segment */ +CVAPI(double) cvContourArea( const CvArr* contour, + CvSlice slice CV_DEFAULT(CV_WHOLE_SEQ), + int oriented CV_DEFAULT(0)); + +/* Finds minimum area rotated rectangle bounding a set of points */ +CVAPI(CvBox2D) cvMinAreaRect2( const CvArr* points, + CvMemStorage* storage CV_DEFAULT(NULL)); + +/* Finds minimum enclosing circle for a set of points */ +CVAPI(int) cvMinEnclosingCircle( const CvArr* points, + CvPoint2D32f* center, float* radius ); + +/* Compares two contours by matching their moments */ +CVAPI(double) cvMatchShapes( const void* object1, const void* object2, + int method, double parameter CV_DEFAULT(0)); + +/* Calculates exact convex hull of 2d point set */ +CVAPI(CvSeq*) cvConvexHull2( const CvArr* input, + void* hull_storage CV_DEFAULT(NULL), + int orientation CV_DEFAULT(CV_CLOCKWISE), + int return_points CV_DEFAULT(0)); + +/* Checks whether the contour is convex or not (returns 1 if convex, 0 if not) */ +CVAPI(int) cvCheckContourConvexity( const CvArr* contour ); + + +/* Finds convexity defects for the contour */ +CVAPI(CvSeq*) cvConvexityDefects( const CvArr* contour, const CvArr* convexhull, + CvMemStorage* storage CV_DEFAULT(NULL)); + +/* Fits ellipse into a set of 2d points */ +CVAPI(CvBox2D) cvFitEllipse2( const CvArr* points ); + +/* Finds minimum rectangle containing two given rectangles */ +CVAPI(CvRect) cvMaxRect( const CvRect* rect1, const CvRect* rect2 ); + +/* Finds coordinates of the box vertices */ +CVAPI(void) cvBoxPoints( CvBox2D box, CvPoint2D32f pt[4] ); + +/* Initializes sequence header for a matrix (column or row vector) of points - + a wrapper for cvMakeSeqHeaderForArray (it does not initialize bounding rectangle!!!) 
*/ +CVAPI(CvSeq*) cvPointSeqFromMat( int seq_kind, const CvArr* mat, + CvContour* contour_header, + CvSeqBlock* block ); + +/* Checks whether the point is inside polygon, outside, on an edge (at a vertex). + Returns positive, negative or zero value, correspondingly. + Optionally, measures a signed distance between + the point and the nearest polygon edge (measure_dist=1) */ +CVAPI(double) cvPointPolygonTest( const CvArr* contour, + CvPoint2D32f pt, int measure_dist ); + +/****************************************************************************************\ +* Histogram functions * +\****************************************************************************************/ + +/* Creates new histogram */ +CVAPI(CvHistogram*) cvCreateHist( int dims, int* sizes, int type, + float** ranges CV_DEFAULT(NULL), + int uniform CV_DEFAULT(1)); + +/* Assignes histogram bin ranges */ +CVAPI(void) cvSetHistBinRanges( CvHistogram* hist, float** ranges, + int uniform CV_DEFAULT(1)); + +/* Creates histogram header for array */ +CVAPI(CvHistogram*) cvMakeHistHeaderForArray( + int dims, int* sizes, CvHistogram* hist, + float* data, float** ranges CV_DEFAULT(NULL), + int uniform CV_DEFAULT(1)); + +/* Releases histogram */ +CVAPI(void) cvReleaseHist( CvHistogram** hist ); + +/* Clears all the histogram bins */ +CVAPI(void) cvClearHist( CvHistogram* hist ); + +/* Finds indices and values of minimum and maximum histogram bins */ +CVAPI(void) cvGetMinMaxHistValue( const CvHistogram* hist, + float* min_value, float* max_value, + int* min_idx CV_DEFAULT(NULL), + int* max_idx CV_DEFAULT(NULL)); + + +/* Normalizes histogram by dividing all bins by sum of the bins, multiplied by . 
+ After that sum of histogram bins is equal to */ +CVAPI(void) cvNormalizeHist( CvHistogram* hist, double factor ); + + +/* Clear all histogram bins that are below the threshold */ +CVAPI(void) cvThreshHist( CvHistogram* hist, double threshold ); + + +/* Compares two histogram */ +CVAPI(double) cvCompareHist( const CvHistogram* hist1, + const CvHistogram* hist2, + int method); + +/* Copies one histogram to another. Destination histogram is created if + the destination pointer is NULL */ +CVAPI(void) cvCopyHist( const CvHistogram* src, CvHistogram** dst ); + + +/* Calculates bayesian probabilistic histograms + (each or src and dst is an array of histograms */ +CVAPI(void) cvCalcBayesianProb( CvHistogram** src, int number, + CvHistogram** dst); + +/* Calculates array histogram */ +CVAPI(void) cvCalcArrHist( CvArr** arr, CvHistogram* hist, + int accumulate CV_DEFAULT(0), + const CvArr* mask CV_DEFAULT(NULL) ); + +CV_INLINE void cvCalcHist( IplImage** image, CvHistogram* hist, + int accumulate CV_DEFAULT(0), + const CvArr* mask CV_DEFAULT(NULL) ) +{ + cvCalcArrHist( (CvArr**)image, hist, accumulate, mask ); +} + +/* Calculates back project */ +CVAPI(void) cvCalcArrBackProject( CvArr** image, CvArr* dst, + const CvHistogram* hist ); +#define cvCalcBackProject(image, dst, hist) cvCalcArrBackProject((CvArr**)image, dst, hist) + + +/* Does some sort of template matching but compares histograms of + template and each window location */ +CVAPI(void) cvCalcArrBackProjectPatch( CvArr** image, CvArr* dst, CvSize range, + CvHistogram* hist, int method, + double factor ); +#define cvCalcBackProjectPatch( image, dst, range, hist, method, factor ) \ + cvCalcArrBackProjectPatch( (CvArr**)image, dst, range, hist, method, factor ) + + +/* calculates probabilistic density (divides one histogram by another) */ +CVAPI(void) cvCalcProbDensity( const CvHistogram* hist1, const CvHistogram* hist2, + CvHistogram* dst_hist, double scale CV_DEFAULT(255) ); + +/* equalizes histogram of 8-bit 
single-channel image */ +CVAPI(void) cvEqualizeHist( const CvArr* src, CvArr* dst ); + + +/* Applies distance transform to binary image */ +CVAPI(void) cvDistTransform( const CvArr* src, CvArr* dst, + int distance_type CV_DEFAULT(CV_DIST_L2), + int mask_size CV_DEFAULT(3), + const float* mask CV_DEFAULT(NULL), + CvArr* labels CV_DEFAULT(NULL)); + + +/* Applies fixed-level threshold to grayscale image. + This is a basic operation applied before retrieving contours */ +CVAPI(double) cvThreshold( const CvArr* src, CvArr* dst, + double threshold, double max_value, + int threshold_type ); + +/* Applies adaptive threshold to grayscale image. + The two parameters for methods CV_ADAPTIVE_THRESH_MEAN_C and + CV_ADAPTIVE_THRESH_GAUSSIAN_C are: + neighborhood size (3, 5, 7 etc.), + and a constant subtracted from mean (...,-3,-2,-1,0,1,2,3,...) */ +CVAPI(void) cvAdaptiveThreshold( const CvArr* src, CvArr* dst, double max_value, + int adaptive_method CV_DEFAULT(CV_ADAPTIVE_THRESH_MEAN_C), + int threshold_type CV_DEFAULT(CV_THRESH_BINARY), + int block_size CV_DEFAULT(3), + double param1 CV_DEFAULT(5)); + +/* Fills the connected component until the color difference gets large enough */ +CVAPI(void) cvFloodFill( CvArr* image, CvPoint seed_point, + CvScalar new_val, CvScalar lo_diff CV_DEFAULT(cvScalarAll(0)), + CvScalar up_diff CV_DEFAULT(cvScalarAll(0)), + CvConnectedComp* comp CV_DEFAULT(NULL), + int flags CV_DEFAULT(4), + CvArr* mask CV_DEFAULT(NULL)); + +/****************************************************************************************\ +* Feature detection * +\****************************************************************************************/ + +/* Runs canny edge detector */ +CVAPI(void) cvCanny( const CvArr* image, CvArr* edges, double threshold1, + double threshold2, int aperture_size CV_DEFAULT(3) ); + +/* Calculates constraint image for corner detection + Dx^2 * Dyy + Dxx * Dy^2 - 2 * Dx * Dy * Dxy. 
+ Applying threshold to the result gives coordinates of corners */ +CVAPI(void) cvPreCornerDetect( const CvArr* image, CvArr* corners, + int aperture_size CV_DEFAULT(3) ); + +/* Calculates eigen values and vectors of 2x2 + gradient covariation matrix at every image pixel */ +CVAPI(void) cvCornerEigenValsAndVecs( const CvArr* image, CvArr* eigenvv, + int block_size, int aperture_size CV_DEFAULT(3) ); + +/* Calculates minimal eigenvalue for 2x2 gradient covariation matrix at + every image pixel */ +CVAPI(void) cvCornerMinEigenVal( const CvArr* image, CvArr* eigenval, + int block_size, int aperture_size CV_DEFAULT(3) ); + +/* Harris corner detector: + Calculates det(M) - k*(trace(M)^2), where M is 2x2 gradient covariation matrix for each pixel */ +CVAPI(void) cvCornerHarris( const CvArr* image, CvArr* harris_responce, + int block_size, int aperture_size CV_DEFAULT(3), + double k CV_DEFAULT(0.04) ); + +/* Adjust corner position using some sort of gradient search */ +CVAPI(void) cvFindCornerSubPix( const CvArr* image, CvPoint2D32f* corners, + int count, CvSize win, CvSize zero_zone, + CvTermCriteria criteria ); + +/* Finds a sparse set of points within the selected region + that seem to be easy to track */ +CVAPI(void) cvGoodFeaturesToTrack( const CvArr* image, CvArr* eig_image, + CvArr* temp_image, CvPoint2D32f* corners, + int* corner_count, double quality_level, + double min_distance, + const CvArr* mask CV_DEFAULT(NULL), + int block_size CV_DEFAULT(3), + int use_harris CV_DEFAULT(0), + double k CV_DEFAULT(0.04) ); + +/* Finds lines on binary image using one of several methods. + line_storage is either memory storage or 1 x CvMat, its + number of columns is changed by the function. 
+ method is one of CV_HOUGH_*; + rho, theta and threshold are used for each of those methods; + param1 ~ line length, param2 ~ line gap - for probabilistic, + param1 ~ srn, param2 ~ stn - for multi-scale */ +CVAPI(CvSeq*) cvHoughLines2( CvArr* image, void* line_storage, int method, + double rho, double theta, int threshold, + double param1 CV_DEFAULT(0), double param2 CV_DEFAULT(0)); + +/* Finds circles in the image */ +CVAPI(CvSeq*) cvHoughCircles( CvArr* image, void* circle_storage, + int method, double dp, double min_dist, + double param1 CV_DEFAULT(100), + double param2 CV_DEFAULT(100), + int min_radius CV_DEFAULT(0), + int max_radius CV_DEFAULT(0)); + +/* Fits a line into set of 2d or 3d points in a robust way (M-estimator technique) */ +CVAPI(void) cvFitLine( const CvArr* points, int dist_type, double param, + double reps, double aeps, float* line ); + + +/* Constructs kd-tree from set of feature descriptors */ +CVAPI(struct CvFeatureTree*) cvCreateKDTree(CvMat* desc); + +/* Constructs spill-tree from set of feature descriptors */ +CVAPI(struct CvFeatureTree*) cvCreateSpillTree( const CvMat* raw_data, + const int naive CV_DEFAULT(50), + const double rho CV_DEFAULT(.7), + const double tau CV_DEFAULT(.1) ); + +/* Release feature tree */ +CVAPI(void) cvReleaseFeatureTree(struct CvFeatureTree* tr); + +/* Searches feature tree for k nearest neighbors of given reference points, + searching (in case of kd-tree/bbf) at most emax leaves. */ +CVAPI(void) cvFindFeatures(struct CvFeatureTree* tr, const CvMat* query_points, + CvMat* indices, CvMat* dist, int k, int emax CV_DEFAULT(20)); + +/* Search feature tree for all points that are inlier to given rect region. + Only implemented for kd trees */ +CVAPI(int) cvFindFeaturesBoxed(struct CvFeatureTree* tr, + CvMat* bounds_min, CvMat* bounds_max, + CvMat* out_indices); + + +/* Construct a Locality Sensitive Hash (LSH) table, for indexing d-dimensional vectors of + given type. 
Vectors will be hashed L times with k-dimensional p-stable (p=2) functions. */ +CVAPI(struct CvLSH*) cvCreateLSH(struct CvLSHOperations* ops, int d, + int L CV_DEFAULT(10), int k CV_DEFAULT(10), + int type CV_DEFAULT(CV_64FC1), double r CV_DEFAULT(4), + int64 seed CV_DEFAULT(-1)); + +/* Construct in-memory LSH table, with n bins. */ +CVAPI(struct CvLSH*) cvCreateMemoryLSH(int d, int n, int L CV_DEFAULT(10), int k CV_DEFAULT(10), + int type CV_DEFAULT(CV_64FC1), double r CV_DEFAULT(4), + int64 seed CV_DEFAULT(-1)); + +/* Free the given LSH structure. */ +CVAPI(void) cvReleaseLSH(struct CvLSH** lsh); + +/* Return the number of vectors in the LSH. */ +CVAPI(unsigned int) LSHSize(struct CvLSH* lsh); + +/* Add vectors to the LSH structure, optionally returning indices. */ +CVAPI(void) cvLSHAdd(struct CvLSH* lsh, const CvMat* data, CvMat* indices CV_DEFAULT(0)); + +/* Remove vectors from LSH, as addressed by given indices. */ +CVAPI(void) cvLSHRemove(struct CvLSH* lsh, const CvMat* indices); + +/* Query the LSH n times for at most k nearest points; data is n x d, + indices and dist are n x k. At most emax stored points will be accessed. */ +CVAPI(void) cvLSHQuery(struct CvLSH* lsh, const CvMat* query_points, + CvMat* indices, CvMat* dist, int k, int emax); + + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/opencv2/imgproc/types_c.h b/opencv2/imgproc/types_c.h new file mode 100644 index 0000000..5a984fd --- /dev/null +++ b/opencv2/imgproc/types_c.h @@ -0,0 +1,538 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. 
+// Copyright (C) 2009, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. 
+// +//M*/ + +#ifndef __OPENCV_IMGPROC_TYPES_C_H__ +#define __OPENCV_IMGPROC_TYPES_C_H__ + +#include "opencv2/core/core_c.h" +#include "opencv2/imgproc/types_c.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* Connected component structure */ +typedef struct CvConnectedComp +{ + double area; /* area of the connected component */ + CvScalar value; /* average color of the connected component */ + CvRect rect; /* ROI of the component */ + CvSeq* contour; /* optional component boundary + (the contour might have child contours corresponding to the holes)*/ +} +CvConnectedComp; + +/* Image smooth methods */ +enum +{ + CV_BLUR_NO_SCALE =0, + CV_BLUR =1, + CV_GAUSSIAN =2, + CV_MEDIAN =3, + CV_BILATERAL =4 +}; + +/* Filters used in pyramid decomposition */ +enum +{ + CV_GAUSSIAN_5x5 = 7 +}; + +/* Inpainting algorithms */ +enum +{ + CV_INPAINT_NS =0, + CV_INPAINT_TELEA =1 +}; + +/* Special filters */ +enum +{ + CV_SCHARR =-1, + CV_MAX_SOBEL_KSIZE =7 +}; + +/* Constants for color conversion */ +enum +{ + CV_BGR2BGRA =0, + CV_RGB2RGBA =CV_BGR2BGRA, + + CV_BGRA2BGR =1, + CV_RGBA2RGB =CV_BGRA2BGR, + + CV_BGR2RGBA =2, + CV_RGB2BGRA =CV_BGR2RGBA, + + CV_RGBA2BGR =3, + CV_BGRA2RGB =CV_RGBA2BGR, + + CV_BGR2RGB =4, + CV_RGB2BGR =CV_BGR2RGB, + + CV_BGRA2RGBA =5, + CV_RGBA2BGRA =CV_BGRA2RGBA, + + CV_BGR2GRAY =6, + CV_RGB2GRAY =7, + CV_GRAY2BGR =8, + CV_GRAY2RGB =CV_GRAY2BGR, + CV_GRAY2BGRA =9, + CV_GRAY2RGBA =CV_GRAY2BGRA, + CV_BGRA2GRAY =10, + CV_RGBA2GRAY =11, + + CV_BGR2BGR565 =12, + CV_RGB2BGR565 =13, + CV_BGR5652BGR =14, + CV_BGR5652RGB =15, + CV_BGRA2BGR565 =16, + CV_RGBA2BGR565 =17, + CV_BGR5652BGRA =18, + CV_BGR5652RGBA =19, + + CV_GRAY2BGR565 =20, + CV_BGR5652GRAY =21, + + CV_BGR2BGR555 =22, + CV_RGB2BGR555 =23, + CV_BGR5552BGR =24, + CV_BGR5552RGB =25, + CV_BGRA2BGR555 =26, + CV_RGBA2BGR555 =27, + CV_BGR5552BGRA =28, + CV_BGR5552RGBA =29, + + CV_GRAY2BGR555 =30, + CV_BGR5552GRAY =31, + + CV_BGR2XYZ =32, + CV_RGB2XYZ =33, + CV_XYZ2BGR =34, + CV_XYZ2RGB =35, + + CV_BGR2YCrCb 
=36, + CV_RGB2YCrCb =37, + CV_YCrCb2BGR =38, + CV_YCrCb2RGB =39, + + CV_BGR2HSV =40, + CV_RGB2HSV =41, + + CV_BGR2Lab =44, + CV_RGB2Lab =45, + + CV_BayerBG2BGR =46, + CV_BayerGB2BGR =47, + CV_BayerRG2BGR =48, + CV_BayerGR2BGR =49, + + CV_BayerBG2RGB =CV_BayerRG2BGR, + CV_BayerGB2RGB =CV_BayerGR2BGR, + CV_BayerRG2RGB =CV_BayerBG2BGR, + CV_BayerGR2RGB =CV_BayerGB2BGR, + + CV_BGR2Luv =50, + CV_RGB2Luv =51, + CV_BGR2HLS =52, + CV_RGB2HLS =53, + + CV_HSV2BGR =54, + CV_HSV2RGB =55, + + CV_Lab2BGR =56, + CV_Lab2RGB =57, + CV_Luv2BGR =58, + CV_Luv2RGB =59, + CV_HLS2BGR =60, + CV_HLS2RGB =61, + + CV_BayerBG2BGR_VNG =62, + CV_BayerGB2BGR_VNG =63, + CV_BayerRG2BGR_VNG =64, + CV_BayerGR2BGR_VNG =65, + + CV_BayerBG2RGB_VNG =CV_BayerRG2BGR_VNG, + CV_BayerGB2RGB_VNG =CV_BayerGR2BGR_VNG, + CV_BayerRG2RGB_VNG =CV_BayerBG2BGR_VNG, + CV_BayerGR2RGB_VNG =CV_BayerGB2BGR_VNG, + + CV_BGR2HSV_FULL = 66, + CV_RGB2HSV_FULL = 67, + CV_BGR2HLS_FULL = 68, + CV_RGB2HLS_FULL = 69, + + CV_HSV2BGR_FULL = 70, + CV_HSV2RGB_FULL = 71, + CV_HLS2BGR_FULL = 72, + CV_HLS2RGB_FULL = 73, + + CV_LBGR2Lab = 74, + CV_LRGB2Lab = 75, + CV_LBGR2Luv = 76, + CV_LRGB2Luv = 77, + + CV_Lab2LBGR = 78, + CV_Lab2LRGB = 79, + CV_Luv2LBGR = 80, + CV_Luv2LRGB = 81, + + CV_BGR2YUV = 82, + CV_RGB2YUV = 83, + CV_YUV2BGR = 84, + CV_YUV2RGB = 85, + + CV_BayerBG2GRAY = 86, + CV_BayerGB2GRAY = 87, + CV_BayerRG2GRAY = 88, + CV_BayerGR2GRAY = 89, + + CV_YUV420i2RGB = 90, + CV_YUV420i2BGR = 91, + CV_YUV420sp2RGB = 92, + CV_YUV420sp2BGR = 93, + + CV_COLORCVT_MAX =100 +}; + + +/* Sub-pixel interpolation methods */ +enum +{ + CV_INTER_NN =0, + CV_INTER_LINEAR =1, + CV_INTER_CUBIC =2, + CV_INTER_AREA =3, + CV_INTER_LANCZOS4 =4 +}; + +/* ... 
and other image warping flags */ +enum +{ + CV_WARP_FILL_OUTLIERS =8, + CV_WARP_INVERSE_MAP =16 +}; + +/* Shapes of a structuring element for morphological operations */ +enum +{ + CV_SHAPE_RECT =0, + CV_SHAPE_CROSS =1, + CV_SHAPE_ELLIPSE =2, + CV_SHAPE_CUSTOM =100 +}; + +/* Morphological operations */ +enum +{ + CV_MOP_ERODE =0, + CV_MOP_DILATE =1, + CV_MOP_OPEN =2, + CV_MOP_CLOSE =3, + CV_MOP_GRADIENT =4, + CV_MOP_TOPHAT =5, + CV_MOP_BLACKHAT =6 +}; + +/* Spatial and central moments */ +typedef struct CvMoments +{ + double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03; /* spatial moments */ + double mu20, mu11, mu02, mu30, mu21, mu12, mu03; /* central moments */ + double inv_sqrt_m00; /* m00 != 0 ? 1/sqrt(m00) : 0 */ +} +CvMoments; + +/* Hu invariants */ +typedef struct CvHuMoments +{ + double hu1, hu2, hu3, hu4, hu5, hu6, hu7; /* Hu invariants */ +} +CvHuMoments; + +/* Template matching methods */ +enum +{ + CV_TM_SQDIFF =0, + CV_TM_SQDIFF_NORMED =1, + CV_TM_CCORR =2, + CV_TM_CCORR_NORMED =3, + CV_TM_CCOEFF =4, + CV_TM_CCOEFF_NORMED =5 +}; + +typedef float (CV_CDECL * CvDistanceFunction)( const float* a, const float* b, void* user_param ); + +/* Contour retrieval modes */ +enum +{ + CV_RETR_EXTERNAL=0, + CV_RETR_LIST=1, + CV_RETR_CCOMP=2, + CV_RETR_TREE=3 +}; + +/* Contour approximation methods */ +enum +{ + CV_CHAIN_CODE=0, + CV_CHAIN_APPROX_NONE=1, + CV_CHAIN_APPROX_SIMPLE=2, + CV_CHAIN_APPROX_TC89_L1=3, + CV_CHAIN_APPROX_TC89_KCOS=4, + CV_LINK_RUNS=5 +}; + +/* +Internal structure that is used for sequental retrieving contours from the image. +It supports both hierarchical and plane variants of Suzuki algorithm. 
+*/ +typedef struct _CvContourScanner* CvContourScanner; + +/* Freeman chain reader state */ +typedef struct CvChainPtReader +{ + CV_SEQ_READER_FIELDS() + char code; + CvPoint pt; + schar deltas[8][2]; +} +CvChainPtReader; + +/* initializes 8-element array for fast access to 3x3 neighborhood of a pixel */ +#define CV_INIT_3X3_DELTAS( deltas, step, nch ) \ + ((deltas)[0] = (nch), (deltas)[1] = -(step) + (nch), \ + (deltas)[2] = -(step), (deltas)[3] = -(step) - (nch), \ + (deltas)[4] = -(nch), (deltas)[5] = (step) - (nch), \ + (deltas)[6] = (step), (deltas)[7] = (step) + (nch)) + + +/****************************************************************************************\ +* Planar subdivisions * +\****************************************************************************************/ + +typedef size_t CvSubdiv2DEdge; + +#define CV_QUADEDGE2D_FIELDS() \ + int flags; \ + struct CvSubdiv2DPoint* pt[4]; \ + CvSubdiv2DEdge next[4]; + +#define CV_SUBDIV2D_POINT_FIELDS()\ + int flags; \ + CvSubdiv2DEdge first; \ + CvPoint2D32f pt; \ + int id; + +#define CV_SUBDIV2D_VIRTUAL_POINT_FLAG (1 << 30) + +typedef struct CvQuadEdge2D +{ + CV_QUADEDGE2D_FIELDS() +} +CvQuadEdge2D; + +typedef struct CvSubdiv2DPoint +{ + CV_SUBDIV2D_POINT_FIELDS() +} +CvSubdiv2DPoint; + +#define CV_SUBDIV2D_FIELDS() \ + CV_GRAPH_FIELDS() \ + int quad_edges; \ + int is_geometry_valid; \ + CvSubdiv2DEdge recent_edge; \ + CvPoint2D32f topleft; \ + CvPoint2D32f bottomright; + +typedef struct CvSubdiv2D +{ + CV_SUBDIV2D_FIELDS() +} +CvSubdiv2D; + + +typedef enum CvSubdiv2DPointLocation +{ + CV_PTLOC_ERROR = -2, + CV_PTLOC_OUTSIDE_RECT = -1, + CV_PTLOC_INSIDE = 0, + CV_PTLOC_VERTEX = 1, + CV_PTLOC_ON_EDGE = 2 +} +CvSubdiv2DPointLocation; + +typedef enum CvNextEdgeType +{ + CV_NEXT_AROUND_ORG = 0x00, + CV_NEXT_AROUND_DST = 0x22, + CV_PREV_AROUND_ORG = 0x11, + CV_PREV_AROUND_DST = 0x33, + CV_NEXT_AROUND_LEFT = 0x13, + CV_NEXT_AROUND_RIGHT = 0x31, + CV_PREV_AROUND_LEFT = 0x20, + CV_PREV_AROUND_RIGHT = 0x02 +} 
+CvNextEdgeType; + +/* get the next edge with the same origin point (counterwise) */ +#define CV_SUBDIV2D_NEXT_EDGE( edge ) (((CvQuadEdge2D*)((edge) & ~3))->next[(edge)&3]) + + +/* Contour approximation algorithms */ +enum +{ + CV_POLY_APPROX_DP = 0 +}; + +/* Shape matching methods */ +enum +{ + CV_CONTOURS_MATCH_I1 =1, + CV_CONTOURS_MATCH_I2 =2, + CV_CONTOURS_MATCH_I3 =3 +}; + +/* Shape orientation */ +enum +{ + CV_CLOCKWISE =1, + CV_COUNTER_CLOCKWISE =2 +}; + + +/* Convexity defect */ +typedef struct CvConvexityDefect +{ + CvPoint* start; /* point of the contour where the defect begins */ + CvPoint* end; /* point of the contour where the defect ends */ + CvPoint* depth_point; /* the farthest from the convex hull point within the defect */ + float depth; /* distance between the farthest point and the convex hull */ +} CvConvexityDefect; + + +/* Histogram comparison methods */ +enum +{ + CV_COMP_CORREL =0, + CV_COMP_CHISQR =1, + CV_COMP_INTERSECT =2, + CV_COMP_BHATTACHARYYA =3 +}; + +/* Mask size for distance transform */ +enum +{ + CV_DIST_MASK_3 =3, + CV_DIST_MASK_5 =5, + CV_DIST_MASK_PRECISE =0 +}; + +/* Distance types for Distance Transform and M-estimators */ +enum +{ + CV_DIST_USER =-1, /* User defined distance */ + CV_DIST_L1 =1, /* distance = |x1-x2| + |y1-y2| */ + CV_DIST_L2 =2, /* the simple euclidean distance */ + CV_DIST_C =3, /* distance = max(|x1-x2|,|y1-y2|) */ + CV_DIST_L12 =4, /* L1-L2 metric: distance = 2(sqrt(1+x*x/2) - 1)) */ + CV_DIST_FAIR =5, /* distance = c^2(|x|/c-log(1+|x|/c)), c = 1.3998 */ + CV_DIST_WELSCH =6, /* distance = c^2/2(1-exp(-(x/c)^2)), c = 2.9846 */ + CV_DIST_HUBER =7 /* distance = |x|<c ? x^2/2 : c(|x|-c/2), c = 1.345 */ +}; + +/* Threshold types */ +enum +{ + CV_THRESH_BINARY =0, /* value = value > threshold ? max_value : 0 */ + CV_THRESH_BINARY_INV =1, /* value = value > threshold ? 0 : max_value */ + CV_THRESH_TRUNC =2, /* value = value > threshold ? threshold : value */ + CV_THRESH_TOZERO =3, /* value = value > threshold ? value : 0 */ + CV_THRESH_TOZERO_INV =4, /* value = value > threshold ?
0 : value */ + CV_THRESH_MASK =7, + CV_THRESH_OTSU =8 /* use Otsu algorithm to choose the optimal threshold value; + combine the flag with one of the above CV_THRESH_* values */ +}; + +/* Adaptive threshold methods */ +enum +{ + CV_ADAPTIVE_THRESH_MEAN_C =0, + CV_ADAPTIVE_THRESH_GAUSSIAN_C =1 +}; + +/* FloodFill flags */ +enum +{ + CV_FLOODFILL_FIXED_RANGE =(1 << 16), + CV_FLOODFILL_MASK_ONLY =(1 << 17) +}; + + +/* Canny edge detector flags */ +enum +{ + CV_CANNY_L2_GRADIENT =(1 << 31) +}; + +/* Variants of a Hough transform */ +enum +{ + CV_HOUGH_STANDARD =0, + CV_HOUGH_PROBABILISTIC =1, + CV_HOUGH_MULTI_SCALE =2, + CV_HOUGH_GRADIENT =3 +}; + + +/* Fast search data structures */ +struct CvFeatureTree; +struct CvLSH; +struct CvLSHOperations; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/opencv2/opencv.hpp b/opencv2/opencv.hpp new file mode 100644 index 0000000..13ab7de --- /dev/null +++ b/opencv2/opencv.hpp @@ -0,0 +1,60 @@ +/*M/////////////////////////////////////////////////////////////////////////////////////// +// +// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. +// +// By downloading, copying, installing or using the software you agree to this license. +// If you do not agree to this license, do not download, install, +// copy or use the software. +// +// +// License Agreement +// For Open Source Computer Vision Library +// +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2010, Willow Garage Inc., all rights reserved. +// Third party copyrights are property of their respective owners. +// +// Redistribution and use in source and binary forms, with or without modification, +// are permitted provided that the following conditions are met: +// +// * Redistribution's of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// +// * Redistribution's in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// * The name of the copyright holders may not be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// This software is provided by the copyright holders and contributors "as is" and +// any express or implied warranties, including, but not limited to, the implied +// warranties of merchantability and fitness for a particular purpose are disclaimed. +// In no event shall the Intel Corporation or contributors be liable for any direct, +// indirect, incidental, special, exemplary, or consequential damages +// (including, but not limited to, procurement of substitute goods or services; +// loss of use, data, or profits; or business interruption) however caused +// and on any theory of liability, whether in contract, strict liability, +// or tort (including negligence or otherwise) arising in any way out of +// the use of this software, even if advised of the possibility of such damage. +// +//M*/ + +#ifndef __OPENCV_ALL_HPP__ +#define __OPENCV_ALL_HPP__ + +#include "opencv2/core/core_c.h" +#include "opencv2/core/core.hpp" +//#include "opencv2/flann/miniflann.hpp" +#include "opencv2/imgproc/imgproc_c.h" +#include "opencv2/imgproc/imgproc.hpp" +//#include "opencv2/video/video.hpp" +//#include "opencv2/features2d/features2d.hpp" +//#include "opencv2/objdetect/objdetect.hpp" +//#include "opencv2/calib3d/calib3d.hpp" +//#include "opencv2/ml/ml.hpp" +//#include "opencv2/highgui/highgui_c.h" +//#include "opencv2/highgui/highgui.hpp" +//#include "opencv2/contrib/contrib.hpp" + +#endif diff --git a/readme.txt b/readme.txt new file mode 100644 index 0000000..eb5baf2 --- /dev/null +++ b/readme.txt @@ -0,0 +1,60 @@ + +This project shows how to use OpenCV within Marmalade. 
It uses OpenCV 2.3.1 so you might need to re-import the OpenCV files. All license conditions are the same as in OpenCV. + +This is what we did: + +1. add all OpenCV source files to the mkb file. Also add the zlib package and subproject to the mkb file + +2. add options {enable-exceptions} to the mkb file + +3. change the cstdlib file (the one from the Marmalade installation!) to add abs definitions for int and short +add the following (between the XXX markers) after +# if !defined ( _STLP_LABS ) +inline long abs(long __x) { return _STLP_VENDOR_CSTD::labs(__x); } +//XXXXXXXXXXXX +inline int abs(int __x) { return _STLP_VENDOR_CSTD::labs((long)__x); } +inline long abs(short __x) { return _STLP_VENDOR_CSTD::labs((long)__x); } +//XXXXXXXXXXXX +we then copied the cstdlib file into the include folder + +4. change operations.hpp so that __GNUC__ will be considered undefined. You can do this, for example, by changing the #ifdef __GNUC__ to #ifdef __GNUC_Lior_ + +5. enlarge the stack (and preferably the memory, too) in the icf file: +[S3E] +MemSize=50485760 +SysStackSize=4000000 # almost 4mb stack size + +6. for x86 compilations, comment out the __cpuid call in system.cpp (search for Lior) + +that's it - enjoy! + +-- +Lior + +www.lgorithms.com + +Global setup: + Set up git + git config --global user.name "Your Name" + git config --global user.email lmessinger@lgorithms.com + + Next steps: + mkdir MarmaladeCV + cd MarmaladeCV + git init + touch README + git add README + git commit -m 'first commit' + git remote add origin git@github.com:lmessinger/MarmaladeCV.git + git push -u origin master + + Existing Git Repo? + cd existing_git_repo + git remote add origin git@github.com:lmessinger/MarmaladeCV.git + git push -u origin master + + Importing a Subversion Repo? + Check out the guide for step by step instructions.
+ + When you're done: + Continue \ No newline at end of file diff --git a/source/ExampleMain.cpp b/source/ExampleMain.cpp new file mode 100644 index 0000000..f41a3f1 --- /dev/null +++ b/source/ExampleMain.cpp @@ -0,0 +1,188 @@ +/*C-ScanMain.cpp + * + */ +#include "ExampleMain.h" +#include +#include +#include +#include +#include "../opencv2/opencv.hpp" +#include "../opencv2/core/core.hpp" + +#include "s3eImagePicker.h" +#include "Iw2D.h" +#include "IwJPEG.h" + +#include "camera.h" + +#define SCREEN_SIZE_X 320.0 +#define SCREEN_SIZE_Y 480.0 + +CIw2DImage* g_2dimage = NULL; +CIwTexture* g_Texture = NULL; + +CIwImage* ConvtImage(CIwImage* image) +{ + CIwImage::Format format = image->GetFormat(); + + int nChannels = image->GetPitch()/image->GetWidth(); + IplImage * colorRes = cvCreateImage( cvSize(image->GetWidth(),image->GetHeight()),IPL_DEPTH_8U, nChannels); + + if (colorRes == NULL) + return false; + + memcpy(colorRes->imageData,image->GetTexels(),colorRes->imageSize); + + IplImage * dstImage = cvCreateImage( cvSize(image->GetWidth(),image->GetHeight()),IPL_DEPTH_8U, 3 ); + //cvCvtColor(colorRes,dstImage, CV_BGR2GRAY); + cvNot(colorRes,dstImage); + // create a new image + uint32 * new_buf = new uint32[ image->GetWidth()* image->GetHeight()]; + memcpy( new_buf, (uchar *)dstImage->imageData, ( image->GetWidth()* image->GetHeight() * nChannels ) ); + + static CIwImage imageDest; + imageDest.SetFormat( format ); + imageDest.SetWidth( image->GetWidth() ); + imageDest.SetHeight( image->GetHeight() ); + imageDest.SetOwnedBuffers( (uint8*)new_buf, NULL ); + g_2dimage = Iw2DCreateImage( imageDest ); + //Iw2DDrawImage( image2D, CIwSVec2(0,0) ); + // delete image2D; + //Texture->CopyFromBuffer(Width, Height, CIwImage::PVRTC_2, Width / 8, texture_data, 0); + + return &imageDest; +} + +void GetScreen(); + +//======================================================= +// Function name : GetScreen +// Description : +// Return type : void 
+//======================================================= +void GetScreen() +{ + char path[1024]; + CIwTexture* g_Texture = new CIwTexture; + if(!Camera::Instance()->CameraScreenShot(path,1024)) + return; + else + { + s3eDebugOutputString("Ok, jpg pass it:"); + s3eDebugOutputString(path); + void* data = NULL; + int len = 0; + s3eFile *f = 0; + + if (!(f = s3eFileOpen( path, "rb"))) + return; + len = (int)s3eFileGetSize(f); + if (len <= 0) + return; + + data = s3eMalloc(len); + if (!data) + { + s3eFileClose(f); + return; + } + + uint32 rtn = s3eFileRead(data, 1, len, f); + s3eFileClose(f); + + if (rtn != (uint32)len) + { + s3eFree(data); + } + + JPEGTexture((char*) data,len,*g_Texture); + } + + //g_2dimage = Iw2DCreateImage(g_Texture->GetImage()); + + if(g_Texture ) + { + // convert + CIwImage* imgDest = ConvtImage(&g_Texture->GetImage()); + s3eDebugOutputString("Ok, end convert"); + float indexX = (float)s3eSurfaceGetInt(S3E_SURFACE_WIDTH)/(float)g_Texture->GetImage().GetWidth(); + float indexY = (float)s3eSurfaceGetInt(S3E_SURFACE_HEIGHT)/(float)g_Texture->GetImage().GetHeight(); + + } +} + + + + + +//======================================================= +// Function name : MainUpdate +// Description : +// Return type : bool +//======================================================= +bool MainUpdate() +{ + s3eDeviceYield(0); + s3eKeyboardUpdate(); + s3ePointerUpdate(); + + return true; +} + + +//======================================================= +// Function name : MainTerm +// Description : +// Return type : void +//======================================================= + +void MainTerm() +{ + + + IwGxTerminate(); + + +} + + +//======================================================= +// Function name : MainInit +// Description : +// Return type : void +//======================================================= +void MainInit() +{ + // initializing global environments + s3eDebugSetInt(S3E_DEBUG_TRACE_TO_FILE,1); + Iw2DInit(); + IwGxInit(); + + // 
initializing camera and taking a screenshot + Camera::Instance()->InitCamera(); + + GetScreen(); + +} + + + + +//======================================================= +// Function name : UIRender +// Description : +// Return type : void +//======================================================= +void UIRender() +{ + IwGxClear(IW_GX_COLOUR_BUFFER_F | IW_GX_DEPTH_BUFFER_F); + + if(g_2dimage) + { + Iw2DDrawImage(g_2dimage,CIwSVec2(0,0),CIwSVec2(s3eSurfaceGetInt(S3E_SURFACE_WIDTH),s3eSurfaceGetInt(S3E_SURFACE_HEIGHT))); + } + + IwGxFlush(); + IwGxSwapBuffers(); + +} diff --git a/source/ExampleMain.h b/source/ExampleMain.h new file mode 100644 index 0000000..5665f3b --- /dev/null +++ b/source/ExampleMain.h @@ -0,0 +1,23 @@ +/*C-ScanMain.h + * + */ + +#include "s3e.h" +#include "IwGx.h" +#include "IwUI.h" + + + +#include +#include + + + +void MainInit(); +bool MainUpdate(); +void MainTerm(); + +void UIRender(); + + + diff --git a/source/camera.cpp b/source/camera.cpp new file mode 100644 index 0000000..643e02e --- /dev/null +++ b/source/camera.cpp @@ -0,0 +1,498 @@ +/*camera.cpp + * + */ + +#include "ExampleMain.h" + + +#include "s3eDebug.h" +#include +#include "math.h" +#include "../opencv2/opencv.hpp" +#include "camera.h" + + +static s3eCameraCaptureResult* g_BufferedCapture = 0;; + char buf[128]; +//Alexsandr Evmenchik function + +//init static of camera class +Camera* Camera::CameraPtr = NULL; + + +CIwTexture* Camera::m_ImageTexture = NULL; +s3eCameraFrameRotation Camera::m_FrameRotation = S3E_CAMERA_FRAME_ROTNORMAL;//S3E_CAMERA_FRAME_ROT90; + + +CameraState Camera::m_Status = kUnSupported; + +//Function defenition of class Camera + + + + + + +//======================================================= +// Function name : Camera::cameraUpdate +// Description : +// Return type : int32 +// Argument : void* systemData +// Argument : void* userData +//======================================================= + +int32 Camera::cameraUpdate(void* systemData, void* userData) 
+{ + + if (m_Status != kStarted) + return 0; + + s3eCameraFrameData *data = (s3eCameraFrameData*)systemData; + + // If there is no texture, create one. + // This is a slow operation compared to memcpy so we don't want to do it every frame. + if (m_ImageTexture == NULL) + { + m_ImageTexture = new CIwTexture(); + m_ImageTexture->SetMipMapping(false); + m_ImageTexture->SetModifiable(true); + + m_ImageTexture->CopyFromBuffer(data->m_Width, data->m_Height, CIwImage::RGB_565, data->m_Pitch, (uint8*)data->m_Data, NULL); + m_ImageTexture->Upload(); + + + } + + // Copy the camera image data into the texture. Note that it does not get copied to VRAM at this point. + memcpy(m_ImageTexture->GetTexels(), data->m_Data, data->m_Height * data->m_Pitch); + + m_FrameRotation = data->m_Rotation; + + return 0; +} + + + + + + +//======================================================= +// Function name : Camera::InitCamera +// Description : +// Return type : bool +//======================================================= +bool Camera::InitCamera() +{ + if(s3eCameraAvailable()) + { + if (S3E_RESULT_ERROR == s3eCameraRegister(S3E_CAMERA_UPDATE_STREAMING, Camera::cameraUpdate, 0)) + return false; + m_Status = kStopped; + + + } + else + return false; + + return true; +} + + + + + +//======================================================= +// Function name : Camera::CameraStart +// Description : +// Return type : bool +//======================================================= + +bool Camera::CameraStart() +{ + if(m_Status == kStopped) + if (s3eCameraStart(S3E_CAMERA_STREAMING_SIZE_HINT_LARGEST, S3E_CAMERA_PIXEL_TYPE_RGB565_CONVERTED) == S3E_RESULT_SUCCESS) + return true; + return false; +} + + void Camera::StopCamera() +{ + if (m_Status == kStarted) + { + s3eCameraStop(); + s3eCameraUnRegister(S3E_CAMERA_UPDATE_STREAMING, Camera::cameraUpdate); + m_Status = kStopped; + } + +} + + + + + //======================================================= + // Function name : Camera::GetCameraState + // 
Description : + // Return type : CameraState + //======================================================= + +CameraState Camera::GetCameraState() +{ + return m_Status; +} + + +//======================================================= +// Function name : Camera::GetCamFrame +// Description : +// Return type : bool +// Argument : CIwImage& frame +//======================================================= +bool Camera::GetCamFrame(CIwImage& frame) +{ + if(m_Status == kStarted) + { + frame = this->m_ImageTexture->GetImage(); + return true; + } + + return false; +} + + + + + +//======================================================= +// Function name : Camera::CameraScreenShot +// Description : +// Return type : bool +// Argument : char* buff +// Argument : int buffSize +//======================================================= +bool Camera::CameraScreenShot(char* buff,int buffSize) +{ + bool res = false; + s3eResult result; + + if(m_Status == kStarted) + s3eCameraStop(); + // s3eCameraUnRegister(S3E_CAMERA_UPDATE_STREAMING, Camera::cameraUpdate); + if(s3eCameraCaptureAvailable()) + { + + res = s3eCameraCaptureIsFormatSupported(S3E_CAMERACAPTURE_FORMAT_JPG); + if(res) + { + result = s3eCameraCaptureToFile(buff,buffSize,S3E_CAMERACAPTURE_FORMAT_JPG); + if(result!=S3E_RESULT_SUCCESS) + { + char buf[128]; + sprintf (buf,"capture have error %d ",result); + s3eDebugOutputString(buf); + res = false; + } + + + } + + } + + if(m_Status == kStarted) + s3eCameraStart(S3E_CAMERA_STREAMING_SIZE_HINT_LARGEST, S3E_CAMERA_PIXEL_TYPE_RGB565_CONVERTED); + return res; +} + + + + +//**************************************** +//Border detect functions +//************************************* + + +//======================================================= +// Function name : angle +// Description : +// Return type : double +// Argument : cv::Point pt1 +// Argument : cv::Point pt2 +// Argument : cv::Point pt0 +//======================================================= +double angle( cv::Point pt1, 
cv::Point pt2, cv::Point pt0 ) +{ + double dx1 = pt1.x - pt0.x; + double dy1 = pt1.y - pt0.y; + double dx2 = pt2.x - pt0.x; + double dy2 = pt2.y - pt0.y; + return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10); +} + + +//======================================================= +// Function name : histogramEqualize +// Description : +// Return type : void +// Argument : const IplImage * const pSource +// Argument : IplImage * pTarget +//======================================================= +void histogramEqualize(const IplImage * const pSource, IplImage * pTarget) +{ + assert(pSource!=NULL); + assert(pTarget!=NULL); + assert(pSource->nChannels ==1); + assert(pTarget->nChannels ==1); + assert(pSource->width == pTarget->width); + assert(pSource->height == pTarget->height); + CvHistogram *hist; + uchar lut[1024]; + double lut1[1024]; + CvMat* lut_mat; + int hist_size = 256; + float range_0[]={0,256}; + float* ranges[] = { range_0 }; + + int high=0; + int low =0; + float hist_value = 0.0; + + hist = cvCreateHist(1, &hist_size, CV_HIST_ARRAY, ranges, 1); + lut_mat = cvCreateMatHeader( 1, 256, CV_8UC1 ); + cvSetData( lut_mat, lut, 0 ); + //cvCalcHist( &pSource, hist, 0, NULL ); + cvCalcHist( const_cast(&pSource), hist, 0, NULL ); + + //CUMULATIVE ARRAY + lut1[0] = 0; + for(int index = 0; index != 256; ++index) + { + // hist_value = cvQueryHistValue_1D(hist,index); + hist_value = ::cvGetReal1D(hist->bins,index); + lut1[index+1]= lut1[index] + hist_value; + } + + + //CALCULATE THE NEW LUT + float scale_factor; + scale_factor = 256.0f / (float) (pSource->width * pSource->height); + for (int index=0; index!=256; ++index) + { + lut[index]= (unsigned char)((float)lut1[index]*scale_factor); + } + //PERFORM IT ON THE CHANNEL + cvLUT( pSource, pTarget, lut_mat ); + cvReleaseMat( &lut_mat); + cvReleaseHist(&hist); +} + + +//======================================================= +// Function name : DrawContour +// Description : +// Return type : bool +// 
Argument : IplImage* colorRes +// Argument : bool contrast +// Argument : bool smooth +// Argument : bool dilate +// Argument : CIwSVec2* coords +//======================================================= +bool DrawContour(IplImage* colorRes, bool contrast,bool smooth, bool dilate,CIwSVec2* coords) +{ + bool result = true; + double maxSquare = 0.0; + double maxCosine = 0; + double square = 0; + CvSeq* pointsMax = NULL; + IplImage* gray = NULL; + IplImage* colored = NULL; + CvMemStorage* storage2 = NULL; + CvMemStorage* storageHull = NULL; + static char dbuf[256]; + + + gray = cvCreateImage(::cvGetSize(colorRes), IPL_DEPTH_8U, 1); + storage2 = cvCreateMemStorage(0); + storageHull = cvCreateMemStorage(0); + + + /*colored = cvCreateImage( cvSize(colorRes->width,colorRes->height),IPL_DEPTH_8U, 3 ); + IplImage* out = cvCreateImage( cvSize(colored->width/2,colored->height/2), colored->depth, colored->nChannels ); + //if(pyrDown) + { + ::cvPyrDown(colorRes,out); + ::cvPyrUp(out,colored); + } + cvFree_(out);*/ + CIw2DImage* image; + + s3eResult ret = s3eDebugSetInt(S3E_DEBUG_TRACE_TO_FILE,1); + + for( int c = 0; c < colorRes->nChannels; c++ ) + { + int ch[] = {c, 0}; + const CvArr* in[] = {colorRes}; + CvArr* out[] = {gray}; + ::cvMixChannels(in,1,out,1,ch,1); + + if(contrast) + histogramEqualize(gray,gray); + + if(smooth) + cvSmooth( gray, gray, CV_BLUR, 9, 9, 2, 2); + + int thresh = 50, N = 11; + cvCanny(gray, gray, 0, thresh, 3); + if(dilate) + ::cvDilate(gray,gray); + + + int total = 0; + int counter =0; + CvSeq* contour2; + + + + total = cvFindContours(gray, storage2, &contour2, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE); + if(total > 1) + { + while (contour2 != NULL) + { + if (contour2->elem_size > 0) + { + CvSeq* points = cvApproxPoly(contour2,sizeof(CvContour), storage2, CV_POLY_APPROX_DP,cvContourPerimeter(contour2)*0.02, 0); + int point_num = points->total; + int square = cvContourArea(points); + int convexity = cvCheckContourConvexity(points); + + 
sprintf (dbuf,"points: point_num=%d area=%d convexity=%d ",point_num, square, convexity); + s3eDebugOutputString(dbuf); + if(points->total == 4 && fabs(square)>1000 && convexity > 0) + { + ::CvSeqBlock * seg = points->first; + ::CvPoint poiunt[4]; + + for(int i=0; i<4; i++) + { + poiunt[i] = *((CvPoint*)(points->first->data)+i); + + } + maxCosine = 0; + for( int j = 2; j < 5; j++ ) + { + // find the maximum cosine of the angle between joint edges + double cosine = fabs(angle(poiunt[j%4], poiunt[j-2],poiunt[j-1])); + maxCosine = MAX(maxCosine, cosine); + sprintf (dbuf,"cosine=%f max=%f ",cosine, maxCosine); + s3eDebugOutputString(dbuf); + } + s3eDebugOutputString("here"); + s3eDebugTracePrintf ("max is=%f ",maxCosine); + + if( maxCosine < 0.7 ) + { + sprintf (dbuf,"max found=%f ",maxCosine); + s3eDebugOutputString(dbuf); + if (maxSquare < square) + { + maxSquare = square; + pointsMax = points; + + sprintf (dbuf,"maxS=%f pointsMax=%p",maxSquare,pointsMax ); + s3eDebugOutputString(dbuf); + } + + } + } + + } + + contour2 = contour2->h_next; + }//end while + ::cvFree_(contour2); + }//end if (total > 1) + + }//end for + //cvCvtColor(colorRes, gray, CV_RGB2GRAY); + if(pointsMax) + { + ::CvPoint poiunt[4]; + for(int i=0; i<4; i++){ + poiunt[i] = *((CvPoint*)(pointsMax->first->data)+i); + (coords+i)->x = poiunt[i].x; + (coords+i)->y = poiunt[i].y; + } + + } + else + result = false; + + cvFree_(storage2); + cvFree_(storageHull); + cvFree_(gray); + //cvFree_(colored); + + //cvFree_(pointsMax); + sprintf (dbuf,"result=%d pointsMax=%p maxSquare=%f (%d,%d) (%d,%d) (%d,%d) (%d,%d)",result, pointsMax, maxSquare, (coords)->x, (coords)->y, + (coords+1)->x, (coords+1)->y, (coords+2)->x, (coords+2)->y, (coords+3)->x, (coords+3)->y ); + s3eDebugOutputString(dbuf); + s3eDebugTraceFlush (); + return result; +} + + + + + + + +//======================================================= +// Function name : FindContour +// Description : +// Return type : bool +// Argument : CIwImage* 
m_ImageRes +// Argument : CIwSVec2* coords +//======================================================= + +bool FindContour(CIwImage* m_ImageRes,CIwSVec2* coords) +{ + + + IplImage* colorRes = NULL; + + + // CIwImage tempBGR; + // CIwImage oldFormat(*m_ImageRes); + // tempBGR.SetFormat(CIwImage::BGR_888); + // oldFormat.ConvertToImage(&tempBGR); + + + //IplImage* colorRes = cvCreateImage( cvSize(tempBGR.GetWidth(),tempBGR.GetHeight()),IPL_DEPTH_8U, 3 ); + CIwImage::Format format = m_ImageRes->GetFormat(); + + + colorRes = cvCreateImage( cvSize(m_ImageRes->GetWidth(),m_ImageRes->GetHeight()),IPL_DEPTH_8U, m_ImageRes->GetPitch()/m_ImageRes->GetWidth() ); + + + if(colorRes==NULL||coords==NULL) + { + s3eDebugOutputString("Bad, no coords or color Res"); + return false; + } + //colorRes = cvCreateImage( cvSize(m_ImageRes->GetWidth(),m_ImageRes->GetHeight()),IPL_DEPTH_8U, 3 ); + //memcpy(colorRes->imageData,tempBGR.GetTexels(),colorRes->imageSize); + memcpy(colorRes->imageData,m_ImageRes->GetTexels(),colorRes->imageSize); + + bool res = false; + // DrawContour(IplImage* colorRes, bool contrast,bool smooth, bool dilate,CIwSVec2* coords) + res = DrawContour(colorRes,false,true,true,coords); + + if(!res) + res = DrawContour(colorRes,true,false,false,coords); + if(!res) + res = DrawContour(colorRes,true,true,false,coords); + if(!res) + res = DrawContour(colorRes,true,true,true,coords); + + ::cvFree_(colorRes); + + return res; +} \ No newline at end of file diff --git a/source/camera.h b/source/camera.h new file mode 100644 index 0000000..c8efddc --- /dev/null +++ b/source/camera.h @@ -0,0 +1,63 @@ + + +#ifndef _AE_CAMERA_H +#define _AE_CAMERA_H + +#include "s3eCamera.h" +#include "s3eCameraCapture.h" + +//Declaration Camera class + + +typedef enum +{ + kStarted, + kStopped, + kUnSupported +} CameraState; +class Camera +{ +//variable + static Camera* CameraPtr; + + static CameraState m_Status; + + static CIwTexture* m_ImageTexture; + static s3eCameraFrameRotation 
m_FrameRotation; + + +//functions + Camera() + {}; + +public: + + //functions + static Camera* Instance() + { + if(!CameraPtr) + CameraPtr = new Camera; + + return CameraPtr; + } + bool InitCamera(); + static void StopCamera(); + static bool CameraStart(); + CIwTexture* GetFrameTexture() + { + if (m_ImageTexture != NULL) + { + m_ImageTexture->ChangeTexels(m_ImageTexture->GetTexels(), CIwImage::RGB_565); + } + return m_ImageTexture; + } + static int32 cameraUpdate(void* systemData, void* userData); + bool GetCamFrame(CIwImage& frame); + + bool CameraScreenShot(char* buff,int buffSize); + static CameraState GetCameraState(); + +}; + + +#endif \ No newline at end of file diff --git a/source/main.cpp b/source/main.cpp new file mode 100644 index 0000000..33adb84 --- /dev/null +++ b/source/main.cpp @@ -0,0 +1,32 @@ +/* + * This file is part of the Marmalade SDK Code Samples. + * + * Copyright (C) 2001-2011 Ideaworks3D Ltd. + * All Rights Reserved. + * + * This source code is intended only as a supplement to Ideaworks Labs + * Development Tools and/or on-line documentation. + * + * THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY + * KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A + * PARTICULAR PURPOSE. + */ +#include "ExampleMain.h" + +//----------------------------------------------------------------------------- +// Main global function +//----------------------------------------------------------------------------- +int main() +{ + MainInit(); + // main loop + while (1) + { + if (!MainUpdate()) + break; + UIRender(); + } + MainTerm(); + return 0; +}