Initial commit.

Final release of the project Anonymizer (2015).
Project settings for Qt Creator (ver. 3.6).
2016-01-25 18:17:34 +01:00
commit 22dbc25cce
479 changed files with 141991 additions and 0 deletions

@@ -0,0 +1,88 @@
/** @file
*
* Template matching observation model for particle filter
* CvParticleState s must have s.x, s.y, s.width, s.height, s.angle
*/
/* The MIT License
*
* Copyright (c) 2008, Naotoshi Seo <sonots(at)sonots.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef CV_PARTICLE_OBSERVE_TEMPLATE_H
#define CV_PARTICLE_OBSERVE_TEMPLATE_H
#include "<opencv\cvparticle.h>"
#include "<opencv\cvrect32f.h>"
#include "<opencv\cvcropimageroi.h>"
using namespace std;
/********************* Globals **********************************/
int num_observes = 1;
CvSize feature_size = cvSize(24, 24);
/******************** Function Prototypes **********************/
#ifndef NO_DOXYGEN
void cvParticleObserveMeasure( CvParticle* p, IplImage* frame, IplImage* reference );
#endif
/**
* Measure and weight particles.
*
* The proposal function q is set to p(x_t|x_t-1) as in SIR/Condensation,
* so the resulting "weights" are proportional to the likelihood
* (they are normalized later).
* Rewrite this function if you want to use a different proposal function q.
*
* CvParticleState s must have s.x, s.y, s.width, s.height, s.angle
*
* @param p         particle filter structure
* @param frame     current frame of the video
* @param reference reference (template) image
*/
void cvParticleObserveMeasure( CvParticle* p, IplImage* frame, IplImage *reference )
{
int i;
double likeli;
IplImage *patch;
IplImage *resize;
resize = cvCreateImage( feature_size, frame->depth, frame->nChannels );
for( i = 0; i < p->num_particles; i++ )
{
CvParticleState s = cvParticleStateGet( p, i );
CvBox32f box32f = cvBox32f( s.x, s.y, s.width, s.height, s.angle );
CvRect32f rect32f = cvRect32fFromBox32f( box32f );
CvRect rect = cvRectFromRect32f( rect32f );
patch = cvCreateImage( cvSize(rect.width,rect.height), frame->depth, frame->nChannels );
cvCropImageROI( frame, patch, rect32f );
cvResize( patch, resize );
// Log-likelihood under a Gaussian model: p is proportional to
// exp( -d^2 / sigma^2 ), so log p ~ -d^2. sigma can be omitted because
// a common parameter does not affect the ML estimate.
likeli = -cvNorm( resize, reference, CV_L2 );
cvmSet( p->weights, 0, i, likeli );
cvReleaseImage( &patch );
}
cvReleaseImage( &resize );
}
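/* A minimal tracking-loop sketch showing how this observation model is
 * typically wired into the filter. This is a hedged example: it assumes
 * the cvparticle.h API as I recall it -- cvParticleCreate( num_states,
 * num_observes, num_particles, logweight ), cvParticleInit,
 * cvParticleTransition, cvParticleNormalize, cvParticleResample -- and
 * frame/capture/reference/std are hypothetical variables:
 *
 *   CvParticle *p = cvParticleCreate( num_states, num_observes, 100, true );
 *   cvParticleStateConfig( p, cvGetSize( frame ), std );
 *   cvParticleInit( p );
 *   while( (frame = cvQueryFrame( capture )) != NULL ) {
 *       cvParticleTransition( p );                       // predict
 *       cvParticleObserveMeasure( p, frame, reference ); // weight
 *       cvParticleNormalize( p );
 *       cvParticleResample( p );
 *   }
 */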
#endif

@@ -0,0 +1,185 @@
/** @file
*
* Moghaddam's PCA DIFS + DFFS (distance-in-feature-space + distance-from-feature-space)
* observation model for particle filter
* CvParticleState must have x, y, width, height, and angle
*/
/* The MIT License
*
* Copyright (c) 2008, Naotoshi Seo <sonots(at)sonots.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef CV_PARTICLE_OBSERVE_PCADIFFS_H
#define CV_PARTICLE_OBSERVE_PCADIFFS_H
#include "cvparticle.h"
#include "cvrect32f.h"
#include "cvcropimageroi.h"
#include "cvpcadiffs.h"
#include "cvgaussnorm.h"
#include <iostream>
using namespace std;
/********************************* Globals ******************************************/
int num_observes = 1;
CvSize feature_size = cvSize(24, 24);
string data_dir = "";
string data_pcaval = "pcaval.xml";
string data_pcavec = "pcavec.xml";
string data_pcaavg = "pcaavg.xml";
/******************************* Globals in this file ******************************/
CvMat *eigenvalues;
CvMat *eigenvectors;
CvMat *eigenavg;
/****************************** Function Prototypes ********************************/
#ifndef NO_DOXYGEN
void cvParticleObserveInitialize();
void cvParticleObserveFinalize();
void icvPreprocess( const IplImage* patch, CvMat *mat );
void icvGetFeatures( const CvParticle* p, const IplImage* frame, CvMat* features );
void cvParticleObserveMeasure( CvParticle* p, IplImage* frame );
#endif
/****************************** Functions ******************************************/
/**
* Initialization
*/
void cvParticleObserveInitialize()
{
string filename;
filename = data_dir + data_pcaval;
if( (eigenvalues = (CvMat*)cvLoad( filename.c_str() )) == NULL ) {
cerr << filename << " is not loadable." << endl << flush;
exit( 1 );
}
filename = data_dir + data_pcavec;
if( (eigenvectors = (CvMat*)cvLoad( filename.c_str() )) == NULL ) {
cerr << filename << " is not loadable." << endl << flush;
exit( 1 );
}
filename = data_dir + data_pcaavg;
if( (eigenavg = (CvMat*)cvLoad( filename.c_str() )) == NULL ) {
cerr << filename << " is not loadable." << endl << flush;
exit( 1 );
}
}
/**
* Finalization
*/
void cvParticleObserveFinalize()
{
cvReleaseMat( &eigenvalues );
cvReleaseMat( &eigenvectors );
cvReleaseMat( &eigenavg );
}
/**
* Preprocess in the same way as was done when training the PCA subspace
*/
void icvPreprocess( const IplImage* patch, CvMat *mat )
{
IplImage *gry;
if( patch->nChannels != 1 ) {
gry = cvCreateImage( cvGetSize(patch), patch->depth, 1 );
cvCvtColor( patch, gry, CV_BGR2GRAY );
} else {
gry = (IplImage*)patch;
}
IplImage *resize = cvCreateImage( cvSize(mat->rows, mat->cols), patch->depth, 1 );
cvResize( gry, resize );
cvConvert( resize, mat );
cvImgGaussNorm( mat, mat );
cvReleaseImage( &resize );
if( gry != patch )
cvReleaseImage( &gry );
}
/**
* Get observation features
*
* CvParticleState must have x, y, width, height, angle
*/
void icvGetFeatures( const CvParticle* p, const IplImage* frame, CvMat* features )
{
int feature_height = feature_size.height;
int feature_width = feature_size.width;
//cvNamedWindow( "patch" );
CvMat* normed = cvCreateMat( feature_height, feature_width, CV_64FC1 );
CvMat* normedT = cvCreateMat( feature_width, feature_height, CV_64FC1 );
CvMat* feature, featurehdr;
IplImage *patch;
for( int n = 0; n < p->num_particles; n++ ) {
CvParticleState s = cvParticleStateGet( p, n );
CvBox32f box32f = cvBox32f( s.x, s.y, s.width, s.height, s.angle );
CvRect32f rect32f = cvRect32fFromBox32f( box32f );
// get image patch and preprocess
patch = cvCreateImage( cvSize( cvRound( s.width ), cvRound( s.height ) ),
frame->depth, frame->nChannels );
cvCropImageROI( (IplImage*)frame, patch, rect32f );
//cvShowImage( "patch", patch );
//cvWaitKey( 10 );
icvPreprocess( patch, normed );
cvReleaseImage( &patch );
// vectorize
cvT( normed, normedT ); // transpose so vectorization matches MATLAB's column-major reshape
feature = cvReshape( normedT, &featurehdr, 1, feature_height * feature_width );
cvSetCol( feature, features, n );
}
cvReleaseMat( &normedT );
cvReleaseMat( &normed );
}
/**
* Measure and weight particles.
*
* The proposal function q is set to p(x_t|x_t-1) as in SIR/Condensation,
* so the resulting "weights" are proportional to the likelihood
* (they are normalized later).
* Rewrite this function if you want to use a different proposal function q.
*
* CvParticleState s must have s.x, s.y, s.width, s.height, s.angle
*
* @param p     particle filter structure
* @param frame current frame of the video
*/
void cvParticleObserveMeasure( CvParticle* p, IplImage* frame )
{
int feature_height = feature_size.height;
int feature_width = feature_size.width;
// extract features from particle states
CvMat* features = cvCreateMat( feature_height*feature_width, p->num_particles, CV_64FC1 );
icvGetFeatures( p, frame, features );
// Likelihood measurements
cvMatPcaDiffs( features, eigenavg, eigenvalues, eigenvectors, p->weights, 0, TRUE);
}
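/* A note on the model behind cvMatPcaDiffs (a summary of Moghaddam's
 * DIFS + DFFS as I understand it, not taken from the original file): with
 * y = W^T (x - avg) the projection onto the first M principal components,
 *   DIFS = sum_{i=1..M} y_i^2 / lambda_i          (Mahalanobis distance in-subspace)
 *   DFFS = ||x - avg||^2 - sum_{i=1..M} y_i^2     (residual from-subspace)
 * and the log-likelihood is, up to constants,
 *   log p(x) ~ -DIFS/2 - DFFS/(2*rho)
 * where rho estimates the average of the discarded eigenvalues.
 */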
#endif

@@ -0,0 +1,443 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by the Ministry of the Interior of the Czech Republic.
//
//------------------------------------------------------------------------------
/*!
@file observetemplate.h
@brief Header file
@details Particle evaluation functions
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#ifndef CV_PARTICLE_OBSERVE_TEMPLATE_H
#define CV_PARTICLE_OBSERVE_TEMPLATE_H
#include "opencvx/cvparticle.h"
#include "opencvx/cvrect32f.h"
#include "opencvx/cvcropimageroi.h"
#include "state.h"
#define HIST_SIZE 128
#define DIVIDER 4
using namespace std;
/******************** Function Prototypes **********************/
#ifndef NO_DOXYGEN
/*!
** Initialization function for particle evaluation by color histogram.
** Histograms of the R, G and B color channels of the reference image are
** created (the reference image itself is not modified) and stored at the
** given memory addresses.
**
** @param reference Reference image.
** @param histReferenceRed Reference histogram of the red channel.
** @param histReferenceGreen Reference histogram of the green channel.
** @param histReferenceBlue Reference histogram of the blue channel.
**/
void initializeRGBHist(IplImage *reference, CvHistogram** histReferenceRed,
CvHistogram** histReferenceGreen, CvHistogram** histReferenceBlue);
/*!
** Initialization function for particle evaluation by grayscale histogram.
** A histogram of the intensities of the reference image is created
** (the reference image itself is not modified) and stored at the given
** memory address.
**
** @param reference Reference image.
** @param histReferenceGray Reference histogram of the grayscale image.
**/
void initializeGrayHist(IplImage *reference, CvHistogram** histReferenceGray);
/*!
** Initialization function for particle evaluation by a hybrid method.
** The reference image is divided into nx*ny parts. Each part is evaluated by
** a color histogram implemented as an array of matrices. Each part of the
** reference image corresponds to one matrix, which has 3 rows (one for each
** of the R, G and B color channels) and as many columns as there are
** histogram bins.
**
** @param reference Reference image.
** @param matRef Array of matrices with histograms.
** @param nx Count of the horizontal parts.
** @param ny Count of the vertical parts.
**/
void initializeHyb(IplImage *reference, CvMat **matRef, int nx, int ny);
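/*!
** A minimal allocation sketch for matRef (a hedged example; it follows the
** bin layout used by initializeHyb and particleEvalHybrid below: 3 rows,
** one per color channel, and (256/DIVIDER)+1 columns, one per quantized
** intensity bin; nx and ny are example values):
** @code
** const int nx = 3, ny = 3;  // 3x3 grid of image parts
** CvMat* matRef[nx * ny];
** for (int i = 0; i < nx * ny; i++) {
**     matRef[i] = cvCreateMat(3, (256 / DIVIDER) + 1, CV_8UC1);
**     cvZero(matRef[i]);     // histogram counts start at zero
** }
** initializeHyb(reference, matRef, nx, ny);
** @endcode
**/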
/*!
** Particle evaluation function using color histograms. For each particle,
** histograms of the individual color channels are computed and compared to
** the reference histograms.
**
** @param p Pointer to the particles.
** @param frame Current image in the video stream.
** @param histReferenceRed Reference histogram of the red channel.
** @param histReferenceGreen Reference histogram of the green channel.
** @param histReferenceBlue Reference histogram of the blue channel.
** @param featSize Size of each particle (particles are resized to this size).
** @param numParticlesDyn Current number of particles.
**/
void particleEvalRGBHist( CvParticle* p, IplImage* frame, CvHistogram* histReferenceRed, CvHistogram* histReferenceGreen,
CvHistogram* histReferenceBlue, CvSize featSize, int numParticlesDyn);
/*!
** Particle evaluation function using a grayscale histogram. For each
** particle, an intensity histogram is computed and compared to the
** reference histogram.
**
** @param p Pointer to the particles.
** @param frame Current image of the video.
** @param histReferenceGray Reference grayscale histogram.
** @param featSize Size of each particle (particles are resized to this size).
** @param numParticlesDyn Current number of particles.
**/
void particleEvalGrayHist( CvParticle* p, IplImage* frame, CvHistogram* histReferenceGray, CvSize featSize, int numParticlesDyn);
/*!
** Particle evaluation function using an elementary pixel-by-pixel method.
** Each particle is compared to the reference image using the function cvNorm(...).
**
** @param p Pointer to the particles.
** @param frame Current image of the video.
** @param reference Reference image.
** @param featSize Size of each particle (particles are resized to this size).
** @param numParticlesDyn Current number of particles.
**/
void particleEvalDefault( CvParticle* p, IplImage* frame, IplImage *reference,CvSize featSize, int numParticlesDyn );
/*!
** Particle evaluation function using a hybrid method. For each particle an array of histograms (matrices, see function @ref initializeHyb)
** is created and these are then compared to the reference histograms.
**
** @param p Pointer to the particles.
** @param frame Current image of the video.
** @param reference Reference image.
** @param matRef Array of matrices with histograms.
** @param nx Count of the horizontal parts.
** @param ny Count of the vertical parts.
** @param featSize Size of each particle (particles are resized to this size).
** @param numParticlesDyn Current number of particles.
**/
void particleEvalHybrid( CvParticle* p, IplImage* frame, IplImage *reference, CvMat **matRef, int nx, int ny,CvSize featSize, int numParticlesDyn );
#endif
/***************************************************************/
void initializeRGBHist(IplImage *reference, CvHistogram** histReferenceRed,
CvHistogram** histReferenceGreen, CvHistogram** histReferenceBlue)
{
int hist_size = HIST_SIZE;
IplImage* referenceRed = cvCreateImage(cvSize(reference->width,reference->height), IPL_DEPTH_8U, 1);
IplImage* referenceGreen = cvCreateImage(cvSize(reference->width,reference->height), IPL_DEPTH_8U, 1);
IplImage* referenceBlue = cvCreateImage(cvSize(reference->width,reference->height), IPL_DEPTH_8U, 1);
cvSplit(reference, referenceRed, referenceGreen, referenceBlue, NULL);
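// Note: cvSplit fills the outputs in channel order; OpenCV images are
// typically BGR, so "referenceRed" actually receives the blue channel.
// The comparison stays consistent because particleEvalRGBHist splits the
// same way.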
*histReferenceRed = cvCreateHist(1, &hist_size, CV_HIST_ARRAY);
*histReferenceGreen = cvCreateHist(1, &hist_size, CV_HIST_ARRAY);
*histReferenceBlue = cvCreateHist(1, &hist_size, CV_HIST_ARRAY);
cvCalcHist( &referenceRed, *histReferenceRed, 0, NULL );
cvCalcHist( &referenceGreen, *histReferenceGreen, 0, NULL );
cvCalcHist( &referenceBlue, *histReferenceBlue, 0, NULL );
}
void initializeGrayHist(IplImage *reference, CvHistogram** histReferenceGray)
{
int hist_size = HIST_SIZE;
IplImage* referenceGray = cvCreateImage(cvSize(reference->width,reference->height), IPL_DEPTH_8U, 1);
cvCvtColor(reference, referenceGray, CV_BGR2GRAY);
*histReferenceGray = cvCreateHist(1, &hist_size, CV_HIST_ARRAY);
cvCalcHist(&referenceGray, *histReferenceGray, 0, NULL);
}
void initializeHyb(IplImage *reference, CvMat **matRef, int nx, int ny)
{
int a,b,x,y;
int i = 0;
int stepX = reference->width/nx;
int stepY = reference->height/ny;
uchar *ptrRef = NULL;
for(a = 0; a < ny; a++)
{
for(b = 0; b < nx; b++)
{
for(y = a * stepY; y < ((a + 1) * stepY); y++)
{
if(y < reference->height)
{
ptrRef = (uchar*) (reference->imageData + y * reference->widthStep);
for(x = b * stepX; x < ((b + 1) * stepX); x++)
{
if(x < reference->width)
{
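// Rows 0/1/2 of matRef[i] hold the histograms of channels 0/1/2 of the
// interleaved pixel data; values are quantized into bins of width DIVIDER.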
(*(matRef[i]->data.ptr + (*(ptrRef+3*x))/DIVIDER))++;
(*(matRef[i]->data.ptr + matRef[i]->cols + (*(ptrRef + 3*x+1))/DIVIDER))++;
(*(matRef[i]->data.ptr + matRef[i]->cols * 2 + (*(ptrRef + 3*x+2))/DIVIDER))++;
}
}
}
}
i++;
}
}
}
void initializeRGB2(IplImage *reference, CvMat *matRef, int nx, int ny)
{
int y,x;
uchar *ptrRef = NULL;
for(y = 0; y < 24; y++) // NOTE: assumes a 24x24 (feature-sized) reference
{
ptrRef = (uchar*) (reference->imageData + y * reference->widthStep);
for(x = 0; x < 24; x++)
{
(*(matRef->data.ptr + (*(ptrRef+3*x))/DIVIDER))++;
(*(matRef->data.ptr + matRef->cols + (*(ptrRef + 3*x+1))/DIVIDER))++;
(*(matRef->data.ptr + matRef->cols * 2 + (*(ptrRef + 3*x+2))/DIVIDER))++;
}
}
}
void particleEvalRGBHist( CvParticle* p, IplImage* frame, CvHistogram* histReferenceRed,CvHistogram* histReferenceGreen,
CvHistogram* histReferenceBlue, CvSize featSize, int numParticlesDyn )
{
int i;
double likeli;
double likeliRed;
double likeliGreen;
double likeliBlue;
IplImage *patch;
IplImage *resize;
resize = cvCreateImage( featSize, frame->depth, frame->nChannels );
int hist_size = HIST_SIZE;
IplImage* frameRed;
IplImage* frameGreen;
IplImage* frameBlue;
CvHistogram* histFrameRed;
CvHistogram* histFrameGreen;
CvHistogram* histFrameBlue;
for( i = 0; i < numParticlesDyn; i++ )
{
CvParticleState s = cvParticleStateGet( p, i );
CvBox32f box32f = cvBox32f( s.x, s.y, s.width, s.height, s.angle );
CvRect32f rect32f = cvRect32fFromBox32f( box32f );
CvRect rect = cvRectFromRect32f( rect32f );
patch = cvCreateImage( cvSize(rect.width,rect.height), frame->depth, frame->nChannels );
cvCropImageROI( frame, patch, rect32f );
cvResize( patch, resize );
frameRed = cvCreateImage(cvSize(resize->width,resize->height), IPL_DEPTH_8U, 1);
frameGreen = cvCreateImage(cvSize(resize->width,resize->height), IPL_DEPTH_8U, 1);
frameBlue = cvCreateImage(cvSize(resize->width,resize->height), IPL_DEPTH_8U, 1);
cvSplit(resize, frameRed, frameGreen,frameBlue, NULL);
histFrameRed = cvCreateHist(1, &hist_size, CV_HIST_ARRAY);
histFrameGreen = cvCreateHist(1, &hist_size, CV_HIST_ARRAY);
histFrameBlue = cvCreateHist(1, &hist_size, CV_HIST_ARRAY);
cvCalcHist( &frameRed, histFrameRed, 0, NULL );
cvCalcHist( &frameGreen, histFrameGreen, 0, NULL );
cvCalcHist( &frameBlue, histFrameBlue, 0, NULL );
likeliRed = cvCompareHist(histFrameRed, histReferenceRed, CV_COMP_INTERSECT);
likeliGreen = cvCompareHist(histFrameGreen, histReferenceGreen, CV_COMP_INTERSECT);
likeliBlue = cvCompareHist(histFrameBlue, histReferenceBlue, CV_COMP_INTERSECT);
likeli = likeliRed + likeliBlue + likeliGreen;
cvmSet( p->weights, 0, i, likeli );
cvReleaseImage( &patch );
cvReleaseImage( &frameRed );
cvReleaseImage( &frameGreen );
cvReleaseImage( &frameBlue );
cvReleaseHist(&histFrameRed);
cvReleaseHist(&histFrameGreen);
cvReleaseHist(&histFrameBlue);
}
cvReleaseImage( &resize );
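// Particles beyond the current dynamic count get a very low weight so that
// normalization and resampling effectively ignore them.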
for(i = numParticlesDyn; i < p->num_particles; i++)
cvmSet( p->weights, 0, i, -99999.0 );
}
void particleEvalGrayHist( CvParticle* p, IplImage* frame, CvHistogram* histReferenceGray, CvSize featSize, int numParticlesDyn )
{
int i;
double likeli;
IplImage *patch;
IplImage *resize;
int hist_size = HIST_SIZE;
resize = cvCreateImage( featSize, frame->depth, frame->nChannels );
for( i = 0; i < numParticlesDyn; i++ )
{
CvParticleState s = cvParticleStateGet( p, i );
CvBox32f box32f = cvBox32f( s.x, s.y, s.width, s.height, s.angle );
CvRect32f rect32f = cvRect32fFromBox32f( box32f );
CvRect rect = cvRectFromRect32f( rect32f );
patch = cvCreateImage( cvSize(rect.width,rect.height), frame->depth, frame->nChannels );
cvCropImageROI( frame, patch, rect32f );
cvResize( patch, resize );
IplImage* grayResize = cvCreateImage(cvSize(resize->width,resize->height), IPL_DEPTH_8U, 1);
cvCvtColor(resize, grayResize, CV_BGR2GRAY);
CvHistogram* histResize = cvCreateHist(1, &hist_size, CV_HIST_ARRAY);
cvCalcHist( &grayResize, histResize, 0, NULL );
likeli = cvCompareHist(histResize, histReferenceGray, CV_COMP_INTERSECT);
cvmSet( p->weights, 0, i, likeli );
cvReleaseImage( &patch );
}
cvReleaseImage( &resize );
for(i = numParticlesDyn; i < p->num_particles; i++)
cvmSet( p->weights, 0, i, -99999.0 );
}
void particleEvalDefault( CvParticle* p, IplImage* frame, IplImage *reference, CvSize featSize, int numParticlesDyn )
{
int i;
double likeli;
IplImage *patch;
IplImage *resize;
resize = cvCreateImage( featSize, frame->depth, frame->nChannels );
for( i = 0; i < numParticlesDyn; i++ )
{
CvParticleState s = cvParticleStateGet( p, i );
CvBox32f box32f = cvBox32f( s.x, s.y, s.width, s.height, s.angle );
CvRect32f rect32f = cvRect32fFromBox32f( box32f );
CvRect rect = cvRectFromRect32f( rect32f );
patch = cvCreateImage( cvSize(rect.width,rect.height), frame->depth, frame->nChannels );
cvCropImageROI( frame, patch, rect32f );
cvResize( patch, resize );
likeli = -cvNorm( resize, reference, CV_L2 );
cvmSet( p->weights, 0, i, likeli );
cvReleaseImage( &patch );
}
cvReleaseImage( &resize );
for(i = numParticlesDyn; i < p->num_particles; i++)
cvmSet( p->weights, 0, i, -99999.0 );
}
void particleEvalHybrid( CvParticle* p, IplImage* frame, IplImage *reference, CvMat **matRef, int nx, int ny, CvSize featSize, int numParticlesDyn )
{
int i,a,b,x,y;
int j = 0;
double likeli;
IplImage *patch;
IplImage *resize;
resize = cvCreateImage( featSize, frame->depth, frame->nChannels );
int stepX = featSize.width/nx;
int stepY = featSize.height/ny;
CvMat *matRes = cvCreateMat(3, (256/DIVIDER) + 1, CV_8UC1);
cvZero(matRes);
uchar *ptrRes = NULL;
for( i = 0; i < numParticlesDyn; i++ )
{
CvParticleState s = cvParticleStateGet( p, i );
// cvBox32f constructs a center-coordinate floating-point rectangle (box)
CvBox32f box32f = cvBox32f( s.x, s.y, s.width, s.height, s.angle );
CvRect32f rect32f = cvRect32fFromBox32f( box32f );
CvRect rect = cvRectFromRect32f( rect32f );
patch = cvCreateImage( cvSize(rect.width,rect.height), frame->depth, frame->nChannels );
cvCropImageROI( frame, patch, rect32f );
cvResize( patch, resize );
likeli = 0;
j = 0;
for(a = 0; a < ny; a++)
{
for(b = 0; b < nx; b++)
{
for(y = a * stepY; y < ((a + 1) * stepY); y++)
{
if(y < featSize.height)
{
ptrRes = (uchar*) (resize->imageData + y * resize->widthStep);
for(x = b * stepX; x < ((b + 1) * stepX); x++)
{
if(x < featSize.width)
{
(*(matRes->data.ptr + (*(ptrRes+3*x))/DIVIDER))++;
(*(matRes->data.ptr + matRes->cols + (*(ptrRes + 3*x+1))/DIVIDER))++;
(*(matRes->data.ptr + matRes->cols * 2 + (*(ptrRes + 3*x+2))/DIVIDER))++;
}
}
}
}
likeli += cvNorm( matRef[j], matRes, CV_L2 );
j++;
cvZero(matRes);
}
}
likeli *= -1;
cvmSet( p->weights, 0, i, likeli );
cvZero(matRes);
cvReleaseImage( &patch );
}
cvReleaseImage( &resize );
for(i = numParticlesDyn; i < p->num_particles; i++)
cvmSet( p->weights, 0, i, -99999.0 );
}
#endif

@@ -0,0 +1,255 @@
/*
*
* The MIT License
*
* Copyright (c) 2008, Naotoshi Seo <sonots(at)sonots.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef CV_PARTICLE_ROTRECT_H
#define CV_PARTICLE_ROTRECT_H
#include "opencvx/cvparticle.h"
#include "opencvx/cvdrawrectangle.h"
#include "opencvx/cvcropimageroi.h"
#include "opencvx/cvrect32f.h"
#include <float.h>
//using namespace std;
/********************** Definition of a particle *****************************/
int num_states = 5;
/*! Definition of the meanings of the 5 states.
A structure like this does not have to be defined, but I recommend
defining it because it makes the meaning of the states clear.
*/
typedef struct CvParticleState {
double x; //!< Center coord of a rectangle
double y; //!< Center coord of a rectangle
double width; //!< Width of a rectangle
double height; //!< Height of a rectangle
double angle; //!< Rotation around center. degree
} CvParticleState;
/*! Definition of dynamics model:
@code
new_particle = cvMatMul( dynamics, particle ) + noise
@endcode
@code
curr_x := curr_x + noise
@endcode
*/
double dynamics[] = {
1, 0, 0, 0, 0,
0, 1, 0, 0, 0,
0, 0, 1, 0, 0,
0, 0, 0, 1, 0,
0, 0, 0, 0, 1,
};
/********************** Function Prototypes *********************************/
// Functions for CvParticleState structure ( constructor, getter, setter )
inline CvParticleState cvParticleState( double x,
double y,
double width,
double height,
double angle = 0 );
CvParticleState cvParticleStateFromMat( const CvMat* state );
void cvParticleStateToMat( const CvParticleState &state, CvMat* state_mat );
CvParticleState cvParticleStateGet( const CvParticle* p, int p_id );
void cvParticleStateSet( CvParticle* p, int p_id, const CvParticleState &state );
// Particle Filter configuration
void cvParticleStateConfig( CvParticle* p, CvSize imsize, CvParticleState& std );
void cvParticleStateAdditionalBound( CvParticle* p, CvSize imsize );
// Utility Functions
void cvParticleStateDisplay( const CvParticleState& state, IplImage* img, CvScalar color, int thickness );
void cvParticleStatePrint( const CvParticleState& state );
/****************** Functions for CvParticleState structure ******************/
// State definitions like this are not necessary,
// but they certainly help code readability.
/*!
* Constructor
*/
inline CvParticleState cvParticleState( double x,
double y,
double width,
double height,
double angle )
{
CvParticleState state = { x, y, width, height, angle };
return state;
}
/*!
* Convert a matrix state representation to a state structure
*
* @param state num_states x 1 matrix
*/
CvParticleState cvParticleStateFromMat( const CvMat* state )
{
CvParticleState s;
s.x = cvmGet( state, 0, 0 );
s.y = cvmGet( state, 1, 0 );
s.width = cvmGet( state, 2, 0 );
s.height = cvmGet( state, 3, 0 );
s.angle = cvmGet( state, 4, 0 );
return s;
}
/*!
* Convert a state structure to CvMat
*
* @param state A CvParticleState structure
* @param state_mat num_states x 1 matrix
* @return void
*/
void cvParticleStateToMat( const CvParticleState& state, CvMat* state_mat )
{
cvmSet( state_mat, 0, 0, state.x );
cvmSet( state_mat, 1, 0, state.y );
cvmSet( state_mat, 2, 0, state.width );
cvmSet( state_mat, 3, 0, state.height );
cvmSet( state_mat, 4, 0, state.angle );
}
/*!
* Get a state from a particle filter structure
*
* @param p particle filter struct
* @param p_id particle id
*/
CvParticleState cvParticleStateGet( const CvParticle* p, int p_id )
{
CvMat* state, hdr;
state = cvGetCol( p->particles, &hdr, p_id );
return cvParticleStateFromMat( state );
}
/*!
* Set a state to a particle filter structure
*
* @param state A CvParticleState structure
* @param p particle filter struct
* @param p_id particle id
* @return void
*/
void cvParticleStateSet( CvParticle* p, int p_id, const CvParticleState& state )
{
CvMat* state_mat, hdr;
state_mat = cvGetCol( p->particles, &hdr, p_id );
cvParticleStateToMat( state, state_mat );
}
/*************************** Particle Filter Configuration *********************************/
/*!
* Configuration of Particle filter
*/
void cvParticleStateConfig( CvParticle* p, CvSize imsize, CvParticleState& std )
{
// config dynamics model
CvMat dynamicsmat = cvMat( p->num_states, p->num_states, CV_64FC1, dynamics );
// config random noise standard deviation
CvRNG rng = cvRNG( time( NULL ) );
double stdarr[] = {
std.x,
std.y,
std.width,
std.height,
std.angle
};
CvMat stdmat = cvMat( p->num_states, 1, CV_64FC1, stdarr );
// config minimum and maximum values of states
// lowerbound, upperbound, circular flag (useful for degree)
// lowerbound == upperbound to express no bounding
double boundarr[] = {
0, imsize.width - 1, false,
0, imsize.height - 1, false,
1, imsize.width, false,
1, imsize.height, false,
0, 360, true
};
CvMat boundmat = cvMat( p->num_states, 3, CV_64FC1, boundarr );
cvParticleSetDynamics( p, &dynamicsmat );
cvParticleSetNoise( p, rng, &stdmat );
cvParticleSetBound( p, &boundmat );
}
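/*!
 * A minimal configuration sketch (a hedged example: the std values are
 * hypothetical, frame is an assumed variable, and the cvparticle.h
 * signatures of cvParticleCreate and cvParticleInit are assumptions):
 * @code
 * CvParticle *p = cvParticleCreate( num_states, 1, 100, true ); // 100 particles
 * CvParticleState std = cvParticleState( 3.0, 3.0, 2.0, 2.0, 1.0 );
 * cvParticleStateConfig( p, cvGetSize( frame ), std );
 * cvParticleInit( p );
 * @endcode
 */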
/*!
* @todo CvParticle does not currently support this type of bounding.
* Call this after the transition step.
*/
void cvParticleStateAdditionalBound( CvParticle* p, CvSize imsize )
{
for( int np = 0; np < p->num_particles; np++ )
{
double x = cvmGet( p->particles, 0, np );
double y = cvmGet( p->particles, 1, np );
double width = cvmGet( p->particles, 2, np );
double height = cvmGet( p->particles, 3, np );
width = MIN( width, imsize.width - (x) ); // another state x is used
height = MIN( height, imsize.height - (y) ); // another state y is used
cvmSet( p->particles, 2, np, width );
cvmSet( p->particles, 3, np, height );
}
}
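/*!
 * Restore particle sizes to a fixed value. If the noise std of both the
 * width and the height is zero (i.e. the size is not being estimated),
 * every particle's width and height are reset to the given size.
 */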
void cvParticleRestoreSize(CvParticle *p, CvSize size)
{
int i = 0;
if((cvmGet(p->std, 0, 2) == 0.0) && (cvmGet(p->std, 0, 3) == 0.0))
{
for(i = 0; i < p->num_particles; i++)
{
cvmSet(p->particles, 2, i, size.width);
cvmSet(p->particles, 3, i, size.height);
}
}
}
/***************************** Utility Functions ****************************************/
void cvParticleStateDisplay( const CvParticleState& state, IplImage* img, CvScalar color, int thickness )
{
CvBox32f box32f = cvBox32f( state.x, state.y, state.width, state.height, state.angle );
CvRect32f rect32f = cvRect32fFromBox32f( box32f );
cvDrawRectangle( img, rect32f, cvPoint2D32f(0,0), color, thickness,8,0);
//cvDrawRectangle( img, rect32f, cvPoint2D32f(0,0), color);
}
void cvParticleStatePrint( const CvParticleState& state )
{
printf( "x :%.2f ", state.x );
printf( "y :%.2f ", state.y );
printf( "width :%.2f ", state.width );
printf( "height :%.2f ", state.height );
printf( "angle :%.2f\n", state.angle );
fflush( stdout );
}
#endif

@@ -0,0 +1,248 @@
/** @file
* Rotated rectangle state particle filter +
* 1st order AR dynamics model ( in fact, next = current + noise )
*
* Use this file as a template for definitions of states and
* a state transition model for the particle filter
*
* Currently cvparticle.h supports only a linear-combination state
* transition model. You may create another cvParticleTransition to support
* a more complex, non-linear state transition model. Most of the other
* functions should still be usable without modification.
*/
/* The MIT License
*
* Copyright (c) 2008, Naotoshi Seo <sonots(at)sonots.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef CV_PARTICLE_ROTRECT_H
#define CV_PARTICLE_ROTRECT_H
#include "cvparticle.h"
#include "cvdrawrectangle.h"
#include "cvcropimageroi.h"
#include "cvrect32f.h"
#include <float.h>
using namespace std;
/********************** Definition of a particle *****************************/
int num_states = 5;
// Definition of the meanings of the 5 states.
// A structure like this does not have to be defined, but I recommend
// defining it because it makes the meaning of the states clear.
typedef struct CvParticleState {
double x; // center coord of a rectangle
double y; // center coord of a rectangle
double width; // width of a rectangle
double height; // height of a rectangle
double angle; // rotation around center. degree
} CvParticleState;
// Definition of dynamics model
// new_particle = cvMatMul( dynamics, particle ) + noise
// curr_x := curr_x + noise
double dynamics[] = {
1, 0, 0, 0, 0,
0, 1, 0, 0, 0,
0, 0, 1, 0, 0,
0, 0, 0, 1, 0,
0, 0, 0, 0, 1,
};
/********************** Function Prototypes *********************************/
#ifndef NO_DOXYGEN
// Functions for CvParticleState structure ( constructor, getter, setter )
inline CvParticleState cvParticleState( double x,
double y,
double width,
double height,
double angle = 0 );
CvParticleState cvParticleStateFromMat( const CvMat* state );
void cvParticleStateToMat( const CvParticleState &state, CvMat* state_mat );
CvParticleState cvParticleStateGet( const CvParticle* p, int p_id );
void cvParticleStateSet( CvParticle* p, int p_id, const CvParticleState &state );
// Particle Filter configuration
void cvParticleStateConfig( CvParticle* p, CvSize imsize, CvParticleState& std );
void cvParticleStateAdditionalBound( CvParticle* p, CvSize imsize );
// Utility Functions
void cvParticleStateDisplay( const CvParticleState& state, IplImage* frame, CvScalar color );
void cvParticleStatePrint( const CvParticleState& state );
#endif
/****************** Functions for CvParticleState structure ******************/
// State definitions like this are not necessary,
// but they certainly help code readability.
/**
* Constructor
*/
inline CvParticleState cvParticleState( double x,
double y,
double width,
double height,
double angle )
{
CvParticleState state = { x, y, width, height, angle };
return state;
}
/**
* Convert a matrix state representation to a state structure
*
* @param state num_states x 1 matrix
*/
CvParticleState cvParticleStateFromMat( const CvMat* state )
{
CvParticleState s;
s.x = cvmGet( state, 0, 0 );
s.y = cvmGet( state, 1, 0 );
s.width = cvmGet( state, 2, 0 );
s.height = cvmGet( state, 3, 0 );
s.angle = cvmGet( state, 4, 0 );
return s;
}
/**
* Convert a state structure to CvMat
*
* @param state A CvParticleState structure
* @param state_mat num_states x 1 matrix
* @return void
*/
void cvParticleStateToMat( const CvParticleState& state, CvMat* state_mat )
{
cvmSet( state_mat, 0, 0, state.x );
cvmSet( state_mat, 1, 0, state.y );
cvmSet( state_mat, 2, 0, state.width );
cvmSet( state_mat, 3, 0, state.height );
cvmSet( state_mat, 4, 0, state.angle );
}
/**
* Get a state from a particle filter structure
*
* @param p particle filter struct
* @param p_id particle id
*/
CvParticleState cvParticleStateGet( const CvParticle* p, int p_id )
{
CvMat* state, hdr;
state = cvGetCol( p->particles, &hdr, p_id );
return cvParticleStateFromMat( state );
}
/**
* Set a state to a particle filter structure
*
* @param state A CvParticleState structure
* @param p particle filter struct
* @param p_id particle id
* @return void
*/
void cvParticleStateSet( CvParticle* p, int p_id, const CvParticleState& state )
{
CvMat* state_mat, hdr;
state_mat = cvGetCol( p->particles, &hdr, p_id );
cvParticleStateToMat( state, state_mat );
}
/*************************** Particle Filter Configuration *********************************/
/**
* Configuration of Particle filter
*/
void cvParticleStateConfig( CvParticle* p, CvSize imsize, CvParticleState& std )
{
// config dynamics model
CvMat dynamicsmat = cvMat( p->num_states, p->num_states, CV_64FC1, dynamics );
// config random noise standard deviation
CvRNG rng = cvRNG( time( NULL ) );
double stdarr[] = {
std.x,
std.y,
std.width,
std.height,
std.angle
};
CvMat stdmat = cvMat( p->num_states, 1, CV_64FC1, stdarr );
// config minimum and maximum values of states
// lowerbound, upperbound, circular flag (useful for degree)
// lowerbound == upperbound to express no bounding
double boundarr[] = {
0, imsize.width - 1, false,
0, imsize.height - 1, false,
1, imsize.width, false,
1, imsize.height, false,
0, 360, true
};
CvMat boundmat = cvMat( p->num_states, 3, CV_64FC1, boundarr );
cvParticleSetDynamics( p, &dynamicsmat );
cvParticleSetNoise( p, rng, &stdmat );
cvParticleSetBound( p, &boundmat );
}
/**
* @todo CvParticle does not currently support this type of bounding.
* Call this after the transition step.
*/
void cvParticleStateAdditionalBound( CvParticle* p, CvSize imsize )
{
for( int np = 0; np < p->num_particles; np++ )
{
double x = cvmGet( p->particles, 0, np );
double y = cvmGet( p->particles, 1, np );
double width = cvmGet( p->particles, 2, np );
double height = cvmGet( p->particles, 3, np );
width = MIN( width, imsize.width - x ); // another state x is used
height = MIN( height, imsize.height - y ); // another state y is used
cvmSet( p->particles, 2, np, width );
cvmSet( p->particles, 3, np, height );
}
}
/***************************** Utility Functions ****************************************/
void cvParticleStateDisplay( const CvParticleState& state, IplImage* img, CvScalar color )
{
CvBox32f box32f = cvBox32f( state.x, state.y, state.width, state.height, state.angle );
CvRect32f rect32f = cvRect32fFromBox32f( box32f );
cvDrawRectangle( img, rect32f, cvPoint2D32f(0,0), color );
}
void cvParticleStatePrint( const CvParticleState& state )
{
printf( "x :%.2f ", state.x );
printf( "y :%.2f ", state.y );
printf( "width :%.2f ", state.width );
printf( "height :%.2f ", state.height );
printf( "angle :%.2f\n", state.angle );
fflush( stdout );
}
#endif

@@ -0,0 +1,301 @@
/** @file
* Rotated rectangle state particle filter +
* 2nd order AR dynamics model ( in fact, next = current + speed + noise )
*
* Use this file as a template for definitions of states and
* a state transition model for the particle filter
*
* Currently cvparticle.h supports only a linear-combination state
* transition model. You may create another cvParticleTransition to support
* a more complex, non-linear state transition model. Most of the other
* functions should still be usable without modification.
*/
/* The MIT License
*
* Copyright (c) 2008, Naotoshi Seo <sonots(at)sonots.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef CV_PARTICLE_ROTRECT2_H
#define CV_PARTICLE_ROTRECT2_H
#include "cvparticle.h"
#include "cvdrawrectangle.h"
#include "cvcropimageroi.h"
#include "cvrect32f.h"
#include <float.h>
using namespace std;
/********************** Definition of a particle *****************************/
int num_states = 10;
// Definition of the meanings of the 10 states.
// A structure like this does not have to be defined, but I recommend
// defining it because it makes the meaning of the states clear.
typedef struct CvParticleState {
double x; // center coord of a rectangle
double y; // center coord of a rectangle
double width; // width of a rectangle
double height; // height of a rectangle
double angle; // rotation around center. degree
double xp; // previous center coord of a rectangle
double yp; // previous center coord of a rectangle
double widthp; // previous width of a rectangle
double heightp; // previous height of a rectangle
double anglep; // previous rotation around center. degree
} CvParticleState;
// Definition of dynamics model
// new_particle = cvMatMul( dynamics, particle ) + noise
// curr_x := curr_x + dx + noise = curr_x + (curr_x - prev_x) + noise
// prev_x := curr_x
double dynamics[] = {
2, 0, 0, 0, 0, -1, 0, 0, 0, 0,
0, 2, 0, 0, 0, 0, -1, 0, 0, 0,
0, 0, 2, 0, 0, 0, 0, -1, 0, 0,
0, 0, 0, 2, 0, 0, 0, 0, -1, 0,
0, 0, 0, 0, 2, 0, 0, 0, 0, -1,
1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 0, 0, 0, 0, 0,
};
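// Worked example for the x state: with the matrix above,
//   new_x  = 2*x - xp = x + (x - xp)  (current position plus velocity)
//   new_xp = 1*x                      (current becomes previous)
// and analogously for y, width, height and angle.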
/********************** Function Prototypes *********************************/
#ifndef NO_DOXYGEN
// Functions for CvParticleState structure ( constructor, getter, setter )
inline CvParticleState cvParticleState( double x,
double y,
double width,
double height,
double angle = 0,
double xp = 0,
double yp = 0,
double widthp = 0,
double heightp = 0,
double anglep =0 );
CvParticleState cvParticleStateFromMat( const CvMat* state );
void cvParticleStateToMat( const CvParticleState &state, CvMat* state_mat );
CvParticleState cvParticleStateGet( const CvParticle* p, int p_id );
void cvParticleStateSet( CvParticle* p, int p_id, const CvParticleState &state );
// Particle Filter configuration
void cvParticleStateConfig( CvParticle* p, CvSize imsize, CvParticleState& std );
void cvParticleStateAdditionalBound( CvParticle* p, CvSize imsize );
// Utility Functions
void cvParticleStateDisplay( const CvParticleState& state, IplImage* frame, CvScalar color );
void cvParticleStatePrint( const CvParticleState& state );
#endif
/****************** Functions for CvParticleState structure ******************/
// This kinds of state definitions are not necessary,
// but helps readability of codes for sure.
/**
* Constructor
*/
inline CvParticleState cvParticleState( double x,
double y,
double width,
double height,
double angle,
double xp,
double yp,
double widthp,
double heightp,
double anglep )
{
CvParticleState state = { x, y, width, height, angle,
xp, yp, widthp, heightp, anglep };
return state;
}
/**
* Convert a matrix state representation to a state structure
*
* @param state num_states x 1 matrix
*/
CvParticleState cvParticleStateFromMat( const CvMat* state )
{
CvParticleState s;
s.x = cvmGet( state, 0, 0 );
s.y = cvmGet( state, 1, 0 );
s.width = cvmGet( state, 2, 0 );
s.height = cvmGet( state, 3, 0 );
s.angle = cvmGet( state, 4, 0 );
s.xp = cvmGet( state, 5, 0 );
s.yp = cvmGet( state, 6, 0 );
s.widthp = cvmGet( state, 7, 0 );
s.heightp = cvmGet( state, 8, 0 );
s.anglep = cvmGet( state, 9, 0 );
return s;
}
/**
* Convert a state structure to CvMat
*
* @param state A CvParticleState structure
* @param state_mat num_states x 1 matrix
* @return void
*/
void cvParticleStateToMat( const CvParticleState& state, CvMat* state_mat )
{
cvmSet( state_mat, 0, 0, state.x );
cvmSet( state_mat, 1, 0, state.y );
cvmSet( state_mat, 2, 0, state.width );
cvmSet( state_mat, 3, 0, state.height );
cvmSet( state_mat, 4, 0, state.angle );
cvmSet( state_mat, 5, 0, state.xp );
cvmSet( state_mat, 6, 0, state.yp );
cvmSet( state_mat, 7, 0, state.widthp );
cvmSet( state_mat, 8, 0, state.heightp );
cvmSet( state_mat, 9, 0, state.anglep );
}
/**
* Get a state from a particle filter structure
*
* @param p particle filter struct
* @param p_id particle id
*/
CvParticleState cvParticleStateGet( const CvParticle* p, int p_id )
{
CvMat* state, hdr;
state = cvGetCol( p->particles, &hdr, p_id );
return cvParticleStateFromMat( state );
}
/**
* Set a state to a particle filter structure
*
* @param state A CvParticleState structure
* @param p particle filter struct
* @param p_id particle id
* @return void
*/
void cvParticleStateSet( CvParticle* p, int p_id, const CvParticleState& state )
{
CvMat* state_mat, hdr;
state_mat = cvGetCol( p->particles, &hdr, p_id );
cvParticleStateToMat( state, state_mat );
}
/*************************** Particle Filter Configuration *********************************/
/**
* Configuration of Particle filter
*/
void cvParticleStateConfig( CvParticle* p, CvSize imsize, CvParticleState& std )
{
// config dynamics model
CvMat dynamicsmat = cvMat( p->num_states, p->num_states, CV_64FC1, dynamics );
// config random noise standard deviation
CvRNG rng = cvRNG( time( NULL ) );
double stdarr[] = {
std.x,
std.y,
std.width,
std.height,
std.angle,
0,
0,
0,
0,
0
};
CvMat stdmat = cvMat( p->num_states, 1, CV_64FC1, stdarr );
// config minimum and maximum values of states
// lowerbound, upperbound, circular flag (useful for degree)
// lowerbound == upperbound to express no bounding
double boundarr[] = {
0, imsize.width - 1, false,
0, imsize.height - 1, false,
1, imsize.width, false,
1, imsize.height, false,
0, 360, true,
0, 0, 0,
0, 0, 0,
0, 0, 0,
0, 0, 0,
0, 0, 0
};
CvMat boundmat = cvMat( p->num_states, 3, CV_64FC1, boundarr );
cvParticleSetDynamics( p, &dynamicsmat );
cvParticleSetNoise( p, rng, &stdmat );
cvParticleSetBound( p, &boundmat );
}
/**
* @todo CvParticle does not currently support this type of bounding.
* Call this after the transition step.
*/
void cvParticleStateAdditionalBound( CvParticle* p, CvSize imsize )
{
for( int np = 0; np < p->num_particles; np++ )
{
double x = cvmGet( p->particles, 0, np );
double y = cvmGet( p->particles, 1, np );
double width = cvmGet( p->particles, 2, np );
double height = cvmGet( p->particles, 3, np );
width = MIN( width, imsize.width - x ); // another state x is used
height = MIN( height, imsize.height - y ); // another state y is used
cvmSet( p->particles, 2, np, width );
cvmSet( p->particles, 3, np, height );
}
}
/***************************** Utility Functions ****************************************/
/**
* Draw tracking state on an image
*/
void cvParticleStateDisplay( const CvParticleState& state, IplImage* img, CvScalar color )
{
CvBox32f box32f = cvBox32f( state.x, state.y, state.width, state.height, state.angle );
CvRect32f rect32f = cvRect32fFromBox32f( box32f );
cvDrawRectangle( img, rect32f, cvPoint2D32f(0,0), color );
}
/**
* Print the tracking state
*/
void cvParticleStatePrint( const CvParticleState& state )
{
printf( "x :%.2f ", state.x );
printf( "y :%.2f ", state.y );
printf( "width :%.2f ", state.width );
printf( "height :%.2f ", state.height );
printf( "angle :%.2f\n", state.angle );
printf( "xp:%.2f ", state.xp );
printf( "yp:%.2f ", state.yp );
printf( "widthp:%.2f ", state.widthp );
printf( "heightp:%.2f ", state.heightp );
printf( "anglep:%.2f\n", state.anglep );
fflush( stdout );
}
#endif

Sources/anchoritem.cpp Normal file
@@ -0,0 +1,161 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by the Ministry of the Interior of the Czech Republic.
//
//------------------------------------------------------------------------------
/*!
@file anchoritem.cpp
@brief Implementation of methods ...
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#include "anchoritem.h"
#include <QDebug>
#include <QMouseEvent>
AnchorItem::AnchorItem(ItemType type, int64_t timestamp, unsigned long timePosition, unsigned long totalTime,
unsigned long frameNumber, unsigned long frameCount, QWidget *parent, bool isSet, bool displayTime) :
QWidget(parent),
type(type),
timestamp(timestamp),
timePosition(timePosition),
frameNumber(frameNumber),
isSet(isSet)
{
currentBackgroundColor = backgroundColorInactive;
QString nameString;
switch (type)
{
case ItemType::END:
{
nameString = tr("End");
break;
}
case ItemType::BEGINNING:
{
nameString = tr("Beginning");
break;
}
case ItemType::CHANGE:
nameString = tr("Trajectory change");
break;
default:
qDebug() << "ERROR - AnchorItem: Wrong ItemType";
return;
}
name = new QLabel(nameString, this);
hLayout = new QHBoxLayout(this);
hLayout->setMargin(0);
hLayout->setSpacing(0);
hLayout->addWidget(name);
setLayout(hLayout);
if (type == ItemType::END && !isSet)
{
QLabel *time = new QLabel(tr("Not set"), this);
time->setAlignment(Qt::AlignRight);
hLayout->addWidget(time);
}
else
{
timeLabel = new TimeLabel(this);
timeLabel->set_total_time(totalTime);
timeLabel->set_frame_count(frameCount);
if (displayTime)
timeLabel->display_time(timePosition, false);
else
timeLabel->display_frame_num(frameNumber, false);
timeLabel->setAlignment(Qt::AlignRight);
hLayout->addWidget(timeLabel);
}
installEventFilter(this);
}
bool AnchorItem::eventFilter(QObject *object, QEvent *event)
{
if (isEnabled() && object == this)
{
if (event->type() == QEvent::Enter)
{
setStyleSheet(backgroundColorActive);
}
else if(event->type() == QEvent::Leave)
{
setStyleSheet(currentBackgroundColor);
}
return false; // propagates the event even when processed
}
return false; // propagates the event further since was not processed
}
void AnchorItem::set_highlight(bool set)
{
if (set)
currentBackgroundColor = backgroundColorActive;
else
currentBackgroundColor = backgroundColorInactive;
setStyleSheet(currentBackgroundColor);
}
AnchorItem::~AnchorItem()
{
// All objects get deleted automatically as they are set to be children
}
ItemType AnchorItem::get_type()
{
return type;
}
bool AnchorItem::is_set()
{
return isSet;
}
int64_t AnchorItem::get_timestamp() const
{
return timestamp;
}
void AnchorItem::display_time()
{
if (type == ItemType::END && !isSet)
{ // End is not set so the time cannot be displayed
return;
}
timeLabel->display_time(timePosition, false);
}
void AnchorItem::display_frame_num()
{
if (type == ItemType::END && !isSet)
{ // End is not set so the frame number cannot be displayed
return;
}
timeLabel->display_frame_num(frameNumber, false);
}

Sources/anchoritem.h Normal file
@@ -0,0 +1,122 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by the Ministry of the Interior of the Czech Republic.
//
//------------------------------------------------------------------------------
/*!
@file anchoritem.h
@brief Header file
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#ifndef ANCHORITEM_H
#define ANCHORITEM_H
#include <QWidget>
#include <QLabel>
#include <QHBoxLayout>
#include "timelabel.h"
#include <stdint.h> // int64_t; needed for MSVC compiler
enum class ItemType : int {BEGINNING, END, CHANGE};
class AnchorItem : public QWidget
{
Q_OBJECT
public:
/**
* Constructor
* @param type Item type
* @param timestamp Timestamp
* @param timePosition Time position
* @param totalTime Total video time
* @param frameNumber Frame number
* @param frameCount Number of frames in the video
* @param parent Parent widget
* @param isSet Is the item set? Works only with END ItemType
* @param displayTime True - display time. False - display frame number
*/
AnchorItem(ItemType type, int64_t timestamp, unsigned long timePosition, unsigned long totalTime,
unsigned long frameNumber, unsigned long frameCount, QWidget *parent, bool isSet=true, bool displayTime=false);
/**
* Destructor
*/
~AnchorItem();
/**
* Returns type of this item.
* @return Item type
*/
ItemType get_type();
/**
* Is the object set? isSet works only with END ItemType.
* @return isSet.
*/
bool is_set();
/**
* Sets highlighting. Works with eventFilter().
* @param set True - highlight always on. False - highlight set according to item hover.
*/
void set_highlight(bool set);
/**
* Returns timestamp.
* @return timestamp
*/
int64_t get_timestamp() const;
/**
* Displays values as time positions.
*/
void display_time();
/**
* Displays values as frame numbers.
*/
void display_frame_num();
signals:
protected:
/**
* Filters Enter and Leave events for item highlighting when hovering
* @param object Object of the event
* @param event Event
* @return False - propagate event further. True - do not propagate.
*/
bool eventFilter(QObject *object, QEvent *event);
private:
TimeLabel *timeLabel;
QLabel *name;
ItemType type;
int64_t timestamp;
unsigned long timePosition;
unsigned long frameNumber;
bool isSet;
QHBoxLayout *hLayout;
const QString backgroundColorActive = "background-color: rgb(115, 171, 230);";
const QString backgroundColorInactive = "background-color: rgb(255, 255, 255);";
QString currentBackgroundColor;
};
#endif // ANCHORITEM_H

Sources/avwriter.cpp Normal file
@@ -0,0 +1,446 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by the Ministry of the Interior of the Czech Republic.
//
//------------------------------------------------------------------------------
/*!
@file avwriter.cpp
@brief Implementation of methods ...
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#include "avwriter.h"
#include <QDebug>
#include <cassert>
AVWriter::AVWriter()
{
formatContext = nullptr;
videoStream = nullptr;
audioStream = nullptr;
}
AVWriter::~AVWriter()
{
if (videoStream)
avcodec_close(videoStream->codec);
if (audioStream)
avcodec_close(audioStream->codec);
if (formatContext)
{
avformat_free_context(formatContext);
formatContext = nullptr;
}
}
bool AVWriter::initialize_output(std::string filename, AVStream const *inVideoStream, AVStream const *inAudioStream, char const *inFormatName, std::string inFileExtension)
{
/** choose the extension according to the file format:
std::string str(formatContext->iformat->name);
std::string ext = str.substr(0, str.find(','));
filename += ".";
filename += ext;
qDebug() << ext.c_str();
avformat_alloc_output_context2(&formatContext2, NULL, ext.c_str(), NULL); // allocation of output context
*/
/** Output video format deduction from the (output) filename.
It is disabled because the format should match the input format, so no extra settings are needed.
Encoding to some other formats might not be valid, especially because of mixed PTS and DTS.
std::string originalFilename = filename; // "filename" might change the extension if needed; "originalFilename" stores the argument
// Allocating output media context
avformat_alloc_output_context2(&formatContext, nullptr, nullptr, filename.c_str()); // allocation of output context
if (!formatContext || formatContext->oformat->video_codec == AV_CODEC_ID_NONE)
{
if (!formatContext)
{
qDebug() << "Warning: Could not deduce output format from file extension. Deducing from input format instead.";
}
else
{
qDebug() << "Warning: File format deduced from file extension does not support video. Deducing from input format instead.";
avformat_free_context(formatContext);
formatContext = nullptr;
}
// Try allocate formatContext according to input format name and set appropriate extension
// The new extension is added after the previous one (user might want to have a dot in the filename)
std::string str(inFormatName);
std::string ext = str.substr(0, str.find(',')); // Finds first extension that belongs to given file format
if (!ext.empty()) // Appropriate extension found
{
filename = originalFilename + "." + ext;
qDebug() << ext.c_str();
avformat_alloc_output_context2(&formatContext, NULL, ext.c_str(), NULL); // allocation of output context
}
}
*/
// Try to allocate formatContext according to the input format name and set an appropriate extension
// If the user changed the file extension, add the correct one
std::string str(inFormatName);
// Find first extension that belongs to given file format
std::string ext = "";
auto extPos = str.find(',');
if (extPos != std::string::npos)
ext = str.substr(0, extPos); // Finds first extension that belongs to given file format
else
ext = str;
if (!ext.empty()) // Appropriate extension found
{
std::string outFileExtension = "";
auto outExtensionPosition = filename.find_last_of('.');
if (outExtensionPosition != std::string::npos)
outFileExtension = filename.substr(outExtensionPosition);
qDebug() << "outFileExtension" << QString::fromStdString(outFileExtension);
qDebug() << "inFileExtension" << QString::fromStdString(inFileExtension);
if (outFileExtension.compare(inFileExtension)) // extensions differ; append the correct one
filename += inFileExtension;
qDebug() << ext.c_str();
avformat_alloc_output_context2(&formatContext, NULL, ext.c_str(), NULL); // allocation of output context
}
if (!formatContext || formatContext->oformat->video_codec == AV_CODEC_ID_NONE)
{ // No format deduced, or the deduced format does not support video; fall back to MPEG
if (!formatContext)
{
qDebug() << "Warning: Could not deduce output format. Using MPEG.";
}
else
{
qDebug() << "Warning: Deduced file format does not support video. Using MPEG.";
avformat_free_context(formatContext);
formatContext = nullptr;
}
filename = filename + ".mpg";
avformat_alloc_output_context2(&formatContext, NULL, "mpeg", filename.c_str());
}
if (!formatContext)
{
qDebug() << "Error: avformat_alloc_output_context2";
return false;
}
AVOutputFormat *fmt = formatContext->oformat;
// Add video stream
if (fmt->video_codec != AV_CODEC_ID_NONE)
{
if (!add_stream(&videoStream, inVideoStream))
{
qDebug() << "ERROR: Adding video stream";
return false;
}
}
else
{ // Video is needed
qDebug() << "ERROR: This format is not a video format.";
return false;
}
// Add audio stream
if (fmt->audio_codec != AV_CODEC_ID_NONE)
{
if (!add_stream(&audioStream, inAudioStream))
{
qDebug() << "ERROR: Adding audio stream";
return false;
}
}
/* Now that all the parameters are set, we can open the audio and
* video codecs and allocate the necessary encode buffers. */
av_dump_format(formatContext, 0, filename.c_str(), 1);
// Open the output file
if (!(fmt->flags & AVFMT_NOFILE)) {
if (avio_open(&formatContext->pb, filename.c_str(), AVIO_FLAG_WRITE) < 0)
{
qDebug() << "Error could not open output file.";
return false;
}
}
// Write the stream header
if (avformat_write_header(formatContext, nullptr) < 0)
{
qDebug() << "Error writing header";
return false;
}
// Everything is correctly set, prepared for writing
return true;
}
bool AVWriter::close_output()
{
/* Write the trailer, if any. The trailer must be written before you
* close the CodecContexts open when you wrote the header; otherwise
* av_write_trailer() may try to use memory that was freed on
* av_codec_close(). */
av_write_trailer(formatContext);
if (!(formatContext->oformat->flags & AVFMT_NOFILE))
avio_closep(&formatContext->pb);
return true;
}
bool AVWriter::add_stream(AVStream **outputStream, const AVStream *inputStream)
{
// AVStream *inputStream = formatContext->streams[inputStreamID]; // careful, it's a different formatContext!
AVCodecContext *codecContext;
AVCodec *codec;
/* find the encoder */
qDebug() << "codec id:" << inputStream->codec->codec_id;
codec = avcodec_find_encoder(inputStream->codec->codec_id);
if (!codec) {
qDebug() << "Error finding encoder";
return false;
}
if (codec->type == AVMEDIA_TYPE_VIDEO)
*outputStream = avformat_new_stream(formatContext, codec); // Creates new stream (also increases nb_streams)
else
*outputStream = avformat_new_stream(formatContext, inputStream->codec->codec); // Creates new stream (also increases nb_streams)
if (!*outputStream) {
qDebug() << "Error allocating stream";
return false;
}
(*outputStream)->id = formatContext->nb_streams - 1;
codecContext = (*outputStream)->codec;
/** Not used because of c->qmin, qmax and qcompress; in the mp4 format the copied values are invalid
if (avcodec_copy_context((*outputStream)->codec, inputStream->codec) < 0) {
qDebug() << "Failed to copy context from input to output stream codec context";
return false;
}
*/
switch (codec->type)
{
case AVMEDIA_TYPE_AUDIO:
if (avcodec_copy_context((*outputStream)->codec, inputStream->codec) < 0)
{
qDebug() << "Failed to copy context from input to output stream codec context";
return false;
}
(*outputStream)->codec->codec_tag = 0;
/** No need to encode because audio frames are not decoded
codecContext->sample_fmt = codec->sample_fmts ?
codec->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
codecContext->bit_rate = 64000;
codecContext->sample_rate = 44100;
if (codec->supported_samplerates) {
codecContext->sample_rate = codec->supported_samplerates[0];
for (i = 0; codec->supported_samplerates[i]; i++) {
if (codec->supported_samplerates[i] == 44100)
codecContext->sample_rate = 44100;
}
}
codecContext->channels = av_get_channel_layout_nb_channels(codecContext->channel_layout);
codecContext->channel_layout = AV_CH_LAYOUT_STEREO;
if (codec->channel_layouts) {
codecContext->channel_layout = codec->channel_layouts[0];
for (i = 0; codec->channel_layouts[i]; i++) {
if (codec->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
codecContext->channel_layout = AV_CH_LAYOUT_STEREO;
}
}
codecContext->channels = av_get_channel_layout_nb_channels(codecContext->channel_layout);
(*outputStream)->time_base = AVRational{ 1, codecContext->sample_rate };
*/
break;
case AVMEDIA_TYPE_VIDEO:
codecContext->codec_id = inputStream->codec->codec_id;
// commented out because for some video formats it causes the first frame not to be a keyframe
//codecContext->bit_rate = inputStream->codec->bit_rate;
codecContext->width = inputStream->codec->width;
codecContext->height = inputStream->codec->height;
(*outputStream)->time_base = inputStream->time_base;
codecContext->time_base = (*outputStream)->time_base;
codecContext->gop_size = inputStream->codec->gop_size; // one intra frame emitted every X frames at most
codecContext->pix_fmt = (enum AVPixelFormat)outputFormat;
codecContext->max_b_frames = inputStream->codec->max_b_frames;
codecContext->mb_decision = inputStream->codec->mb_decision;
break;
default:
break;
}
/* Some formats want stream headers to be separate. */
if (formatContext->oformat->flags & AVFMT_GLOBALHEADER)
codecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;
if(codec->type == AVMEDIA_TYPE_VIDEO)
{
if (avcodec_open2((*outputStream)->codec, codec, NULL) < 0)
{
qDebug() << "Error opening codec";
return false;
}
}
return true;
}
bool AVWriter::write_video_frame(VideoFrame &frame)
{
assert(frame.get_mat_frame() != nullptr);
int got_packet = 0;
AVPacket pkt;
//AVPacket pkt = { 0 };
// Set .data and .size to 0; that is needed for avcodec_encode_video2() to allocate the buffer
pkt.data = nullptr;
pkt.size = 0;
av_init_packet(&pkt);
AVFrame const *avFrame = frame.get_av_frame(); // frame converted from cv::Mat (part of VideoFrame)
if (!avFrame)
{
qDebug() << "Error: Cannot get frame converted to AVFrame";
return false;
}
/** Commented out because not tested
if (formatContext->oformat->flags & AVFMT_RAWPICTURE)
{ // avoiding data copy with some raw video muxers
qDebug() << "raw";
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = videoStream->index;
pkt.data = (uint8_t *)avFrame;
pkt.size = sizeof(AVPicture);
pkt.pts = pkt.dts = frame.get_timestamp();
}
else
{
*/
//avFrame->pts = av_rescale_q(avFrame->pts, videoStream->time_base, videoStream->codec->time_base);
if (avcodec_encode_video2(videoStream->codec, &pkt, avFrame, &got_packet) < 0)
{
qDebug() << "Error encoding video frame";
return false;
}
if (!got_packet)
{ // packet is empty -> avcodec_encode_video2() needs more frames to begin encoding
qDebug() << "Encoding: packet is empty";
return true;
}
/** } */
// rescale output packet timestamp values from codec to stream timebase
av_packet_rescale_ts(&pkt, videoStream->codec->time_base, videoStream->time_base);
pkt.stream_index = videoStream->index;
// Write the compressed frame to the media file.
if (av_interleaved_write_frame(formatContext, &pkt) < 0)
{
qDebug() << "Error: av_interleaved_write_frame.";
return false;
}
av_free_packet(&pkt);
return true;
}
bool AVWriter::write_last_frames()
{
int got_packet = 1;
AVPacket pkt;
//AVPacket pkt = { 0 };
// Set .data and .size to 0; it is needed for avcodec_encode_video2() to allocate the buffer
pkt.data = nullptr;
pkt.size = 0;
av_init_packet(&pkt);
// Get delayed frames - empty the buffer
while (got_packet)
{
qDebug() << "Writing a last frame";
if (avcodec_encode_video2(videoStream->codec, &pkt, NULL, &got_packet) < 0)
{
qDebug() << "Error encoding video frame when writing last frames";
return false;
}
if (got_packet)
{
av_packet_rescale_ts(&pkt, videoStream->codec->time_base, videoStream->time_base);
pkt.stream_index = videoStream->index;
// Write the compressed frame to the media file.
if (av_interleaved_write_frame(formatContext, &pkt) < 0)
{
qDebug() << "Error: av_interleaved_write_frame() when writing last frames";
return false;
}
av_free_packet(&pkt);
}
}
return true;
}
bool AVWriter::write_audio_packet(AVPacket &pkt)
{
pkt.pos = -1;
pkt.stream_index = audioStream->index;
if (av_interleaved_write_frame(formatContext, &pkt) < 0)
{
qDebug() << "Error: Writing audio packet";
return false;
}
return true;
}
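For reference, the extension deduction in initialize_output() relies on FFmpeg format names being comma-separated lists (e.g. "mov,mp4,m4a,3gp,3g2,mj2"); a standalone sketch of the same string handling, with example values assumed:

#include <iostream>
#include <string>

// Mirrors the logic above: the first token of a comma-separated FFmpeg
// format name is used as the container hint for the output context.
static std::string first_format_token(const std::string &formatName)
{
    auto pos = formatName.find(',');
    return (pos == std::string::npos) ? formatName : formatName.substr(0, pos);
}

int main()
{
    std::cout << first_format_token("mov,mp4,m4a,3gp,3g2,mj2") << "\n"; // "mov"
    std::cout << first_format_token("avi") << "\n";                     // "avi"
    return 0;
}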

111
Sources/avwriter.h Normal file
View File

@ -0,0 +1,111 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by the Ministry of the Interior of the Czech Republic.
//
//------------------------------------------------------------------------------
/*!
@file avwriter.h
@brief Header file
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#ifndef AVWRITER_H
#define AVWRITER_H
extern "C"{
#include <ffmpeg/libavcodec/avcodec.h>
#include <ffmpeg/libavformat/avformat.h>
#include <ffmpeg/libswscale/swscale.h>
#include <ffmpeg/libavutil/common.h>
#include <ffmpeg/libavutil/imgutils.h>
#include <ffmpeg/libavutil/mathematics.h>
#include <ffmpeg/libavutil/samplefmt.h>
#include <ffmpeg/libavutil/avassert.h>
#include <ffmpeg/libavutil/channel_layout.h>
#include <ffmpeg/libavutil/opt.h>
#include <ffmpeg/libavutil/timestamp.h>
#include <ffmpeg/libswresample/swresample.h>
}
#include "videoframe.h"
class AVWriter
{
public:
AVWriter();
~AVWriter();
/**
* Initializes contexts, opens codecs and opens the output file for writing.
* @param filename Output filename
* @param inVideoStream Input video stream
* @param inAudioStream Input audio stream
* @param inFormatName Name of the input file format
* @param inFileExtension Extension of the input file
* @return True if initialization successful
*/
bool initialize_output(std::string filename, AVStream const *inVideoStream, AVStream const *inAudioStream,
char const *inFormatName, std::string inFileExtension);
/**
* Correct closing of the output file.
* @return True if successful
*/
bool close_output();
/**
* Writes the video frame to the output video stream.
* @param frame Frame to be written
* @return True if successful
*/
bool write_video_frame(VideoFrame &frame);
/**
* Writes the audio packet to the output audio stream.
* @param pkt Packet to be written
* @return True if successful
*/
bool write_audio_packet(AVPacket &pkt);
/**
* Writes all frames remaining in the encoder, emptying its buffers.
* @return True if successful
*/
bool write_last_frames();
private:
/**
* Adds a new stream (either audio or video) for creating an output media file.
* @param outputStream Output stream that is initialized by this function
* @param inputStream Input stream; its values are used for outputStream initialization
* @return True if successful
*/
bool add_stream(AVStream **outputStream, AVStream const *inputStream);
private:
AVFormatContext *formatContext;
AVStream *videoStream;
AVStream *audioStream;
const int outputFormat = AV_PIX_FMT_YUV420P;
};
#endif // AVWRITER_H
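A hedged usage sketch of the interface above (error handling trimmed; the player object and the ".mp4" extension are assumptions for illustration):

// Sketch only: "player" stands for an opened demuxer such as FFmpegPlayer below.
AVWriter writer;
if (writer.initialize_output("out", player.get_video_stream(),
                             player.get_audio_stream(),
                             player.get_format_name(), ".mp4"))
{
    VideoFrame frame;
    while (player.get_next_frame(&frame)) // decode...
        writer.write_video_frame(frame);  // ...re-encode and mux
    writer.write_last_frames();           // flush delayed encoder frames
    writer.close_output();                // write trailer, close file
}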

39
Sources/colors.cpp Normal file
View File

@ -0,0 +1,39 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by the Ministry of the Interior of the Czech Republic.
//
//------------------------------------------------------------------------------
/*!
@file colors.cpp
@brief Implementation of methods ...
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#include "colors.h"
const unsigned Colors::BLACK = 1;
const unsigned Colors::GRAY = 2;
const unsigned Colors::SILVER = 3;
const unsigned Colors::WHITE = 4;
const unsigned Colors::RED = 5;
const unsigned Colors::GREEN = 6;
const unsigned Colors::BLUE = 7;
const unsigned Colors::YELLOW = 8;
const unsigned Colors::CYAN = 9;
const unsigned Colors::MAGENTA = 10;

45
Sources/colors.h Normal file
View File

@ -0,0 +1,45 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by the Ministry of the Interior of the Czech Republic.
//
//------------------------------------------------------------------------------
/*!
@file colors.h
@brief Header file
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#ifndef COLORS_H
#define COLORS_H
class Colors
{
public:
static const unsigned BLACK;
static const unsigned GRAY;
static const unsigned SILVER;
static const unsigned WHITE;
static const unsigned RED;
static const unsigned GREEN;
static const unsigned BLUE;
static const unsigned YELLOW;
static const unsigned CYAN;
static const unsigned MAGENTA;
};
#endif // COLORS_H

524
Sources/ffmpegplayer.cpp Normal file
View File

@ -0,0 +1,524 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by the Ministry of the Interior of the Czech Republic.
//
//------------------------------------------------------------------------------
/*!
@file ffmpegplayer.cpp
@brief Implementation of methods ...
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#include "ffmpegplayer.h"
#include <QDebug>
#include <cassert>
#define VIDEOTRACKING_MS2DURATION 1000
#define VIDEOTRACKING_TIME_BASE_Q AVRational{1, AV_TIME_BASE} // original AV_TIME_BASE_Q gives syntax error
#define VIDEOTRACKING_MS_TIME_BASE_Q AVRational{1, 1000}
FFmpegPlayer::FFmpegPlayer(std::string videoAddr, QProgressDialog const *progressDialog) :
scalingMethod(SWS_BILINEAR)
{
//av_register_all();
videoStreamID = -1; // -1 -> no video stream
audioStreamID = -1; // -1 -> no audio stream
formatContext = nullptr;
newFrame = nullptr;
videoContext = nullptr;
videoContextOrig = nullptr;
firstTimestamp = 0;
firstTimestampSet = false;
firstPts = 0;
firstPtsSet = false;
//firstPtsStream = 0;
// Open video file
if (avformat_open_input(&formatContext, videoAddr.c_str(), NULL, NULL) != 0)
{
qDebug() << "ffmpeg: Couldn't open the file.";
throw OpenException();
}
// Retrieve stream information
if (avformat_find_stream_info(formatContext, NULL)<0)
{
qDebug() << "ffmpeg: No stream information found.";
throw OpenException();
}
av_dump_format(formatContext, 0, videoAddr.c_str(), 0);
unsigned int i;
// Find first video and first audio stream
for (i = 0; i < formatContext->nb_streams; i++)
{
if (formatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && videoStreamID < 0)
videoStreamID = i;
if (formatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audioStreamID < 0)
audioStreamID = i;
if (videoStreamID >= 0 && audioStreamID >= 0)
break;
}
if (videoStreamID == -1)
{
qDebug() << "ffmpeg: Hasn't found any video stream.";
throw OpenException();
}
// Get a pointer to the codec context for the video stream
videoContextOrig = formatContext->streams[videoStreamID]->codec;
//videoContext=formatContext->streams[videoStreamID]->codec;
AVCodec *videoCodec = nullptr;
// Find a decoder for the video stream
videoCodec = avcodec_find_decoder(videoContextOrig->codec_id);
if (videoCodec == nullptr)
{
qDebug() << "ffmpeg: Codec not supported.";
throw OpenException();
}
// Copy context
// ! Original AVCodecContext cannot be used directly; it must be copied first
// ...with avcodec_copy_context()
videoContext = avcodec_alloc_context3(videoCodec); // allocate memory
if (videoContext == nullptr)
{
qDebug() << "ffmpeg error: Allocation.";
throw OpenException();
}
if (avcodec_copy_context(videoContext, videoContextOrig) != 0)
{
qDebug() << "ffmpeg: Couldn't copy codec context.";
throw OpenException();
}
// Open video codec
if (avcodec_open2(videoContext, videoCodec, NULL) < 0)
{
qDebug() << "ffmpeg: Could not open codec.";
throw OpenException();
}
// Allocate memory for a video frame that is used for reading new frames
newFrame = av_frame_alloc();
if (newFrame == nullptr)
{
qDebug() << "ffmpeg: Cannot allocate memory for newFrame";
throw OpenException();
}
// get timeBase of the video stream
timeBase = formatContext->streams[videoStreamID]->time_base;
qDebug() << "frame count: " << get_frame_count();
qDebug() << "keyframe every: " << videoContext->gop_size << "x frame";
//analyze_video(qApplication);
analyze_video(progressDialog);
}
FFmpegPlayer::~FFmpegPlayer()
{
av_free(newFrame);
// Close the codecs
// avcodec_close(enContext);
avcodec_close(videoContext);
avcodec_close(videoContextOrig);
// Close the video file
avformat_close_input(&formatContext);
qDebug() << "ffmpeg deleted";
}
void FFmpegPlayer::analyze_video(QProgressDialog const *progressDialog)
{
AVPacket packet;
while (true)
{
//if (qApplication)
// qApplication->processEvents(); // Keeps progress bar active
if (progressDialog->wasCanceled()) // User clicked "Cancel"
{
throw UserCanceledOpeningException();
}
qApp->processEvents(); // Keeps progress bar active
if (av_read_frame(formatContext, &packet) < 0)
{
av_free_packet(&packet);
break;
}
// Is this a packet from the video stream?
if (packet.stream_index == videoStreamID)
{
if (!firstPtsSet)
{
//firstPts = packet.dts;
firstPts = packet.pts;
firstPtsSet = true;
//firstPtsStream = packet.stream_index;
if (!seek_first_packet())
{
qDebug() << "analyze_video(): Cannot seek the first packet 1";
throw OpenException();
}
continue;
}
else
{
//static unsigned long frameID = 1;
//framesIdMap[frameID] = packet.pts;
//framesTimestampSet[packet.pts] = frameID++;
framesTimestampSet.insert(packet.pts);
}
}
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
}
for (const auto &timestamp: framesTimestampSet)
{
framesIndexVector.push_back(timestamp);
}
qDebug() << "number of frames:" << framesTimestampSet.size();
qDebug() << "beginning:" << *(framesTimestampSet.begin());
if (!seek_first_packet())
{
qDebug() << "analyze_video(): Cannot seek the first packet 2";
throw OpenException();
}
}
double FFmpegPlayer::get_fps() const
{
// return formatContext->bit_rate;
// return formatContext->streams[videoStreamID]->nb_frames * AV_TIME_BASE / static_cast<double>(formatContext->duration);
return formatContext->streams[videoStreamID]->nb_frames * VIDEOTRACKING_MS2DURATION / static_cast<double>(get_total_time());
}
unsigned long FFmpegPlayer::get_frame_count() const
{
return framesTimestampSet.size();
//return formatContext->streams[videoStreamID]->nb_frames;
}
// in ms
unsigned long FFmpegPlayer::get_total_time() const
{
// Time of the last frame
return get_time_position_by_timestamp(*(framesTimestampSet.rbegin()));
//return formatContext->duration / VIDEOTRACKING_MS2DURATION;
}
unsigned int FFmpegPlayer::get_height() const
{
return videoContext->height;
}
unsigned int FFmpegPlayer::get_width() const
{
return videoContext->width;
}
const AVStream *FFmpegPlayer::get_video_stream()
{
if (videoStreamID < 0) // No video stream detected
return nullptr;
return formatContext->streams[videoStreamID];
}
const AVStream *FFmpegPlayer::get_audio_stream()
{
if (audioStreamID < 0) // No audio stream detected
return nullptr;
else
return formatContext->streams[audioStreamID];
}
/**
int64_t FFmpegPlayer::get_frame_timestamp() const
{
//return videoContext->frame_number;
return lastTimestamp;
}
*/
unsigned long FFmpegPlayer::get_time_position_by_timestamp(int64_t timestamp) const
{
return av_rescale_q(timestamp, timeBase, AVRational{1, VIDEOTRACKING_MS2DURATION});
}
/**
unsigned long FFmpegPlayer::get_time_position() const
{
//videoContext->ticks_per_frame;
return av_rescale_q(get_frame_timestamp(), timeBase, AVRational{1, VIDEOTRACKING_MS2DURATION});
}
*/
bool FFmpegPlayer::get_current_frame(VideoFrame *frame)
{
if (frame == nullptr)
return false;
if (!frame->set_frame(newFrame))
return false;
frame->set_timestamp(lastTimestamp);
//frame->set_time_position(get_time_position_by_timestamp(lastTimestamp + newFrame->pkt_duration));
//frame->set_frame_number(framesTimestampSet.find(lastTimestamp)->second);
frame->set_frame_number(std::distance(framesTimestampSet.begin(), framesTimestampSet.find(lastTimestamp))+1);
frame->set_time_position(get_time_position_by_timestamp(lastTimestamp));
//qDebug() << "duration: " << newFrame->pkt_duration;
return true;
}
bool FFmpegPlayer::get_frame_by_time(VideoFrame *resultFrame, unsigned long time)
{
int64_t approximateTimestamp = av_rescale_q(time, VIDEOTRACKING_MS_TIME_BASE_Q, timeBase);
// Find first timestamp that is not lower than the approximateTimestamp
auto iterator = framesTimestampSet.lower_bound(approximateTimestamp);
if (iterator == framesTimestampSet.end()) // approximateTimestamp is higher than all timestamps; use the highest timestamp
iterator--;
int64_t exactTimestamp = *iterator;
return get_frame_by_timestamp(resultFrame, exactTimestamp);
}
/** Seeks the keyframe at (or before) the desired timestamp, then decodes
* forward until the frame with exactly that timestamp is reached. */
bool FFmpegPlayer::get_frame_by_timestamp(VideoFrame *resultFrame, int64_t timestamp)
{
auto iterator = framesTimestampSet.find(timestamp);
int64_t seekTimestamp = timestamp;
while (true)
{ // Seek a KEYFRAME at the desired timestamp or lower.
// av_seek_frame() is not accurate: it might return a frame at a higher timestamp than
// wanted. Thus, it is necessary to seek lower (if this happens) until the returned
// KEYFRAME is no higher than the desired frame.
if (av_seek_frame(formatContext, videoStreamID, seekTimestamp, AVSEEK_FLAG_BACKWARD) < 0) // Find first previous frame; might not be a keyframe
//if (av_seek_frame(formatContext, videoStreamID, timestampPosition, 0) < 0)
{
qDebug() << "ERROR-get_frame_by_timestamp: seeking frame 1";
return false;
}
avcodec_flush_buffers(videoContext);
if (!read_frame(pkt))
return false;
if (lastTimestamp <= timestamp)
break; // This is what we were looking for
if (iterator == framesTimestampSet.begin())
return false; // This is the first frame but the desired timestamp is lower
seekTimestamp = *(--iterator);
}
//qDebug() << "I want" << timestamp << "I've got" << lastTimestamp;
//while (lastTimestamp != previousTimestamp)
while (lastTimestamp < timestamp)
{
if (!read_frame(pkt))
return false;
}
//qDebug() << "final" << lastTimestamp;
if (!get_current_frame(resultFrame))
return false;
return true;
}
bool FFmpegPlayer::get_frame_by_number(VideoFrame *resultFrame, unsigned long frameNumber)
{
int64_t timestamp = framesIndexVector[frameNumber-1];
return get_frame_by_timestamp(resultFrame, timestamp);
}
bool FFmpegPlayer::get_next_frame(VideoFrame *resultFrame)
{
if (!read_frame(pkt))
return false;
if (!get_current_frame(resultFrame))
{
qDebug() << "Cannot get current frame";
return false;
}
return true;
}
bool FFmpegPlayer::get_previous_frame(VideoFrame *resultFrame)
{
int64_t currentTimestamp = lastTimestamp;
//qDebug() << "get_previous_frame: currentTimestamp" << currentTimestamp;
if (currentTimestamp == firstTimestamp) // There is no previous frame
return false;
auto iterator = framesTimestampSet.find(currentTimestamp);
int64_t previousTimestamp = *(--iterator);
//qDebug() << "get_previous_frame: previousTimestamp" << previousTimestamp << iterator->second;
get_frame_by_timestamp(resultFrame, previousTimestamp);
return true;
}
// read_frame cannot be called twice at the same time (at least as long as the same AVPacket is used)
// AVFrame *newFrame needs to be allocated beforehand (no buffer needed)
bool FFmpegPlayer::read_frame(AVPacket &packet, bool onlyVideoPackets, bool *isAudio)
{
assert(onlyVideoPackets || isAudio != nullptr); // isAudio needs to be defined if audioPackets should be searched too
int frameFinished;
bool lastFrames = false;
while (true)
{
if (av_read_frame(formatContext, &packet) < 0)
{
av_free_packet(&packet);
if (videoContext->codec->capabilities & CODEC_CAP_DELAY)
{ // delayed frames at the end of the video
lastFrames = true;
qDebug() << "last packets";
packet.data = NULL;
packet.size = 0;
packet.stream_index = videoStreamID;
}
else
return false;
}
// Is this a packet from the video stream?
if (packet.stream_index == videoStreamID)
{
//qDebug() << "readFrame pts" <<newFrame->pts;
// Decode video frame
if (avcodec_decode_video2(videoContext, newFrame, &frameFinished, &packet) < 0)
{
av_free_packet(&packet);
return false;
}
// frameFinished==0 -> no frame could be decompressed
if (frameFinished)
{
lastTimestamp = newFrame->pkt_pts;
av_free_packet(&packet);
if (!firstTimestampSet)
{
firstTimestamp = lastTimestamp;
qDebug() << "First timestamp is" << firstTimestamp;
firstTimestampSet = true;
}
if (!onlyVideoPackets)
*isAudio = false;
return true;
}
else if (lastFrames)
{
qDebug() << "last frames finished";
return false;
}
}
else if(!onlyVideoPackets && packet.stream_index == audioStreamID)
{ // Do not forget to free packet after using it
*isAudio = true;
return true;
}
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
}
return false;
}
bool FFmpegPlayer::seek_first_packet()
{
assert(firstPtsSet);
qDebug() << "stream id" << videoStreamID;
qDebug() << "first pts" << firstPts;
//if (av_seek_frame(formatContext, videoStreamID, timestamp, AVSEEK_FLAG_BACKWARD) < 0)
if (av_seek_frame(formatContext, videoStreamID, firstPts, AVSEEK_FLAG_BACKWARD) < 0)
{
qDebug() << "ERROR seek_first_packet()";
return false;
}
avcodec_flush_buffers(videoContext);
return true;
}
// Returns the name of input file format
char const *FFmpegPlayer::get_format_name()
{
return formatContext->iformat->name;
}
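All ms<->timestamp conversions above go through av_rescale_q(); a tiny standalone check of that arithmetic (the 1/90000 time base is an assumed example):

extern "C" {
#include <libavutil/mathematics.h>
}
#include <cstdio>

int main()
{
    AVRational timeBase{1, 90000}; // e.g. a typical MPEG-TS video stream
    // 540000 ticks * (1/90000) s = 6 s = 6000 ms, which is what
    // get_time_position_by_timestamp() computes for its stream.
    int64_t ms = av_rescale_q(540000, timeBase, AVRational{1, 1000});
    std::printf("%lld ms\n", static_cast<long long>(ms)); // prints "6000 ms"
    return 0;
}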

227
Sources/ffmpegplayer.h Normal file
View File

@ -0,0 +1,227 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by the Ministry of the Interior of the Czech Republic.
//
//------------------------------------------------------------------------------
/*!
@file ffmpegplayer.h
@brief Header file
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#ifndef FFMPEGPLAYER_H
#define FFMPEGPLAYER_H
#include <set>
#include <string> // For video path (videoAddr)
#include <vector> // For framesIndexVector
#include "videoframe.h"
#include <QApplication>
#include <QProgressDialog>
//TODO: Are they needed in the header file or can they be moved to the source?
#include <opencv/cv.h>
#include <opencv/cvaux.h>
#include <opencv/cxcore.h>
#include <opencv/highgui.h>
extern "C"{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
#include <libavutil/avassert.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/timestamp.h>
#include <libswresample/swresample.h>
}
struct OpenException : public std::exception{};
struct UserCanceledOpeningException : public std::exception{};
class FFmpegPlayer
{
public:
/**
* Constructor
* @param videoAddr Path to a video file
* @param progressDialog Qt progress dialog for displaying information about video opening
*/
FFmpegPlayer(std::string videoAddr, QProgressDialog const *progressDialog);
/**
* Destructor
*/
~FFmpegPlayer();
/**
* Returns information about frames per second.
* @return Frames per second
*/
double get_fps() const;
/**
* Returns information about number of frames.
* @return Number of frames
*/
unsigned long get_frame_count() const;
/**
* Returns information about video length.
* @return Video length in milliseconds
*/
unsigned long get_total_time() const;
/**
* Returns information about video height.
* @return Video height
*/
unsigned int get_height() const;
/**
* Returns information about video width.
* @return Video width
*/
unsigned int get_width() const;
//int64_t get_frame_timestamp() const;
//int get_time_position() const; // In milliseconds
/**
* Returns information about the input video stream.
* @return Input video stream
*/
AVStream const *get_video_stream();
/**
* Returns information about the input audio stream.
* @return Input audio stream
*/
AVStream const *get_audio_stream();
/**
* Reads a frame by its time position and returns it.
* @param resultFrame Read frame
* @param time Time position
* @return True if successful
*/
bool get_frame_by_time(VideoFrame *resultFrame, unsigned long time);
/**
* Reads a frame by its timestamp and returns it.
* @param resultFrame Read frame
* @param timestamp Timestamp
* @return True if successful
*/
bool get_frame_by_timestamp(VideoFrame *resultFrame, int64_t timestamp);
/**
* Reads a frame by its number (index) and returns it.
* @param resultFrame Read frame
* @param frameNumber Frame number (index)
* @return True if successful
*/
bool get_frame_by_number(VideoFrame *resultFrame, unsigned long frameNumber);
/**
* Reads the frame following the current one and returns it.
* @param resultFrame Read frame
* @return True if successful
*/
bool get_next_frame(VideoFrame *resultFrame);
/**
* Reads the frame preceding the current one and returns it.
* @param resultFrame Read frame
* @return True if successful
*/
bool get_previous_frame(VideoFrame *resultFrame);
// reads a new frame into this->newFrame
/**
* Reads a new frame from the input video stream. The decoded frame is stored in the internal newFrame.
* @param packet Packet structure for storing the read packet; It is also used for returning audio packets
* @param onlyVideoPackets If false - audio packets are read as well as video packets. If true - only video packets are read.
* @param isAudio Is set to true if the function returned an audio packet in AVPacket &packet
* @return True if successful
*/
bool read_frame(AVPacket &packet, bool onlyVideoPackets=true, bool *isAudio=nullptr);
/**
* Returns the current frame that has already been read.
* @param frame Read frame
* @return True if successful
*/
bool get_current_frame(VideoFrame *frame);
/**
* Rewinds the video to its beginning by seeking its first packet.
* @return True if successful
*/
bool seek_first_packet();
/**
* Returns the input file format name.
* @return File format name
*/
const char *get_format_name();
private:
/**
* Converts a given frame timestamp to a time position.
* @param timestamp Timestamp
* @return Time position
*/
unsigned long get_time_position_by_timestamp(int64_t timestamp) const; // In milliseconds
/**
* Analyzes the opened video so that it can be seeked accurately.
* @param progressDialog Qt progress dialog for showing information about the analysis progress.
*/
void analyze_video(QProgressDialog const *progressDialog);
private:
const int scalingMethod;
int videoStreamID;
int audioStreamID; // -1 => no audio stream found
AVRational timeBase;
AVPacket pkt;
AVFormatContext *formatContext;
AVFrame *newFrame;
AVCodecContext *videoContext;
AVCodecContext *videoContextOrig;
int64_t firstTimestamp;
bool firstTimestampSet;
int64_t firstPts;
bool firstPtsSet;
int64_t lastTimestamp; // pkt_pts of last successfully read frame
std::vector<int64_t> framesIndexVector;
std::set<int64_t> framesTimestampSet;
};
#endif // FFMPEGPLAYER_H
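A hedged usage sketch of the player interface above (the input path and the default-constructibility of VideoFrame are assumptions; exception handling trimmed):

// Sketch only: opening a video and doing a frame-accurate seek.
QProgressDialog progress("Opening...", "Cancel", 0, 0);
FFmpegPlayer player("input.avi", &progress); // demuxes and runs analyze_video()

qDebug() << "fps:" << player.get_fps()
         << "frames:" << player.get_frame_count()
         << "length:" << player.get_total_time() << "ms";

VideoFrame frame;
if (player.get_frame_by_number(&frame, 1)) // seek via the timestamp index
    qDebug() << "first frame decoded";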

41
Sources/helpbrowser.cpp Normal file
View File

@ -0,0 +1,41 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by the Ministry of the Interior of the Czech Republic.
//
//------------------------------------------------------------------------------
/*!
@file helpbrowser.cpp
@brief Implementation of methods ...
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#include "helpbrowser.h"
HelpBrowser::HelpBrowser(QHelpEngine *helpEngine, QWidget *parent):
QTextBrowser(parent),
helpEngine(helpEngine)
{
}
QVariant HelpBrowser::loadResource(int type, const QUrl &name)
{
if (name.scheme() == "qthelp")
return QVariant(helpEngine->fileData(name));
else
return QTextBrowser::loadResource(type, name);
}

56
Sources/helpbrowser.h Normal file
View File

@ -0,0 +1,56 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by the Ministry of the Interior of the Czech Republic.
//
//------------------------------------------------------------------------------
/*!
@file helpbrowser.h
@brief Header file
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#ifndef HELPBROWSER_H
#define HELPBROWSER_H
#include <QTextBrowser>
#include <QHelpEngine>
#include <QWidget>
class HelpBrowser : public QTextBrowser
{
public:
/**
* Constructor
* @param helpEngine Help engine with loaded help files
* @param parent Parent Widget
*/
HelpBrowser(QHelpEngine *helpEngine, QWidget *parent = 0);
/**
* Loads a resource.
* @param type Resource type
* @param name Resource name
* @return Found data
*/
QVariant loadResource (int type, const QUrl& name);
private:
QHelpEngine* helpEngine;
};
#endif // HELPBROWSER_H
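A minimal wiring sketch for the browser above; the collection file path and the qthelp namespace are assumptions, not taken from this commit:

// Sketch only: hook a compiled Qt help collection to the HelpBrowser.
QHelpEngine *engine = new QHelpEngine("help/anonymizer.qhc"); // assumed path
engine->setupData(); // load the collection

HelpBrowser *browser = new HelpBrowser(engine);
// qthelp URLs are resolved through loadResource() above:
browser->setSource(QUrl("qthelp://anonymizer/doc/index.html")); // assumed namespace
browser->show();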

233
Sources/imagelabel.cpp Normal file
View File

@ -0,0 +1,233 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by the Ministry of the Interior of the Czech Republic.
//
//------------------------------------------------------------------------------
/*!
@file imagelabel.cpp
@brief Implementation of methods ...
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#include "imagelabel.h"
#include <QtDebug>
#include <QPainter>
#include <cassert>
ImageLabel::ImageLabel(QWidget *parent) : QLabel(parent)
{
//connect(this, SIGNAL(clicked()), this, SLOT(labelClicked()));
selectionEnabled = false;
isPressed = false;
//isSelected = false;
selection.angle = 0;
}
ImageLabel::~ImageLabel()
{
qDebug() << "Img destroyed";
}
void ImageLabel::show_without_selection()
{
setPixmap(QPixmap::fromImage(scaledImage));
show();
}
void ImageLabel::show_with_selection()
{
QImage editImage = scaledImage;
QPainter painter(&editImage);
assert(scaledHeight > 0);
assert(scaledWidth > 0);
double widthRatio = scaledImage.width() / static_cast<double>(scaledWidth);
double heightRatio = scaledImage.height() / static_cast<double>(scaledHeight);
painter.setBrush(QColor(0,0,0,170));
painter.setPen(QColor(255,255,255,200));
painter.drawRect(selection.x * widthRatio, selection.y * heightRatio
, selection.width * widthRatio, selection.height * heightRatio);
painter.end(); // Not needed - destructs itself
setPixmap(QPixmap::fromImage(editImage));
show();
return;
}
void ImageLabel::set_image(QImage const &image, int displayWidth, int displayHeight)
{
imgWidth = image.width();
imgHeight = image.height();
scaledImage = image.scaled(displayWidth, displayHeight, Qt::KeepAspectRatio);
resize(scaledImage.width(), scaledImage.height());
//this->setGeometry(0, 0, scaledImage.width(), scaledImage.height());
//if (isSelected)
if (selectionEnabled)
show_with_selection();
else
show_without_selection();
}
void ImageLabel::resizeEvent(QResizeEvent *event)
{
isPressed = false; // User cannot be making a selection at the time of image resizing
QLabel::resizeEvent(event);
}
void ImageLabel::mousePressEvent(QMouseEvent *event)
{
event->accept();
QLabel::mousePressEvent(event);
if (!selectionEnabled) // Selection is disabled
return;
// qDebug()<<"Clicked";
isPressed = true;
posX = event->x();
posY = event->y();
}
void ImageLabel::mouseReleaseEvent(QMouseEvent *event)
{
event->accept();
QLabel::mouseReleaseEvent(event);
if (!selectionEnabled) // Selection is disabled
return;
// qDebug()<<"Released";
isPressed = false;
/** selection.width = event->x() - selection.x;
selection.height = event->y() - selection.y;
emit send_position(selection);
*/
}
void ImageLabel::mouseMoveEvent(QMouseEvent *event)
{
event->accept();
QLabel::mouseMoveEvent(event);
if (!selectionEnabled) // Selection is disabled
return;
if (!isPressed) // No button pressed -> no need to process this event
return;
int curX = event->x() < this->size().width() ? event->x() : this->size().width()-1;
int curY = event->y() < this->size().height() ? event->y() : this->size().height()-1;
curX = curX > 0 ? curX : 0;
curY = curY > 0 ? curY : 0;
scaledWidth = scaledImage.width();
scaledHeight = scaledImage.height();
// compute selection geometry:
if (posX < curX)
{
selection.x = posX;
selection.width = curX - posX;
} else {
selection.x = curX;
selection.width = posX - curX;
}
if (posY < curY)
{
selection.y = posY;
selection.height = curY - posY;
} else {
selection.y = curY;
selection.height = posY - curY;
}
/**
if (!isSelected)
{
emit selected(true);
isSelected = true; // Selection is valid and can be displayed
}
*/
show_with_selection();
}
Selection ImageLabel::get_selection() const
{
// assert(isSelected == true);
assert(selectionEnabled == true);
double widthRatio = imgWidth / static_cast<double>(scaledWidth);
double heightRatio = imgHeight / static_cast<double>(scaledHeight);
Selection fullSizeSelection = selection;
fullSizeSelection.width *= widthRatio;
fullSizeSelection.x *= widthRatio;
fullSizeSelection.height *= heightRatio;
fullSizeSelection.y *= heightRatio;
return fullSizeSelection;
}
void ImageLabel::set_selection_enabled(bool enabled)
{
selectionEnabled = enabled;
if (!enabled)
{
isPressed = false;
show_without_selection();
}
else
{
scaledWidth = scaledImage.width();
scaledHeight = scaledImage.height();
selection.angle = 0;
selection.width = scaledWidth/2;
selection.height = scaledHeight/2;
selection.x = scaledWidth/2 - selection.width/2;
selection.y = scaledHeight/2 - selection.height/2;
show_with_selection();
}
}
/**
void ImageLabel::hide_selection()
{
isSelected = false;
show_without_selection();
}
*/
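The mapping in get_selection() is a plain proportional rescale from display coordinates back to original-image pixels; a worked example with assumed sizes:

#include <cstdio>

int main()
{
    const double imgW = 1920, imgH = 1080;     // original frame (assumed)
    const double scaledW = 960, scaledH = 540; // displayed size (assumed)
    const double wr = imgW / scaledW, hr = imgH / scaledH; // both 2.0 here
    double x = 100, y = 50, w = 200, h = 150;  // selection drawn on screen
    // The tracker must receive (200, 100) 400x300 in full-size pixels:
    std::printf("(%g, %g) %gx%g\n", x * wr, y * hr, w * wr, h * hr);
    return 0;
}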

124
Sources/imagelabel.h Normal file
View File

@ -0,0 +1,124 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by the Ministry of the Interior of the Czech Republic.
//
//------------------------------------------------------------------------------
/*!
@file imagelabel.h
@brief Header file
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#ifndef IMAGELABEL_H
#define IMAGELABEL_H
#include "selection.h"
#include <QLabel>
#include <QMouseEvent>
class ImageLabel : public QLabel
{
Q_OBJECT
public:
/**
* Constructor
* @param parent Parent widget
*/
explicit ImageLabel(QWidget *parent = 0);
/**
* Destructor
*/
~ImageLabel();
/**
* Sets a new image. It is also called when the MainWindow is resized.
* @param image Original image
* @param displayWidth Maximal allowed width (with respect to the current size of the window)
* @param displayHeight Maximal allowed height
*/
void set_image(QImage const &image, int displayWidth, int displayHeight);
/**
* Returns position of the currently selected area.
* @return Selected area
*/
Selection get_selection() const;
/**
* Enables or disables object (area) selection.
* @param enabled If true, selection is enabled. If false, selection is disabled.
*/
void set_selection_enabled(bool enabled);
//void hide_selection();
protected:
/**
* Updates the ImageLabel when the MainWindow is resized.
*/
void resizeEvent(QResizeEvent *);
/**
* Handles mouse press events over the ImageLabel area.
* @param event
*/
void mousePressEvent(QMouseEvent * event);
/**
* Handles mouse release events over the ImageLabel area.
* @param event
*/
void mouseReleaseEvent(QMouseEvent * event);
/**
* Handles mouse move events over the ImageLabel area.
* @param event
*/
void mouseMoveEvent(QMouseEvent * event);
private:
/**
* Shows the image without highlighting any selection.
*/
void show_without_selection();
/**
* Shows the image with highlighted selection.
*/
void show_with_selection();
private:
bool selectionEnabled;
bool isPressed; // true -> mouse button pressed over the image
//bool isSelected; // selection exists, therefore is displayed
int posX; // x coordinate, where the selection starts
int posY; // y coordinate, where the selection starts
int imgWidth; // width of original (unscaled) image
int imgHeight; // height of original (unscaled) image
int scaledWidth; // width of image at the time of selecting
int scaledHeight;
Selection selection; // selection geometry - values with respect to scaledWidth and scaledHeight
QImage scaledImage;
};
#endif // IMAGELABEL_H

38
Sources/main.cpp Normal file
View File

@ -0,0 +1,38 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by the Ministry of the Interior of the Czech Republic.
//
//------------------------------------------------------------------------------
/*!
@file main.cpp
@brief Implementation of methods ...
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#include "mainwindow.h"
#include <QApplication>
int main(int argc, char *argv[])
{
QApplication application(argc, argv);
MainWindow window;
window.show();
return application.exec();
}

2366
Sources/mainwindow.cpp Normal file

File diff suppressed because it is too large Load Diff

562
Sources/mainwindow.h Normal file
View File

@ -0,0 +1,562 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by the Ministry of the Interior of the Czech Republic.
//
//------------------------------------------------------------------------------
/*!
@file mainwindow.h
@brief Header file
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#ifndef MAINWINDOW_H
#define MAINWINDOW_H
#include <QMainWindow>
#include <QFileInfo>
#include <QListWidgetItem>
#include <QSettings>
#include <QHelpEngine>
#include "imagelabel.h"
#include "timelabel.h"
#include "videotracker.h"
#include "anchoritem.h"
#include "trajectoryitem.h"
// SERIALIZATION WITH CEREAL
#include <cereal/types/string.hpp>
#include <cereal/types/map.hpp>
#include <cereal/types/utility.hpp> // for std::pair
enum class SelectionState : int {NO_SELECTION, NEW_OBJECT, CHANGE_POSITION,
SET_END_FRAME, NEW_SECTION};
namespace Ui {
class MainWindow;
}
class MainWindow : public QMainWindow
{
Q_OBJECT
public:
/**
* CEREAL serialization
*/
template<class Archive>
void serialize(Archive &archive)
{
archive(CEREAL_NVP(inputFileName), CEREAL_NVP(tracker), CEREAL_NVP(customColorsCount), cereal::make_nvp("colors", colorsMap));
}
/**
* Constructor
* @param parent Parent widget
*/
explicit MainWindow(QWidget *parent = 0);
/**
* Destructor
*/
~MainWindow();
private:
/**
* Asks for saving a project before closing the application.
* @param event Event
*/
void closeEvent(QCloseEvent *event);
/**
* Connects signals and slots.
*/
void connect_signals();
/**
* Sets the GUI to a state when the end of the video was reached.
*/
void end_of_video();
/**
* When a frame is shown, the slider value and time are updated. Used only as an internal
* function that is called by other functions (show_next_frame, show_previous_frame, ...)
* that read the correct frames.
*/
void show_frame();
/**
* Sets values of Appearance forms.
*/
void set_form_values();
/**
* This is used when the appearance of the object has changed. The VideoTracker needs to be
* informed to register these changes.
*/
void change_appearance();
/**
* Fills the Trajectory tab with values.
* @param id Object ID
* @param clear If true, trajectory is cleared and filled again.
* @param showProgressBar If true, shows progress bar.
*/
void set_trajectory_tab(unsigned int id, bool clear=false, bool showProgressBar=true);
/**
* Fills the Key points tab with values.
* @param id Object ID
*/
void set_anchors_tab(unsigned int id);
/**
* Fills the Appearance tab with values.
* @param id Object ID
*/
void set_appearance_tab(unsigned int id);
/**
* Restores default values of the Appearance tab.
*/
void show_appearance_tab();
/**
* Shows a frame by a given timestamp.
* @param timestamp Timestamp
*/
void show_frame_by_timestamp(int64_t timestamp);
/**
* Confirms adding a new tracked object.
* @param selectedPosition Position of the new object
*/
void new_object_confirm(Selection const &selectedPosition);
/**
* Confirms position change.
* @param selectedPosition New position
* @return True if successful
*/
bool change_position_confirm(Selection const &selectedPosition);
/**
* Confirms the last frame selection of the object
* @return True if successful
*/
bool set_end_frame_confirm();
/**
* Confirms new trajectory change (correction).
* @return True if successful
*/
bool new_section_confirm(const Selection &selectedPosition);
/**
* Disables everything apart from the video player, shows selectionWidget.
* @param state Selected option
* @param enable_selecting If true, enables selecting.
*/
void set_selection(SelectionState state, bool enable_selecting=true);
/**
* Asks a user for an object name. The new object name is stored in newObjectName.
* @param change
* @return True if name entered correctly. False if user clicked Cancel.
*/
bool enter_object_name(bool change);
/**
* Sets ui->objectsBox to display tracked objects that exist in the tracker.
* @param noTabsUpdate If true, do not change tabs according to the current object's index.
* This is used when an object is selected right after calling this function,
* so setting the tabs here would be useless. This saves the time that would
* be spent setting the tabs (especially the Trajectory tab takes a long time
* to set up).
*/
void show_objects_box(bool noTabsUpdate=false);
/**
* Sets the main application menu. Needs to be called AFTER enabling/disabling widgets (that
* affect buttons). It needs to be called only once when changing a group of buttons, but
* always AFTER all changes are made. Also sets the window's title.
void set_application_menu();
/**
* When appearance changes were made, should they be confirmed or discarded?
* @return True if changes were confirmed, discarded, or if no changes were made. False if the
* user pressed the "Cancel" button and nothing was done with the appearance changes.
*/
bool ask_save_appearance_changes();
/**
* When project is being closed, should it be saved?
* @return True if the project was saved or the user chose not to save it. False if the "Cancel" button was pressed.
*/
bool ask_save_project();
/**
* Restores the initial application settings.
*/
void initial_application_settings();
/**
* Pauses the video playback.
*/
void pause();
/**
* Shows the first frame of the video.
*/
void show_first_frame();
/**
* An internal function used when changing playback speed.
*/
void speed_general();
/**
* Shows a dialog for opening a video.
* The result is stored in inputFileName.
* @return False if Cancel button pressed. True otherwise.
*/
bool open_video_dialog();
/**
* Sets the application to display a successfully opened video.
*/
void open_video_successful();
/**
* Lets the user select the object color with a color picker.
*/
void custom_color();
/**
* Lets the user select the object border color with a color picker.
*/
void custom_border_color();
private slots:
/**
* Context menu that is displayed when clicking on an item in the Trajectory tab
* @param item Clicked item
*/
void show_anchors_menu(QListWidgetItem *item);
/**
* When an object is selected, display its settings
*/
void set_object_settings();
/**
* Slider has been pressed.
*/
void slider_pressed();
/**
* Slider has been released.
*/
void slider_released();
/**
* Shows a frame by a given time position.
* @param position Time position
*/
void show_frame_by_time(int position);
/**
* Sets playback faster.
*/
void faster();
/**
* Sets playback slower.
*/
void slower();
/**
* Shows the following frame.
*/
void show_next_frame();
/**
* Shows the previous frame.
*/
void show_previous_frame();
/**
* Plays / pauses the video.
*/
void play();
/**
* Stops the video
*/
void stop();
/**
* Opens a new video
*/
void open_video();
/**
* Allows users to create a custom color by selecting it with a color picker.
* @param newColorID Used for returning the ID of the created color
* @param initialColor Initial color of the picker
* @return True if a color selected. False if Cancel button pressed.
*/
bool color_picker(unsigned int &newColorID, const ObjectColor &initialColor);
/**
* Creates an output video file with the tracked objects.
*/
void create_output();
/**
* Adds a new object. After receiving an object name, sets the UI for selecting an area
* for the TrackingAlgorithm.
*/
void add_new_object();
/**
* Sets the GUI to look like it did before adding a new object.
*/
void selection_end();
/**
* Sets a new picture when the label ui->videoLabel is resized.
*/
void reload_video_label();
/**
* Changes the shape of the object.
*/
void change_shape();
/**
* Changes the border thickness of the object.
*/
void change_border_thickness();
/**
* Changes the border color of the object.
*/
void change_border_color();
/**
* Changes the color of the object.
*/
void change_color();
/**
* Changes the object to be filled with a color.
*/
void change_draw_inside();
/**
* Changes the object so that its border is drawn.
*/
void change_draw_border();
/**
* Changes defocusing size (square size) for the object.
*/
void change_defocus_size();
/**
* Changes the object to be defocused.
*/
void change_defocus();
/**
* Discards changes to the object appearance.
*/
void discard_appearance_changes();
/**
* Confirms changes to the object appearance.
*/
void confirm_appearance_changes();
/**
* Area / video position was selected and confirmed.
* Calls the responsible function, either for New object, Position change, Setting
* the object's end frame, or New trajectory change.
*/
void selection_confirmed();
/**
* Initiates a trajectory change (correction)
*/
void add_trajectory_change();
/**
* Changes name of the object.
*/
void change_name();
/**
* Removed the object.
*/
void delete_object();
/**
* Switches the application to Czech.
*/
void set_czech_language();
/**
* Switches the application to English.
*/
void set_english_language();
/**
* Sets playback speed to its original value.
*/
void original_speed();
/**
* Sets object tracking till the end of the video.
*/
void set_video_end();
/**
* Sets frame where tracking of the object shall end.
*/
void set_end_frame();
/**
* Changes beginning of the object.
*/
void set_change_beginning();
/**
* Shows the previous frame
*/
void step_back();
/**
* Shows the following frame
*/
void step_forward();
/**
* Saves the project in XML or JSON.
* @return True if successful.
*/
bool save_project();
/**
* Loads a project in XML or JSON.
*/
void load_project();
/**
* Shows a frame by a given number (index)
* @param position Number (index)
*/
void show_frame_by_number(unsigned long position);
/**
* Displays time positions.
*/
void display_time();
/**
* Displays frame numbers.
*/
void display_frame_numbers();
/**
* Sets / unsets showing the original video beside the altered one.
*/
void show_original_video();
/**
* Computes the whole trajectory for the object.
*/
void compute_trajectory();
/**
* Shows frame of the selected trajectory item.
* @param item Selected item
*/
void trajectory_show_frame(QListWidgetItem *item);
/**
* Shows application help.
*/
void show_help();
/**
* Shows information about the application.
*/
void show_about();
private:
const QString applicationName;
Ui::MainWindow *ui;
QHelpEngine *helpEngine;
QSettings *settings;
bool displayTime; //false ~ show frame numbers
bool showOriginalVideo;
QImage frame;
QImage originalFrame;
std::unique_ptr<VideoTracker> tracker;
QTimer * timer;
unsigned int timerInterval;
int timerSpeed;
SelectionState selectionState;
QString newObjectName;
AnchorItem *editingAnchorItem; // When show_anchors_menu() is called, this is set to the item being edited
bool isPlaying;
bool endOfVideo;
bool appearanceChanged;
bool projectChanged; // to ask if user wants to save it
bool settingObjectSettings;
bool isEndTimestampSet; // used with AnchorItem - determining whether to show option "Track till the end of the video"
std::string inputFileName;
// Last loaded/saved project; FileDialog starts at this path.
QString projectFileName;
// Currently opened project to be displayed in the window's title
QString projectName;
std::map<unsigned int, std::pair<std::string, ObjectColor>> colorsMap;
unsigned int customColorsCount;
std::map<unsigned int, QString> shapesMap;
Characteristics originalObjectAppearance;
Characteristics alteredObjectAppearance;
QString computingInfoText;
QString computingInfoTitle;
};
#endif // MAINWINDOW_H

1624
Sources/mainwindow.ui Normal file

File diff suppressed because it is too large Load Diff

30
Sources/objectshape.cpp Normal file
View File

@ -0,0 +1,30 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by Ministry of the Interior of the Czech republic.
//
//------------------------------------------------------------------------------
/*!
@file objectshape.cpp
@brief Implementation of methods ...
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#include "objectshape.h"
const unsigned ObjectShape::RECTANGLE = 1;
const unsigned ObjectShape::ELLIPSE = 2;

36
Sources/objectshape.h Normal file
View File

@ -0,0 +1,36 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by Ministry of the Interior of the Czech republic.
//
//------------------------------------------------------------------------------
/*!
@file objectshape.h
@brief Header file
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#ifndef OBJECTSHAPE_H
#define OBJECTSHAPE_H
struct ObjectShape
{
static const unsigned RECTANGLE;
static const unsigned ELLIPSE;
};
#endif // OBJECTSHAPE_H

128
Sources/playerslider.cpp Normal file
View File

@ -0,0 +1,128 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by Ministry of the Interior of the Czech republic.
//
//------------------------------------------------------------------------------
/*!
@file playerslider.cpp
@brief Implementation of methods ...
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#include "playerslider.h"
#include <QDebug>
PlayerSlider::PlayerSlider(QWidget *parent) : QSlider(parent)
{
}
/* When the mouse is pressed at a position other than the slider handle, a signal
* with that position is emitted so that the video can jump to the corresponding frame. */
/**
void PlayerSlider::mousePressEvent(QMouseEvent *event)
{
// QSlider mousePressEvent needs to be called so that signals
// sliderPressed with sliderMoved are emitted correctly.
// It also sets flag SliderDown when the "controller" is pressed
// and therefore no further action is needed since the pressed position
// is the current one.
QSlider::mousePressEvent(event);
if (isSliderDown())
return;
if (event->button() == Qt::LeftButton)
{
int position;
double value = 0; //0-1
if (orientation() == Qt::Horizontal)
value = event->x() / static_cast<double>(width()-1);
else // ==Qt::Vertical
value = event->y() / static_cast<double>(height()-1);
// Position is more accurate when floor() is used at the beginning
// and ceil() at the end.
if (value < 0.3)
position = floor(minimum() + (maximum()-minimum()) * value);
else if (value > 0.7)
position = ceil(minimum() + (maximum()-minimum()) * value);
else
position = round(minimum() + (maximum()-minimum()) * value);
emit sliderMoved(position);
event->accept();
}
}
*/
void PlayerSlider::mouseReleaseEvent(QMouseEvent *event)
{
qDebug() << "clicked";
// QSlider::mouseReleaseEvent needs to be called so that the signals
// sliderPressed and sliderMoved are emitted correctly.
// It also sets flag SliderDown when the "controller" is pressed
// and therefore no further action is needed since the pressed position
// is the current one.
QSlider::mouseReleaseEvent(event);
if (isSliderDown())
return;
if (event->button() == Qt::LeftButton)
{
long position;
double value = 0; //0-1
if (orientation() == Qt::Horizontal)
value = event->x() / static_cast<double>(width()-1);
else // ==Qt::Vertical
value = event->y() / static_cast<double>(height()-1);
// Position is more accurate when floor() is used at the beginning
// and ceil() at the end.
if (value < 0.3)
position = floor(minimum() + (maximum()-minimum()) * value);
else if (value > 0.7)
position = ceil(minimum() + (maximum()-minimum()) * value);
else
position = round(minimum() + (maximum()-minimum()) * value);
//unsigned long maximum = this->maximum();
//unsigned long minimum = this->minimum();
if (position > maximum())
position = maximum();
else if (position < minimum())
position = minimum();
qDebug() << "position" << position;
unsigned long returnPosition = position;
emit clicked(returnPosition);
event->accept();
}
}
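/* Worked example (illustrative): for a horizontal slider with minimum() == 0,
* maximum() == 100 and width() == 201 px, a left-button release at x == 50 gives
* value = 50 / 200.0 = 0.25; since 0.25 < 0.3 the floor() branch is taken, so
* position = floor(0 + (100 - 0) * 0.25) = 25 and clicked(25) is emitted. */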
PlayerSlider::~PlayerSlider()
{
}

65
Sources/playerslider.h Normal file
View File

@ -0,0 +1,65 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by Ministry of the Interior of the Czech republic.
//
//------------------------------------------------------------------------------
/*!
@file playerslider.h
@brief Header file
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#ifndef PLAYERSLIDER_H
#define PLAYERSLIDER_H
#include <QSlider>
#include <QMouseEvent>
class PlayerSlider : public QSlider
{
Q_OBJECT
public:
/**
* Constructor
* @param parent Parent widget
*/
explicit PlayerSlider(QWidget *parent = 0);
/**
* Destructor
*/
~PlayerSlider();
protected:
/**
* Captures mouse release events to detect a mouse click that is processed
* and clicked signal is emitted.
* @param event Mouse event
*/
void mouseReleaseEvent(QMouseEvent * event);
signals:
/**
* When a click is detected, this signal sends the position of the click.
* @param Position of the slider where the click was made
*/
void clicked(unsigned long);
};
#endif // PLAYERSLIDER_H
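A minimal usage sketch (not part of the repository sources): wiring the clicked signal to a seek action. parentWidget, frameCount and player->seek_to() are hypothetical stand-ins; only PlayerSlider and its clicked(unsigned long) signal come from this header.

// Hedged sketch; parentWidget, frameCount and player are assumed to exist.
PlayerSlider *slider = new PlayerSlider(parentWidget);
slider->setRange(0, frameCount - 1); // one slider step per frame
QObject::connect(slider, &PlayerSlider::clicked,
                 [player](unsigned long position) {
                     player->seek_to(position); // hypothetical seek method
                 });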

114
Sources/selection.h Normal file
View File

@ -0,0 +1,114 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by Ministry of the Interior of the Czech republic.
//
//------------------------------------------------------------------------------
/*!
@file selection.h
@brief Header file
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#include "objectshape.h"
#include "colors.h"
#include <cereal/types/utility.hpp>
#ifndef SELECTION
#define SELECTION
struct Selection
{
template<class Archive>
void serialize(Archive &archive)
{
archive(CEREAL_NVP(x), CEREAL_NVP(y), CEREAL_NVP(width), CEREAL_NVP(height), CEREAL_NVP(angle));
}
Selection() { }
Selection (int x, int y, int width, int height, int angle=0) :
x(x),
y(y),
width(width),
height(height),
angle(angle)
{ }
int x;
int y;
int width;
int height;
double angle; // todo: Should be double or int?
};
struct ObjectColor
{
// CEREAL serialization
template<class Archive>
void serialize(Archive &archive)
{
archive(cereal::make_nvp("red", r), cereal::make_nvp("green", g), cereal::make_nvp("blue", b));
}
ObjectColor(): r(0), g(0), b(0) { } // default constructor
ObjectColor(int r, int g, int b): r(r), g(g), b(b) { } // constructor with arguments
int r;
int g;
int b;
};
struct Characteristics
{ // Characteristics(): shape(ObjectShape::RECTANGLE), color(0,0,0), colorName("Black"), borderColor(0,0,0), borderColorName("Black"), borderThickness(3) { }
// CEREAL serialization
template<class Archive>
void serialize(Archive &archive)
{
archive(CEREAL_NVP(shape), CEREAL_NVP(defocus), CEREAL_NVP(defocusSize), CEREAL_NVP(drawInside), CEREAL_NVP(color), CEREAL_NVP(colorID), CEREAL_NVP(drawBorder), CEREAL_NVP(borderColor), CEREAL_NVP(borderColorID), CEREAL_NVP(borderThickness));
}
// Default constructor
Characteristics(): shape(ObjectShape::RECTANGLE), defocus(false), defocusSize(20),
drawInside(true), color(ObjectColor(0, 0, 0)), colorID(Colors::BLACK),
drawBorder(true), borderColor(ObjectColor(0,0,0)), borderColorID(Colors::BLACK),
borderThickness(3) { }
// Constructor with arguments
Characteristics(unsigned int shape, bool defocus, unsigned int defocusSize, bool drawInside,
ObjectColor color, unsigned int colorID, bool drawBorder, ObjectColor borderColor,
unsigned int borderColorID, int borderThickness) :
shape(shape), defocus(defocus), defocusSize(defocusSize), drawInside(drawInside), color(color),
colorID(colorID), drawBorder(drawBorder), borderColor(borderColor), borderColorID(borderColorID),
borderThickness(borderThickness) { }
//ObjectShape shape;
unsigned int shape;
bool defocus;
unsigned int defocusSize;
bool drawInside;
ObjectColor color;
unsigned int colorID;
bool drawBorder;
ObjectColor borderColor;
unsigned int borderColorID;
int borderThickness;
};
#endif // SELECTION
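A short, self-contained serialization sketch (assumes cereal's JSON archive header is available, consistent with the cereal include above); it shows how the CEREAL_NVP names appear in the output. main() and the stream setup are illustrative only.

#include <cereal/archives/json.hpp>
#include <sstream>
#include <iostream>
#include "selection.h"

int main()
{
    Selection sel(10, 20, 64, 48, 15);
    std::ostringstream os;
    {
        cereal::JSONOutputArchive archive(os); // finishes the JSON on destruction
        archive(cereal::make_nvp("selection", sel));
    }
    std::cout << os.str() << std::endl; // {"selection": {"x": 10, "y": 20, ...}}
    return 0;
}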

114
Sources/timelabel.cpp Normal file
View File

@ -0,0 +1,114 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by Ministry of the Interior of the Czech republic.
//
//------------------------------------------------------------------------------
/*!
@file timelabel.cpp
@brief Implementation of methods ...
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#include "timelabel.h"
#include <QDebug>
void TimeConversion::convert_time(Time &converted)
{
converted.hr = converted.raw / ms2hr;
unsigned long rest = converted.raw % ms2hr;
converted.min = rest / ms2min;
rest %= ms2min;
//converted.sec = (rest % ms2min) / ms2sec;
converted.sec = rest / ms2sec;
converted.ms = rest % ms2sec;
}
void TimeConversion::Time2QString(Time const &time, QString &timeString, bool showHours)
{
timeString = showHours ? "%1:%2:%3.%4" : "%1:%2.%3"; // format must follow showHours, not time.hr, so every %N gets an argument
if (showHours)
timeString = timeString.arg(time.hr);
timeString = timeString.arg(time.min, (showHours ? 2 : 1), 10, QChar('0'));
timeString = timeString.arg(time.sec, 2, 10, QChar('0')); // Second argument ~ number of digits
timeString = timeString.arg(time.ms/100, 1, 10, QChar('0')); // Second argument ~ number of digits
}
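/* Worked example: for raw == 3723456 ms, convert_time() gives
* hr = 3723456 / 3600000 = 1, rest = 123456,
* min = 123456 / 60000 = 2, rest = 3456,
* sec = 3456 / 1000 = 3, ms = 456;
* Time2QString() with showHours == true then renders "1:02:03.4"
* (only tenths of a second are shown). */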
TimeLabel::TimeLabel(QWidget *parent) : QLabel(parent)
{
showHours = true;
totalTime.raw = 0;
currentTime.raw = 0;
}
TimeLabel::~TimeLabel()
{
}
void TimeLabel::set_total_time(const unsigned long &time)
{
totalTime.raw = time;
convert_time(totalTime);
if (totalTime.hr)
showHours = true;
else
showHours = false;
Time2QString(totalTime, totalTimeString, showHours);
}
void TimeLabel::display_time(const unsigned long &time, bool includeTotal)
{
currentTime.raw = time;
convert_time(currentTime);
if (currentTime.raw > totalTime.raw && currentTime.sec > totalTime.sec)
{
qDebug() << "Error: Current time is bigger than total time";
currentTime = totalTime;
}
Time2QString(currentTime, currentTimeString, showHours);
// qDebug() << "Time current:" << currentTimeString;
// qDebug() << "Time total:" << totalTimeString;
if (includeTotal)
setText(QString("%1 / %2").arg(currentTimeString).arg(totalTimeString));
else
setText(currentTimeString);
}
void TimeLabel::set_frame_count(unsigned long frameCount)
{
totalFrameCount = frameCount;
}
void TimeLabel::display_frame_num(unsigned long frameNumber, bool includeTotal)
{
if (includeTotal)
setText(QString("%1 / %2").arg(QString::number(frameNumber)).arg(QString::number(totalFrameCount)));
else
setText(QString::number(frameNumber));
}

106
Sources/timelabel.h Normal file
View File

@ -0,0 +1,106 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by Ministry of the Interior of the Czech republic.
//
//------------------------------------------------------------------------------
/*!
@file timelabel.h
@brief Header file
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#ifndef TIMELABEL_H
#define TIMELABEL_H
#include <QLabel>
namespace TimeConversion
{
struct Time
{
unsigned long raw;
unsigned long hr;
unsigned long min;
unsigned long sec;
unsigned long ms;
};
static const unsigned long ms2hr = 3600000; // 1000*60*60 ms
static const unsigned long ms2min = 60000; // 1000*60 ms
static const unsigned long ms2sec = 1000; // 1000 ms
void convert_time(Time &converted);
void Time2QString(Time const &time, QString &timeString, bool showHours);
}
using namespace TimeConversion;
class TimeLabel : public QLabel
{
Q_OBJECT
public:
/**
* Constructor
* @param parent Parent widget
*/
explicit TimeLabel(QWidget *parent = 0);
/**
* Destructor
*/
~TimeLabel();
/**
* Sets the total video time.
* @param time Time in milliseconds
*/
void set_total_time(const unsigned long &time);
/**
* Displays given time position with the total time.
* @param time Displayed time
* @param includeTotal If true, total length is also displayed.
*/
void display_time(const unsigned long &time, bool includeTotal=true);
/**
* Sets the total number of frames.
* @param frameCount Number of frames
*/
void set_frame_count(unsigned long frameCount);
/**
* Displays given frame number with the total number of frames.
* @param frameNumber Displayed frame number
* @param includeTotal If true, total number of frames is also displayed.
*/
void display_frame_num(unsigned long frameNumber, bool includeTotal=true);
private:
Time totalTime;
Time currentTime;
QString totalTimeString;
QString currentTimeString;
bool showHours;
unsigned long totalFrameCount;
};
#endif // TIMELABEL_H
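A minimal usage sketch; parentWidget is a hypothetical parent QWidget and the values are illustrative:

// Label for a 90-second, 2250-frame video.
TimeLabel *label = new TimeLabel(parentWidget);
label->set_total_time(90000);  // ms -> total shown as "1:30.0"
label->display_time(15500);    // shows "0:15.5 / 1:30.0"
label->set_frame_count(2250);
label->display_frame_num(388); // shows "388 / 2250"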

663
Sources/trackedobject.cpp Normal file
View File

@ -0,0 +1,663 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by Ministry of the Interior of the Czech republic.
//
//------------------------------------------------------------------------------
/*!
@file trackedobject.cpp
@brief Implementation of methods ...
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#include "trackedobject.h"
#include <QDebug>
#include "objectshape.h"
// Only currentSection can have an initialized trackingAlgorithm so that memory can be correctly freed
TrackedObject::TrackedObject()
{ // CEREAL uses this constructor
endTimestampSet = false;
currentSection = nullptr;
nextSection = false;
allProcessed = false;
qDebug() << "new trackedObject";
}
TrackedObject::TrackedObject(const Characteristics &appearance, std::string objectName, int64_t initialTimestamp,
Selection initialPosition, unsigned long initialTimePosition, unsigned long initialFrameNumber,
bool endTimestampSet, int64_t endTimestamp, unsigned long endTimePosition,
unsigned long endFrameNumber) :
name(objectName),
appearance(appearance),
initialTimestamp(initialTimestamp),
//initialPosition(initialPosition),
endTimestampSet(endTimestampSet),
endTimestamp(endTimestamp),
endTimePosition(endTimePosition),
endFrameNumber(endFrameNumber)
{
add_section(initialTimestamp, initialPosition, initialTimePosition, initialFrameNumber);
currentSection = nullptr;
nextSection = false;
allProcessed = false;
//lastProcessedTimestamp = -1;
//lastProcessedTimestamp = VIDEOTRACKING_NOTHING_PROCESSED;
}
TrackedObject::~TrackedObject()
{
for (auto &section: trajectorySections)
{
if (section.second.trackingAlgorithm)
{// if there is initialized trackingAlgorithm, free its memory
// this should occur only with currentSection
delete section.second.trackingAlgorithm;
section.second.trackingAlgorithm = nullptr;
}
}
/**
if (trackingAlgorithm)
{ // was it already initialized (by initialize_section)?
delete trackingAlgorithm;
trackingAlgorithm = nullptr;
}
*/
}
void TrackedObject::add_section(int64_t initialTimestamp, Selection const &initialPosition, unsigned long initialTimePosition, unsigned long initialFrameNumber)
{
TrajectorySection newSection(initialPosition, initialTimestamp, initialTimePosition, initialFrameNumber);
trajectorySections.insert({initialTimestamp, newSection});
}
// If a section with newTimestamp already exists, that section is changed; otherwise a new section is added
bool TrackedObject::set_trajectory_section(int64_t newTimestamp, Selection position, unsigned long timePosition, unsigned long frameNumber)
{
if (endTimestampSet && endTimestamp < newTimestamp)
return false; // Section cannot begin after the end of the tracking period
else if (trajectorySections.find(newTimestamp) != trajectorySections.end())
{ // Section with this timestamp already exists
// Change that section instead of creating a new one
qDebug() << "set_trajectory_section calls change_trajectory_section";
return change_trajectory_section(newTimestamp, newTimestamp, position, timePosition, frameNumber);
}
add_section(newTimestamp, position, timePosition, frameNumber);
allProcessed = false;
int64_t lastProcessedTimestamp;
bool lastProcessedTimestampSet = get_last_processed_timestamp(lastProcessedTimestamp);
// If the new section begins before the current Beginning, or at or before the last
// processed frame, currentSection must be unset and the affected trajectory erased.
if ((newTimestamp < initialTimestamp) || // This section begins before the BEGINNING section. Therefore, set this section as beginning.
(lastProcessedTimestampSet && newTimestamp <= lastProcessedTimestamp))
{
if (currentSection)
{ // Unset it
if (currentSection->trackingAlgorithm)
{
delete currentSection->trackingAlgorithm;
currentSection->trackingAlgorithm = nullptr;
}
currentSection = nullptr;
nextSection = false;
}
if (newTimestamp < initialTimestamp)
{
initialTimestamp = newTimestamp;
trajectory.clear();
}
else // (lastProcessedTimestampSet && newTimestamp <= lastProcessedTimestamp)
{
// delete all >=newTimestamp
trajectory.erase(trajectory.find(newTimestamp), trajectory.end());
}
}
else
{ // newTimestamp > LASTPROCESSEDTIMESTAMP
// No need to unset currentSection or delete anything from trajectory.
// The currently processed frame is before this section.
// Only nextSectionTimestamp may need to be updated if the newly added section is the next one.
// The newly added is already in trajectorySections, so let it find the appropriate one itself
if (currentSection) // guard: when currentSection is unset, track_next() looks the sections up itself
{
auto nextSectionIterator = trajectorySections.upper_bound(currentSection->initialTimestamp);
if (nextSectionIterator != trajectorySections.end())
{
nextSection = true;
nextSectionTimestamp = nextSectionIterator->first;
}
else
nextSection = false;
}
}
return true;
}
bool TrackedObject::change_trajectory_section(int64_t oldTimestamp, int64_t newTimestamp, Selection position, unsigned long timePosition, unsigned long frameNumber)
{
// The oldTimestamp needs to exist
assert(trajectorySections.find(oldTimestamp) != trajectorySections.end());
if (endTimestampSet && endTimestamp < newTimestamp)
return false; // Section cannot begin after the end of the tracking period
// currentSection will be unset, therefore trackingAlgorithm deletion is needed
if (currentSection && currentSection->trackingAlgorithm)
{ // if currentSection is set and has initialized trackingAlgorithm, delete the trackingAlgorithm
// as it is not needed anymore; if needed, it would get initialized again
trajectory.erase(trajectory.find(currentSection->initialTimestamp), trajectory.end()); // Clear all the trajectory from Current section initial timestamp till the end
delete currentSection->trackingAlgorithm;
currentSection->trackingAlgorithm = nullptr;
}
currentSection = nullptr; // let track_next() find the correct currentSection and nextSection
nextSection = false;
allProcessed = false;
auto oldSection = trajectorySections.find(oldTimestamp);
if (oldSection == trajectorySections.begin())
{ // The Beginning section is being changed
assert(initialTimestamp == oldTimestamp);
trajectory.clear(); // All trajectory will be counted from the beginning
// Delete all trajectory changes (sections) that occur before this section as this is the beginning.
// This situation happens when user moves the Beginning forward while some Section was
// defined on some position between the old Beginning and the new one.
trajectorySections.erase(trajectorySections.begin(), trajectorySections.find(newTimestamp));
initialTimestamp = newTimestamp;
}
else
{ // A trajectory change section is being changed, not the Beginning
trajectory.erase(trajectory.find(newTimestamp), trajectory.end()); // Clear all the trajectory from the timestamp at which the section begins till the end
if (newTimestamp < initialTimestamp) // This section begins before the BEGINNING section. Therefore, set this section as beginning.
initialTimestamp = newTimestamp;
if (oldTimestamp < newTimestamp)
{ // Section is updated to begin later
// In this case, everything starting at the beginning of the PREVIOUS section
// (the one before oldTimestamp) must be removed from trajectory, because the
// trajectory must be recomputed from oldTimestamp onwards. Since no section
// begins at the frame oldTimestamp (the current section is being changed, not
// the one at currentTimestamp), the object position there must be computed with
// the trackingAlgorithm of the previous section. That algorithm must be
// initialized and therefore must run from the beginning of that previous
// section, which is why everything from its beginning onwards is erased.
// If oldSection was trajectorySections.begin(), decreasing the iterator would cause
// undefined behaviour. However, this branch ensures that this cannot happen.
trajectory.erase(trajectory.find((--oldSection)->first), trajectory.end());
}
}
trajectorySections.erase(oldTimestamp);
if (trajectorySections.find(newTimestamp) != trajectorySections.end())
{ // Section with this timestamp already exists
// Call change_trajectory_section to replace the old section by this new one
qDebug() << "change_trajectory_section(): Section with given timestamp exists. Thus, the old one was deleted.";
return change_trajectory_section(newTimestamp, newTimestamp, position, timePosition, frameNumber);
}
add_section(newTimestamp, position, timePosition, frameNumber);
return true;
}
/* If false is returned, the section was not deleted: the Beginning section cannot be deleted */
bool TrackedObject::delete_trajectory_section(int64_t timestamp)
{
if (timestamp == initialTimestamp) // Beginning section cannot be deleted
{
qDebug() << "Beginning section cannot be deleted";
return false;
}
if (currentSection && (currentSection->initialTimestamp >= timestamp))
{ // This trajectorySection will be erased below(==timestamp) or
// takes place after the section being deleted(>timestamp), therefore needs to be unset
if (currentSection->trackingAlgorithm)
{
delete currentSection->trackingAlgorithm;
currentSection->trackingAlgorithm = nullptr;
}
currentSection = nullptr;
nextSection = false;
}
else if (nextSection && nextSectionTimestamp == timestamp)
{ // The deleted section should have been the next section.
// Set the following section or false if no later section exists.
auto nextSectionIterator = trajectorySections.upper_bound(timestamp);
if (nextSectionIterator != trajectorySections.end())
{
nextSection = true;
nextSectionTimestamp = nextSectionIterator->first;
}
else
nextSection = false;
}
allProcessed = false;
// previousSection always exists as Beginning section cannot be deleted.
auto previousSection = --(trajectorySections.find(timestamp));
trajectorySections.erase(timestamp);
// Remove all entries that exist for frames at timestamps higher than the beginning
// of the PREVIOUS section. It is needed so the trackingAlgorithm from the previous section
// goes from its beginning to have the correct data. Thereby, the deleted trajectory will be recounted.
trajectory.erase(trajectory.find(previousSection->first), trajectory.end());
return true;
}
/* If set==false, the end frame will be unset and the object will be tracked till the end of the video
* newTimestamp and timePosition are valid only if set==true
* Returns false when given timestamp is lower than the timestamp of Beginning
*/
bool TrackedObject::change_end_frame(bool set, int64_t timestamp, unsigned long timePosition, unsigned long frameNumber)
{
if (!set)
{ // Tracking till the end of the video
if (allProcessed)
{ // All frames were processed, but now the length is extended.
// Delete trajectory from the last section's initial timestamp till the end so the
// tracking algorithm has the right values.
trajectory.erase(trajectory.find(trajectorySections.rbegin()->first), trajectory.end());
}
endTimestampSet = false;
allProcessed = false;
}
else
{ // End timestamp is set
if (timestamp < initialTimestamp)// given timestamp is lower than the timestamp of Beginning -> Not valid
return false;
if (allProcessed && timestamp > endTimestamp)
{ // All frames were processed, but now the length is extended.
// Delete trajectory from the last section's initial timestamp till the end so the
// tracking algorithm has the right values.
trajectory.erase(trajectory.find(trajectorySections.rbegin()->first), trajectory.end());
}
allProcessed = false;
endTimestampSet = true;
endTimestamp = timestamp;
endTimePosition = timePosition;
endFrameNumber = frameNumber;
// Remove all entries that exist for frames at timestamps higher than the end timestamp
trajectory.erase(trajectory.upper_bound(timestamp), trajectory.end());
if (currentSection && (currentSection->initialTimestamp > timestamp) && currentSection->trackingAlgorithm)
{ // This trajectorySection will be erased below, therefore needs to be unset
delete currentSection->trackingAlgorithm;
currentSection->trackingAlgorithm = nullptr;
currentSection = nullptr;
nextSection = false;
}
if (nextSection && (nextSectionTimestamp > endTimestamp)) // If there is a next section that is later than the end timestamp
nextSection = false;
// Remove all sections (trajectory changes) that have timestamp higher than the end timestamp
trajectorySections.erase(trajectorySections.upper_bound(timestamp), trajectorySections.end());
if (trajectory.find(timestamp) != trajectory.end())
{ // "trajectory" contains positions throughout all the object live; thus, everything is processed
set_all_processed(true);
}
}
return true;
}
void TrackedObject::change_appearance(Characteristics const &newAppearance)
{
appearance = newAppearance;
}
Characteristics TrackedObject::get_appearance() const
{
return appearance;
}
//Selection TrackedObject::track_next(cv::Mat const &frame, int64_t timestamp)
// All changes to trajectorySections make currentSection==nullptr and nextSection=false, so
// track_next() needs to find appropriate values
Selection TrackedObject::track_next(VideoFrame const *frame)
{
if (!currentSection || (nextSection && frame->get_timestamp() >= nextSectionTimestamp))
{ // Enters new section
// Close old section and initialize a new one.
int64_t currentSectionTimestamp;
if (!currentSection)
{ // First section or change to trajectorySections was made
if (trajectory.begin() == trajectory.end())
{ // Nothing is processed, set Beginning section
auto trajectoryIterator = trajectorySections.begin();
currentSection = &(trajectoryIterator->second);
currentSectionTimestamp = trajectoryIterator->first; // Is later used to count the nextSectionTimestamp
}
else
{
int64_t lastProcessed = trajectory.rbegin()->first;
// Find section that begins after the last processed frame.
// That should be the next frame. If not - entries would be missing
// and that would cause an error.
auto trajectoryIterator = trajectorySections.upper_bound(lastProcessed);
currentSection = &(trajectoryIterator->second);
currentSectionTimestamp = trajectoryIterator->first;
}
}
else
{ // currentSection was set
// Entering next section
if (currentSection->trackingAlgorithm)
{ // delete the trackingAlgorithm as it is not needed anymore;
// if needed, it would get initialized again
delete currentSection->trackingAlgorithm;
currentSection->trackingAlgorithm = nullptr;
}
currentSection = &(trajectorySections[nextSectionTimestamp]);
currentSectionTimestamp = nextSectionTimestamp;
}
auto nextSectionIterator = trajectorySections.upper_bound(currentSectionTimestamp);
if (nextSectionIterator != trajectorySections.end())
{
nextSection = true;
nextSectionTimestamp = nextSectionIterator->first;
}
else
nextSection = false;
// Now initialize the new section's tracking algorithm
Selection centerizedPosition;
currentSection->trackingAlgorithm = new TrackingAlgorithm(*(frame->get_mat_frame()),
currentSection->initialPosition, centerizedPosition);
trajectory[frame->get_timestamp()] = TrajectoryEntry(centerizedPosition, frame->get_time_position(), frame->get_frame_number());
return centerizedPosition;
}
Selection result = currentSection->trackingAlgorithm->track_next_frame(*(frame->get_mat_frame()));
trajectory[frame->get_timestamp()] = TrajectoryEntry(result, frame->get_time_position(), frame->get_frame_number());
int64_t lastProcessedTimestamp;
if (endTimestampSet && get_last_processed_timestamp(lastProcessedTimestamp) && lastProcessedTimestamp == endTimestamp)
{
set_all_processed(true);
}
return result;
}
bool TrackedObject::get_position(int64_t timestamp, Selection &trackedPosition) const
{
try
{
trackedPosition = trajectory.at(timestamp).position; // Throws an exception, when trajectory with given timestamp does not exist
} catch (std::out_of_range const &) {
return false;
}
return true;
}
bool TrackedObject::draw_mark(cv::Mat &frame, cv::Mat const &originalFrame, int64_t timestamp) const
{
//qDebug()<< "Draw mark";
Selection position;
try
{
position = trajectory.at(timestamp).position; // Throws an exception, when trajectory with given timestamp does not exist
} catch (std::out_of_range const &) {
return false;
}
if (appearance.defocus)
{
assert(appearance.defocusSize > 0);
//cv::Mat3b roiMat = imgMat(cv::Rect(hSt,vSt,hEn,vEn));
unsigned long x = position.x - position.width/2;
unsigned long y = position.y - position.height/2;
unsigned long xMax = position.width + x;
unsigned long yMax = position.height + y;
// This secures valid values
unsigned long cols = frame.cols;
unsigned long rows = frame.rows;
if (xMax > cols)
xMax = cols;
if (yMax > rows)
yMax = rows;
unsigned long i, j;
unsigned long currentSquareWidth, currentSquareHeight;
cv::Rect roiRect;
cv::Mat roi;
cv::Scalar mean;
for (i=x; i < xMax; i+=appearance.defocusSize)
{
for (j=y; j < yMax; j+=appearance.defocusSize)
{
currentSquareWidth = appearance.defocusSize;
currentSquareHeight = appearance.defocusSize;
if (i+currentSquareWidth > xMax) // Do not cross the object's border
currentSquareWidth = xMax - i;
if (j+currentSquareHeight > yMax) // Do not cross the object's border
currentSquareHeight = yMax - j;
// Width and height must not be 0 for roi to be created
if (currentSquareWidth > 0 && currentSquareHeight > 0)
{
roiRect = cv::Rect(i, j, currentSquareWidth, currentSquareHeight);
roi = originalFrame(roiRect); // originalFrame is used so as it does not involve other tracked objects
mean = cv::mean(roi);
cv::rectangle(frame, roiRect, mean, CV_FILLED);
}
}
}
}
else // Fill and/or border; not defocus
{
cv::RotatedRect rectangle = cv::RotatedRect(cv::Point2f(position.x, position.y), cv::Size2f(position.width, position.height), position.angle);
// Important note: OpenCV uses BGR, not RGB
cv::Scalar borderColor(appearance.borderColor.b, appearance.borderColor.g, appearance.borderColor.r);
cv::Scalar color(appearance.color.b, appearance.color.g, appearance.color.r);
if (appearance.shape == ObjectShape::RECTANGLE)
{
cv::Point2f vertices2f[4];
cv::Point vertices[4];
rectangle.points(vertices2f);
for (int i = 0; i < 4; i++)
vertices[i] = vertices2f[i];
if (appearance.drawInside)
cv::fillConvexPoly(frame, vertices, 4, color);
if (appearance.drawBorder && appearance.borderThickness > 0)
{
for (int i = 0; i < 4; i++)
cv::line(frame, vertices2f[i], vertices2f[(i+1)%4], borderColor, appearance.borderThickness, CV_AA);
}
}
else if (appearance.shape == ObjectShape::ELLIPSE)
{
if (appearance.drawInside)
cv::ellipse(frame,rectangle, color, -1);
if (appearance.drawBorder && appearance.borderThickness > 0)
cv::ellipse(frame,rectangle, borderColor, appearance.borderThickness);
}
}
return true;
}
std::map<int64_t, TrajectorySection> const &TrackedObject::get_trajectory_sections() const
{
return trajectorySections;
}
std::map<int64_t, TrajectoryEntry> const &TrackedObject::get_trajectory() const
{
return trajectory;
}
int64_t TrackedObject::get_initial_timestamp() const
{
return initialTimestamp;
}
int64_t TrackedObject::get_end_timestamp() const
{
return endTimestamp;
}
unsigned long TrackedObject::get_end_time_position() const
{
return endTimePosition;
}
unsigned long TrackedObject::get_end_frame_number() const
{
return endFrameNumber;
}
//int64_t TrackedObject::get_last_processed_timestamp() const
/* If false is returned, timestamp is not valid */
bool TrackedObject::get_last_processed_timestamp(int64_t &timestamp) const
{
if (trajectory.begin() == trajectory.end())
{
qDebug() << "Trajectory is empty";
return false;
}
timestamp = trajectory.rbegin()->first;
return true;
//return lastProcessedTimestamp;
}
void TrackedObject::set_all_processed(bool processed)
{
allProcessed = processed;
//qDebug() << "all processed set";
if (processed == true)
{
if (currentSection && currentSection->trackingAlgorithm)
{ // if currentSection is set and has initialized trackingAlgorithm, delete the trackingAlgorithm
// as it is not needed anymore; if needed, it would get initialized again
delete currentSection->trackingAlgorithm;
currentSection->trackingAlgorithm = nullptr;
}
currentSection = nullptr;
nextSection = false;
}
}
bool TrackedObject::is_all_processed() const
{
return allProcessed;
}
bool TrackedObject::is_end_timestamp_set() const
{
return endTimestampSet;
}
std::string TrackedObject::get_name() const
{
return name;
}
bool TrackedObject::set_name(std::string newName)
{
name = newName;
return true;
}
void TrackedObject::erase_trajectory_to_comply()
{
if (allProcessed) // Everything is already processed, sections are not needed anymore
return;
int64_t lastProcessedTimestamp;
if (!get_last_processed_timestamp(lastProcessedTimestamp))
return; // Nothing processed -> nothing to be erased
auto section = --(trajectorySections.upper_bound(lastProcessedTimestamp));
trajectory.erase(trajectory.find(section->second.initialTimestamp), trajectory.end()); // Clear all the trajectory from Current section initial timestamp till the end
}

361
Sources/trackedobject.h Normal file
View File

@ -0,0 +1,361 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by Ministry of the Interior of the Czech republic.
//
//------------------------------------------------------------------------------
/*!
@file trackedobject.h
@brief Header file
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#ifndef TRACKEDOBJECT_H
#define TRACKEDOBJECT_H
#include "trackingalgorithm.h"
#include "videoframe.h"
#include "selection.h"
#include <QDebug>
#include <cereal/types/string.hpp>
#include <cereal/types/map.hpp>
#include <cereal/types/tuple.hpp>
#include <opencv/cv.h>
#include <opencv/cvaux.h>
#include <opencv/cxcore.h>
#include <opencv/highgui.h>
#include <opencv2/imgproc.hpp>
/**
#define VIDEOTRACKING_END_OF_VIDEO -1
#define VIDEOTRACKING_ALL_PROCESSED -1
#define VIDEOTRACKING_NOTHING_PROCESSED -2
#define VIDEOTRACKING_NO_PREVIOUS_TIMESTAMP -3 // In case other function than VideoTracker::get_next_frame() is used
*/
struct TrajectorySection
{
/**
* CEREAL serialization
*/
template<class Archive>
void serialize(Archive &archive)
{
archive(CEREAL_NVP(initialPosition), CEREAL_NVP(initialTimestamp),
CEREAL_NVP(initialTimePosition), CEREAL_NVP(initialFrameNumber));
}
/**
* Constructor
*/
TrajectorySection()
{
trackingAlgorithm = nullptr;
}
/**
* Constructor
* @param initialPosition Initial object position
* @param initialTimestamp Initial timestamp of the section
* @param initialTimePosition Initial time position of the section
* @param initialFrameNumber Initial frame number of the section
*/
TrajectorySection(Selection initialPosition, int64_t initialTimestamp,
unsigned long initialTimePosition, unsigned long initialFrameNumber) :
initialPosition(initialPosition),
initialTimestamp(initialTimestamp),
initialTimePosition(initialTimePosition),
initialFrameNumber(initialFrameNumber)
{
trackingAlgorithm = nullptr;
}
Selection initialPosition;
int64_t initialTimestamp;
unsigned long initialTimePosition;
unsigned long initialFrameNumber;
TrackingAlgorithm *trackingAlgorithm;
// bool algorithmInitialized;
};
struct TrajectoryEntry
{
/**
* CEREAL serialization
*/
template<class Archive>
void serialize(Archive &archive)
{
archive(CEREAL_NVP(position), CEREAL_NVP(timePosition), CEREAL_NVP(frameNumber));
}
/**
* Constructor
*/
TrajectoryEntry() { }
/**
* Constructor
* @param position Object position
* @param timePosition Time position of the entry
* @param frameNumber Frame number of the entry
*/
TrajectoryEntry(Selection position, unsigned long timePosition, unsigned long frameNumber) :
position(position),
timePosition(timePosition),
frameNumber(frameNumber)
{ }
Selection position;
unsigned long timePosition;
unsigned long frameNumber;
};
class TrackedObject
{
public:
/**
* CEREAL serialization
*/
template<class Archive>
void serialize(Archive &archive)
{
archive(CEREAL_NVP(name), CEREAL_NVP(appearance), CEREAL_NVP(initialTimestamp),
CEREAL_NVP(endTimestampSet), CEREAL_NVP(endTimestamp), CEREAL_NVP(endTimePosition),
CEREAL_NVP(endFrameNumber), CEREAL_NVP(trajectorySections), CEREAL_NVP(allProcessed),
CEREAL_NVP(trajectory));
}
/**
* Constructor
*/
TrackedObject();
/**
* Constructor
* @param appearance Object appearance
* @param objectName Object name
* @param initialTimestamp Initial timestamp
* @param initialPosition Initial object position
* @param initialTimePosition Initial time position
* @param initialFrameNumber Initial frame number
* @param endTimestampSet Is end timestamp set?
* @param endTimestamp End timestamp
* @param endTimePosition End time position
* @param endFrameNumber End frame number
*/
TrackedObject(Characteristics const &appearance, std::string objectName, int64_t initialTimestamp,
Selection initialPosition, unsigned long initialTimePosition, unsigned long initialFrameNumber,
bool endTimestampSet, int64_t endTimestamp, unsigned long endTimePosition,
unsigned long endFrameNumber);
/**
* Destructor
*/
~TrackedObject();
/**
* Adds a trajectory section of the object
* @param initialTimestamp Initial timestamp of the section
* @param initialPosition Object position at the initial timestamp
* @param initialTimePosition Initial time position of the section
* @param initialFrameNumber Initial frame number of the section
*/
void add_section(int64_t initialTimestamp, Selection const &initialPosition, unsigned long initialTimePosition, unsigned long initialFrameNumber);
/**
* Sets a trajectory section of the object.
* @param newTimestamp New timestamp of the section
* @param position Object position at the first frame of the section
* @param timePosition Time position of the section
* @param frameNumber Frame number of the section
* @return True if successful
*/
bool set_trajectory_section(int64_t newTimestamp, Selection position, unsigned long timePosition, unsigned long frameNumber);
/**
* Changes a trajectory section of the object.
* @param oldTimestamp Old timestamp of the section
* @param newTimestamp New timestamp of the section
* @param position Object position at the first frame of the section
* @param timePosition Time position of the section
* @param frameNumber Frame number of the section
* @return True if successful
*/
bool change_trajectory_section(int64_t oldTimestamp, int64_t newTimestamp, Selection position, unsigned long timePosition, unsigned long frameNumber);
/**
* Changes the end frame of the object.
* @param set Set / unset last frame
* @param timestamp Timestamp of the last frame
* @param timePosition Time position of the last frame
* @param frameNumber Frame number of the last frame
* @return True if successful
*/
bool change_end_frame(bool set, int64_t timestamp=0, unsigned long timePosition=0, unsigned long frameNumber=0);
/**
* Changes appearance of the object.
* @param newAppearance New appearance of the object
*/
void change_appearance(Characteristics const &newAppearance);
/**
* Returns appearance of the object.
* @return Object appearance
*/
Characteristics get_appearance() const;
/**
* Returns position of the object at frame with given timestamp.
* @param timestamp Frame timestamp
* @param trackedPosition Returned object position
* @return False if trajectory with given timestamp does not exist, otherwise true.
*/
bool get_position(int64_t timestamp, Selection &trackedPosition) const;
/**
* Computes position of the object in the next frame.
* @param frame Next frame
* @return Tracked object position
*/
Selection track_next(VideoFrame const *frame);
/**
* Draws mark of the object in a frame.
* @param frame Frame for drawing
* @param originalFrame Original frame
* @param timestamp Timestamp of the frame
* @return True if successful
*/
bool draw_mark(cv::Mat &frame, cv::Mat const &originalFrame, int64_t timestamp) const;
/**
* Returns trajectory sections of the object.
* @return Trajectory section of the object
*/
std::map<int64_t, TrajectorySection> const &get_trajectory_sections() const;
/**
* Returns computed trajectory of the object.
* @return Trajectory of the object
*/
std::map<int64_t, TrajectoryEntry> const &get_trajectory() const;
/**
* Returns initial timestamp.
* @return initial timestamp
*/
int64_t get_initial_timestamp() const;
/**
* Returns end timestamp.
* @return End timestamp
*/
int64_t get_end_timestamp() const;
/**
* Returns end time position.
* @return End time position
*/
unsigned long get_end_time_position() const;
/**
* Returns end frame number.
* @return End frame number
*/
unsigned long get_end_frame_number() const;
/**
* Returns last processed timestamp.
* @param timestamp Returned last processed timestamp
* @return Is returned timestamp valid?
*/
bool get_last_processed_timestamp(int64_t &timestamp) const;
/**
* Sets value of the flag saying whether all trajectory is processed.
* @param processed Set / unset
*/
void set_all_processed(bool processed);
/**
* Returns whether all trajectory is processed.
* @return Is all trajectory processed?
*/
bool is_all_processed() const;
/**
* Returns whether the end timestamp is set.
* @return Is end timestamp set?
*/
bool is_end_timestamp_set() const;
/**
* Deletes a trajectory section.
* @param timestamp Trajectory section to be deleted begins at this timestamp
* @return True if successful
*/
bool delete_trajectory_section(int64_t timestamp);
/**
* Returns name of the object.
* @return Object name
*/
std::string get_name() const;
/**
* Sets name of the object.
* @param newName New object name
* @return True if successful
*/
bool set_name(std::string newName);
/**
* Erases a part of the computed trajectory. This is necessary after deserialization (with CEREAL)
* as track_next() initializes correct sections only when the trajectory's last frame is the last
* frame of the previous section.
*/
void erase_trajectory_to_comply();
private:
std::string name;
Characteristics appearance;
int64_t initialTimestamp;
bool endTimestampSet; // if FALSE -> track till the end of the video
int64_t endTimestamp;
unsigned long endTimePosition;
unsigned long endFrameNumber;
std::map<int64_t, TrajectorySection> trajectorySections;
std::map<int64_t, TrajectoryEntry> trajectory;
TrajectorySection *currentSection;
bool nextSection;
int64_t nextSectionTimestamp;
bool allProcessed;
};
#endif // TRACKEDOBJECT_H
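A minimal tracking-loop sketch; firstFrame and frameSource.next_frame() are hypothetical stand-ins for the project's frame source, while TrackedObject, Characteristics, Selection and VideoFrame are the types declared above.

// Hedged sketch; firstFrame and frameSource are assumptions, not project API.
Characteristics look; // defaults: black filled rectangle with a black border
TrackedObject object(look, "object-1",
                     firstFrame->get_timestamp(), Selection(120, 80, 64, 64),
                     firstFrame->get_time_position(), firstFrame->get_frame_number(),
                     false, 0, 0, 0); // endTimestampSet == false: track till the end
while (VideoFrame const *frame = frameSource.next_frame())
{
    object.track_next(frame); // extends the computed trajectory
    cv::Mat display = frame->get_mat_frame()->clone(); // working copy to draw on
    object.draw_mark(display, *frame->get_mat_frame(), frame->get_timestamp());
}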

128
Sources/trackingalgorithm.cpp Normal file
View File

@ -0,0 +1,128 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by Ministry of the Interior of the Czech republic.
//
//------------------------------------------------------------------------------
/*!
@file trackingalgorithm.cpp
@brief Implementation of methods ...
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#include "trackingalgorithm.h"
#include "opencvx/cvxmat.h"
#include "opencvx/cvxrectangle.h"
#include "opencvx/cvrect32f.h"
#include "opencvx/cvcropimageroi.h"
#include "opencvx/cvdrawrectangle.h"
#include "opencvx/cvparticle.h"
#include "tracking/algorithm1/observetemplate.h"
#include "tracking/algorithm1/state.h"
#include <iostream>
#define DEFAULT 0
#define DRAW_PARTICLES_REALLOC 512
TrackingAlgorithm::TrackingAlgorithm(cv::Mat const &initialFrame, Selection const &initialPosition, Selection &centerizedPosition)
{
IplImage *frame = new IplImage(initialFrame);
resize = cvSize(24, 24); // feature (template) size
pDyn = 100; // dynamic number of particles
int p = 300; // number of particles
int sx = 5; // std. dev. of the x transition noise
int sy = 5; // std. dev. of the y transition noise
int sw = 0; // std. dev. of the width transition noise
int sh = 0; // std. dev. of the height transition noise
int sr = 0; // std. dev. of the rotation transition noise
CvRect region;
region.x = initialPosition.x;
region.y = initialPosition.y;
region.height = initialPosition.height;
region.width = initialPosition.width;
bool logprob = true;
particle = cvCreateParticle( num_states, p, logprob );
CvParticleState std = cvParticleState (
sx,
sy,
sw,
sh,
sr
);
cvParticleStateConfig( static_cast<CvParticle *>(particle), cvGetSize(frame), std );
CvParticleState s;
CvParticle *init_particle;
init_particle = cvCreateParticle( num_states, 1 );
CvRect32f region32f = cvRect32fFromRect( region );
CvBox32f box = cvBox32fFromRect32f( region32f ); // center box
s = cvParticleState( box.cx, box.cy, box.width, box.height, 0.0 );
cvParticleStateSet( init_particle, 0, s );
cvParticleInit( static_cast<CvParticle *>(particle), init_particle );
cvReleaseParticle( &init_particle );
// Resize reference image
reference = cvCreateImage( resize, frame->depth, frame->nChannels );
IplImage* tmp = cvCreateImage( cvSize(region.width,region.height), frame->depth, frame->nChannels );
cvCropImageROI(frame, tmp, region32f );
cvResize( tmp, reference );
cvReleaseImage( &tmp );
centerizedPosition.width = box.width;
centerizedPosition.height = box.height;
centerizedPosition.x = box.cx;
centerizedPosition.y = box.cy;
centerizedPosition.angle = initialPosition.angle;
delete frame; // release the IplImage header; the pixel data is owned by initialFrame
}
TrackingAlgorithm::~TrackingAlgorithm()
{
CvParticle *p = static_cast<CvParticle *>(particle);
cvReleaseParticle( &p ); // free the particle filter state
particle = nullptr;
cvReleaseImage( &reference ); // free the reference template
}
Selection TrackingAlgorithm::track_next_frame(cv::Mat const &nextImage)
{
IplImage *frame = new IplImage(nextImage);
cvParticleTransition( static_cast<CvParticle *>(particle) );
particleEvalDefault( static_cast<CvParticle *>(particle), frame, reference, resize, pDyn);
int maxp_id = cvParticleGetMax( static_cast<CvParticle *>(particle) );
CvParticleState maxs = cvParticleStateGet( static_cast<CvParticle *>(particle), maxp_id );
Selection objectPosition(maxs.x, maxs.y, maxs.width, maxs.height, maxs.angle);
cvParticleNormalize( static_cast<CvParticle *>(particle));
cvParticleResample( static_cast<CvParticle *>(particle) );
delete frame; // release the IplImage header; the pixel data is owned by nextImage
return objectPosition;
}

68
Sources/trackingalgorithm.h Normal file
View File

@ -0,0 +1,68 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by Ministry of the Interior of the Czech republic.
//
//------------------------------------------------------------------------------
/*!
@file trackingalgorithm.h
@brief Header file
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#ifndef TRACKINGALGORITHM_H
#define TRACKINGALGORITHM_H
#include "selection.h"
#include <opencv/cv.h>
#include <opencv/cvaux.h>
#include <opencv/cxcore.h>
#include <opencv/highgui.h>
class TrackingAlgorithm
{
public:
/**
* Constructor
* @param initialFrame Data of the first frame
* @param initialPosition Position of the object in the first frame
* @param centerizedPosition Returned position of the object
*/
TrackingAlgorithm(cv::Mat const &initialFrame, Selection const &initialPosition, Selection &centerizedPosition);
/**
* Destructor
*/
~TrackingAlgorithm();
/**
* Tracks the next provided frame.
* @param nextImage The next image for tracking
* @return Position of the object
*/
Selection track_next_frame(cv::Mat const &nextImage);
private:
void *particle;
IplImage* reference;
CvSize resize;
int pDyn;
};
#endif // TRACKINGALGORITHM_H
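A minimal usage sketch, assuming frames come from OpenCV's cv::VideoCapture; the input file name is hypothetical:

#include <opencv2/highgui/highgui.hpp>
#include "trackingalgorithm.h"

// Tracks a manually selected region through a video file (sketch).
void run_tracking()
{
    cv::VideoCapture capture("input.avi"); // hypothetical input file
    cv::Mat first;
    capture >> first; // the first frame initializes the particle filter
    Selection start(100, 100, 40, 40); // x, y, width, height (angle defaults to 0)
    Selection centered;                // receives the center-based initial position
    TrackingAlgorithm tracker(first, start, centered);
    cv::Mat frame;
    while (capture.read(frame))
    {
        Selection position = tracker.track_next_frame(frame);
        // position.x / position.y hold the tracked center of the object
        (void)position;
    }
}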

130
Sources/trajectoryitem.cpp Normal file
View File

@ -0,0 +1,130 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by Ministry of the Interior of the Czech republic.
//
//------------------------------------------------------------------------------
/*!
@file trajectoryitem.cpp
@brief Implementation of methods ...
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#include "trajectoryitem.h"
#include <QDebug>
#include <QMouseEvent>
TrajectoryItem::TrajectoryItem(int64_t timestamp, unsigned long timePosition, unsigned long totalTime,
unsigned long frameNumber, unsigned long frameCount, Selection position,
QWidget *parent, bool displayTime) :
QWidget(parent),
timestamp(timestamp),
timePosition(timePosition),
frameNumber(frameNumber)
{
QWidget *title = new QWidget(this);
title->setContentsMargins(0,3,0,0);
type = new QLabel(title);
type->setStyleSheet("QLabel { color: rgb(144, 144, 144)}");
QHBoxLayout *hLayout = new QHBoxLayout(title);
hLayout->setMargin(0);
hLayout->setSpacing(0);
timeLabel = new TimeLabel(title);
timeLabel->set_total_time(totalTime);
timeLabel->set_frame_count(frameCount);
timeLabel->setSizePolicy(QSizePolicy::Expanding, QSizePolicy::Preferred);
if (displayTime)
{
timeLabel->display_time(timePosition, false);
type->setText(tr("Time position: "));
}
else
{
timeLabel->display_frame_num(frameNumber, false);
type->setText(tr("Frame number: "));
}
hLayout->addWidget(type);
hLayout->addWidget(timeLabel);
title->setLayout(hLayout);
QString valuesString = "<FONT COLOR='#909090'>x:</FONT>";
valuesString.append(QString::number(position.x));
valuesString.append("<FONT COLOR='#909090'> y:</FONT>");
valuesString.append(QString::number(position.y));
valuesString.append("<FONT COLOR='#909090'> "+ tr("w") + ":</FONT>");
valuesString.append(QString::number(position.width));
valuesString.append("<FONT COLOR='#909090'> " + tr("h") + ":</FONT>");
valuesString.append(QString::number(position.height));
QLabel *values = new QLabel(valuesString, this);
values->setContentsMargins(0,0,0,5);
vLayout = new QVBoxLayout(this);
vLayout->setMargin(0);
vLayout->setSpacing(0);
vLayout->addWidget(title);
vLayout->addWidget(values);
setLayout(vLayout);
installEventFilter(this);
}
bool TrajectoryItem::eventFilter(QObject *object, QEvent *event)
{
if (isEnabled() && object == this)
{
if (event->type() == QEvent::Enter)
{
setStyleSheet(backgroundColorActive);
}
else if(event->type() == QEvent::Leave)
{
setStyleSheet(backgroundColorInactive);
}
return false; // propagates the event even when processed
}
return false; // propagates the event further since was not processed
}
TrajectoryItem::~TrajectoryItem()
{
// All objects get deleted automatically as they are set to be children
}
int64_t TrajectoryItem::get_timestamp() const
{
return timestamp;
}
void TrajectoryItem::display_time()
{
timeLabel->display_time(timePosition, false);
type->setText(tr("Time position: "));
}
void TrajectoryItem::display_frame_num()
{
timeLabel->display_frame_num(frameNumber, false);
type->setText(tr("Frame number: "));
}

99
Sources/trajectoryitem.h Normal file
View File

@ -0,0 +1,99 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by Ministry of the Interior of the Czech republic.
//
//------------------------------------------------------------------------------
/*!
@file trajectoryitem.h
@brief Header file
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#ifndef TRAJECTORYITEM_H
#define TRAJECTORYITEM_H
#include <QWidget>
#include <QLabel>
#include <QHBoxLayout>
#include "timelabel.h"
#include "selection.h"
#include <stdint.h> // int64_t; needed for MSVC compiler
class TrajectoryItem : public QWidget
{
Q_OBJECT
public:
/**
* Constructor
* @param timestamp Timestamp
* @param timePosition Time position
* @param totalTime Total video time
* @param frameNumber Frame number
* @param frameCount Number of frames in the video
* @param position Object position
* @param parent Parent widget
* @param displayTime True - display time. False - display frame number
*/
TrajectoryItem(int64_t timestamp, unsigned long timePosition, unsigned long totalTime, unsigned long frameNumber, unsigned long frameCount,
Selection position, QWidget *parent, bool displayTime=false);
/**
* Destructor
*/
~TrajectoryItem();
/**
* Returns timestamp.
* @return timestamp
*/
int64_t get_timestamp() const;
/**
* Displays values as time positions.
*/
void display_time();
/**
* Displays values as frame numbers.
*/
void display_frame_num();
protected:
/**
* Filters Enter and Leave events for item highlighting when hovering
* @param object Object of the event
* @param event Event
* @return False - propagate event further. True - do not propagate.
*/
bool eventFilter(QObject *object, QEvent *event);
private:
TimeLabel *timeLabel;
QLabel *type;
int64_t timestamp;
unsigned long timePosition;
unsigned long frameNumber;
bool isSet;
QVBoxLayout *vLayout;
const QString backgroundColorActive = "background-color: rgb(115, 171, 230);";
const QString backgroundColorInactive = "background-color: rgb(255, 255, 255);";
};
#endif // TRAJECTORYITEM_H

Sources/videoframe.cpp Normal file
@ -0,0 +1,266 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by Ministry of the Interior of the Czech republic.
//
//------------------------------------------------------------------------------
/*!
@file videoframe.cpp
@brief Implementation of methods ...
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#include "videoframe.h"
#include <QDebug>
VideoFrame::VideoFrame():
scalingMethod(VIDEOTRACKING_DEFAULT_SCALING_METHOD)
{
matFrame = nullptr;
avFrame = nullptr;
width = 0;
height = 0;
timestamp = 0;
timePosition = 0;
frameNumber = 0;
}
VideoFrame::VideoFrame(unsigned int width, unsigned int height):
scalingMethod(VIDEOTRACKING_DEFAULT_SCALING_METHOD),
width(width),
height(height)
{
matFrame = nullptr;
avFrame = nullptr;
timestamp = 0;
timePosition = 0;
frameNumber = 0;
}
//VideoFrame::VideoFrame(cv::Mat const &newFrame, int64_t timestamp, unsigned long timePosition):
// Does not copy avFrame because it is not needed and would make it slower
VideoFrame::VideoFrame(VideoFrame const &obj):
scalingMethod(VIDEOTRACKING_DEFAULT_SCALING_METHOD)
{
avFrame = nullptr;
if (obj.matFrame)
matFrame = new cv::Mat(*(obj.matFrame));
else
matFrame = nullptr;
height = obj.height;
width = obj.width;
timestamp = obj.timestamp;
timePosition = obj.timePosition;
frameNumber = obj.frameNumber;
}
VideoFrame::~VideoFrame()
{
if (matFrame)
{
delete matFrame;
matFrame = nullptr;
}
if (avFrame)
{
av_free(avFrame->data[0]);
av_frame_free(&avFrame);
avFrame = nullptr;
}
}
bool VideoFrame::set_frame(AVFrame const *newFrame)
{
if (!matFrame) // matFrame is nullptr and was not allocated yet
{
if (!width || !height) // is either dimension zero?
return false;
else
matFrame = new cv::Mat(height, width, CV_8UC3); // CV_8UC3->3 channels of unsigned 8-bit int
}
return AVFrame2Mat(newFrame, matFrame);
}
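/* Usage sketch for the lazy allocation above (decodedFrame is a hypothetical
 * AVFrame obtained from a decoder):
 *
 *   VideoFrame vf(decodedFrame->width, decodedFrame->height);
 *   if (!vf.set_frame(decodedFrame))        // allocates matFrame on first use
 *       qDebug() << "conversion failed";
 *   vf.set_timestamp(decodedFrame->pts);
 */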
unsigned long VideoFrame::get_time_position() const
{
return timePosition;
}
int64_t VideoFrame::get_timestamp() const
{
return timestamp;
}
unsigned long VideoFrame::get_frame_number() const
{
return frameNumber;
}
unsigned int VideoFrame::get_width() const
{
return width;
}
unsigned int VideoFrame::get_height() const
{
return height;
}
void VideoFrame::set_size(unsigned int newWidth, unsigned int newHeight)
{
width = newWidth;
height = newHeight;
// size changed -> allocated memory incorrect; free memory; it will be allocated when needed
if (matFrame)
{
delete matFrame;
matFrame = nullptr;
}
}
void VideoFrame::set_timestamp(int64_t newTimestamp)
{
timestamp = newTimestamp;
}
void VideoFrame::set_time_position(unsigned long newTimePosition)
{
timePosition = newTimePosition;
}
void VideoFrame::set_frame_number(unsigned long newFrameNumber)
{
frameNumber = newFrameNumber;
}
AVFrame const *VideoFrame::get_av_frame()
//AVFrame *VideoFrame::get_av_frame()
{
if (matFrame == nullptr) // There is not a valid frame that could be converted to AVFrame
return nullptr;
if (!avFrame)
{
avFrame = av_frame_alloc();
if (avFrame == nullptr)
{
qDebug() << "Not allocated";
exit(1);
}
int numBytes = avpicture_get_size((enum AVPixelFormat)outputFormat, width, height);
if (numBytes < 0)
{
qDebug() << "Error: avpicture_get_size()";
exit(1);
}
uint8_t *buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
if (buffer == nullptr)
{
qDebug() << "Not allocated";
exit(1);
}
if (avpicture_fill((AVPicture *)avFrame, buffer, (enum AVPixelFormat)outputFormat, width, height) < 0)
{
qDebug() << "Error: avpicture_fill()";
exit(1);
}
}
Mat2AVFrame(*matFrame, avFrame, outputFormat);
avFrame->pts = timestamp;
return avFrame;
}
cv::Mat const *VideoFrame::get_mat_frame() const
{
return matFrame;
}
// dstMat must be allocated before
bool VideoFrame::AVFrame2Mat(AVFrame const *src, cv::Mat *dstMat) const
{
assert(src != nullptr);
//assert(dstMat != nullptr);
AVFrame *dst = nullptr;
dst = av_frame_alloc(); // also zero-initializes all frame fields
dst->data[0] = (uint8_t *)dstMat->data; //dstMat->data is used as a buffer
// note: avpicture_fill does not perform a deep copy
if (avpicture_fill((AVPicture *)dst, dst->data[0], AV_PIX_FMT_BGR24, src->width, src->height) < 0)
{
qDebug() << "Error - AVFrame2Mat: avpicture_fill";
av_frame_free(&dst);
dst = nullptr;
return false;
}
SwsContext *conversionContext = nullptr; // Context is needed for sws_scale
conversionContext = sws_getContext(src->width, src->height, (enum AVPixelFormat)src->format,
src->width, src->height, AV_PIX_FMT_BGR24,
scalingMethod, NULL, NULL, NULL);
if (conversionContext == nullptr)
{
qDebug() << "Error - AVFrame2Mat: sws_getContext";
av_frame_free(&dst);
return false;
}
sws_scale(conversionContext, src->data, src->linesize, 0, src->height,
dst->data, dst->linesize);
// free allocated memories
sws_freeContext(conversionContext);
conversionContext = nullptr;
av_frame_free(&dst);
dst = nullptr;
return true;
}
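/* Usage sketch: the destination Mat must be pre-allocated with the source
 * frame's dimensions and 3 BGR channels (src is a hypothetical decoded frame,
 * frame a VideoFrame instance):
 *
 *   cv::Mat dst(src->height, src->width, CV_8UC3);
 *   if (!frame.AVFrame2Mat(src, &dst))
 *       qDebug() << "conversion failed";
 */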
bool VideoFrame::Mat2AVFrame(cv::Mat const &src, AVFrame *dstAVFrame, const int dstFormat) const
{
//assert(src != nullptr);
assert(dstAVFrame != nullptr);
const int width = src.cols; // not static: a later video may have different dimensions
const int height = src.rows;
AVFrame *srcAV = av_frame_alloc(); // also zero-initializes all frame fields
if (srcAV == nullptr)
{
qDebug() << "Not allocated";
return false;
}
avpicture_fill((AVPicture *)srcAV, (uint8_t *)src.data, AV_PIX_FMT_BGR24, width, height);
// all frames have same width, height, format ...
SwsContext *conversionContext = sws_getContext(width, height, AV_PIX_FMT_BGR24,
width, height, (enum AVPixelFormat)dstFormat,
scalingMethod, NULL, NULL, NULL);
sws_scale(conversionContext, srcAV->data, srcAV->linesize, 0, height,
dstAVFrame->data, dstAVFrame->linesize);
// free allocated memories
sws_freeContext(conversionContext);
conversionContext = nullptr;
av_frame_free(&srcAV);
srcAV = nullptr;
return true;
}

Sources/videoframe.h Normal file
@ -0,0 +1,170 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by Ministry of the Interior of the Czech republic.
//
//------------------------------------------------------------------------------
/*!
@file videoframe.h
@brief Header file
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#ifndef VIDEOFRAME_H
#define VIDEOFRAME_H
#include <opencv/cv.h>
#include <opencv/cvaux.h>
#include <opencv/cxcore.h>
#include <opencv/highgui.h>
extern "C"{
#include <ffmpeg/libavcodec/avcodec.h>
#include <ffmpeg/libavformat/avformat.h>
#include <ffmpeg/libswscale/swscale.h>
}
#define VIDEOTRACKING_DEFAULT_SCALING_METHOD SWS_BILINEAR
#define VIDEOTRACKING_OUTPUT_FORMAT AV_PIX_FMT_YUV420P
class VideoFrame
{
public:
/**
* Constructor
*/
VideoFrame();
/**
* Copy constructor
* @param obj Original
*/
VideoFrame(const VideoFrame &obj);
/**
* Constructor
* @param width Frame width
* @param height Frame height
*/
VideoFrame(unsigned int width, unsigned int height);
/**
* Destructor
*/
~VideoFrame();
/**
* Sets a new frame
* @param newFrame New frame data
* @return True if successful
*/
bool set_frame(AVFrame const *newFrame);
unsigned long get_time_position() const;
/**
* Returns frame timestamp.
* @return Frame timestamp
*/
int64_t get_timestamp() const;
/**
* Returns frame number (index).
* @return Frame number
*/
unsigned long get_frame_number() const;
/**
* Returns width of the frame.
* @return Frame width
*/
unsigned int get_width() const;
/**
* Returns height of the frame.
* @return Frame height
*/
unsigned int get_height() const;
/**
* Sets frame size.
* @param newWidth New frame width
* @param newHeight New frame height
*/
void set_size(unsigned int newWidth, unsigned int newHeight);
/**
* Sets timestamp.
* @param newTimestamp New timestamp
*/
void set_timestamp(int64_t newTimestamp);
/**
* Sets time position.
* @param newTimePosition New time Position
*/
void set_time_position(unsigned long newTimePosition);
/**
* Sets frame number.
* @param newFrameNumber New frame number
*/
void set_frame_number(unsigned long newFrameNumber);
/**
* Returns the frame in AVFrame.
* @return Frame
*/
AVFrame const *get_av_frame();
/**
* Returns the frame in cv::Mat.
* @return Frame
*/
cv::Mat const *get_mat_frame() const;
/**
* Converts AVFrame to cv::Mat.
* @param src Source AVFrame
* @param dstMat Destination cv::Mat; it must be allocated before using this function.
* @return True if successful
*/
bool AVFrame2Mat(AVFrame const *src, cv::Mat *dstMat) const;
/**
* Converts cv::Mat to AVFrame.
* @param src Source cv::Mat
* @param dstAVFrame Destination AVFrame; it must be allocated and its data buffer set up (e.g. with avpicture_fill) before calling this function
* @param dstFormat Destination pixel format of dstAVFrame
* @return True if successful
*/
bool Mat2AVFrame(cv::Mat const &src, AVFrame *dstAVFrame, const int dstFormat) const;
public:
cv::Mat *matFrame; // Be careful using it directly! If possible, use get_mat_frame() instead
private:
AVFrame *avFrame;
const int outputFormat = VIDEOTRACKING_OUTPUT_FORMAT;
const int scalingMethod;
int64_t timestamp;
unsigned long timePosition;
unsigned long frameNumber;
unsigned int width;
unsigned int height;
};
#endif // VIDEOFRAME_H

Sources/videotracker.cpp Normal file
@ -0,0 +1,689 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by Ministry of the Interior of the Czech republic.
//
//------------------------------------------------------------------------------
/*!
@file videotracker.cpp
@brief Implementation of methods ...
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#include <QImage>
#include <QDebug>
#include "videotracker.h"
#include "avwriter.h"
using namespace cv;
VideoTracker::VideoTracker()
{
player = nullptr;
currentFrame = nullptr;
tempFrame = nullptr;
qDebug() << "new videoTracker";
}
VideoTracker::VideoTracker(std::string const &videoAddr, QProgressDialog const *progressDialog)
{
load_video(videoAddr, progressDialog);
}
void VideoTracker::load_video(std::string const &videoAddr, QProgressDialog const *progressDialog)
{
av_register_all();
player = new FFmpegPlayer(videoAddr, progressDialog);
currentFrame = new VideoFrame(player->get_width(), player->get_height()); // stores currently read frame
tempFrame = new VideoFrame(player->get_width(), player->get_height()); // stores temporary frame when tracking
}
VideoTracker::~VideoTracker()
{
/**
for (TrackedObject *object: trackedObjects)
{
delete object;
object = nullptr;
}
*/
delete currentFrame;
currentFrame = nullptr;
delete tempFrame;
tempFrame = nullptr;
delete player;
player = nullptr;
}
void VideoTracker::seek_first_packet()
{
if (!player->seek_first_packet())
{
qDebug() << "ERROR-create_output: seek_first_packet";
}
}
int VideoTracker::add_object(Characteristics const &objectAppearance, std::string const &objectName, Selection initialPosition,
int64_t initialTimestamp, unsigned long initialTimePosition, unsigned long initialFrameNumber,
bool endTimestampSet, int64_t endTimestamp, unsigned long endTimePosition, unsigned long endFrameNumber)
{
qDebug() << "Initialize tracking";
auto newObject = std::make_shared<TrackedObject>(objectAppearance, objectName, initialTimestamp, initialPosition,
initialTimePosition, initialFrameNumber, endTimestampSet,
endTimestamp, endTimePosition, endFrameNumber);
trackedObjects.push_back(newObject);
if (!player->get_frame_by_timestamp(currentFrame, initialTimestamp))
{
qDebug()<< "Tracker: Cannot read this frame.";
return -1;
}
newObject->track_next(currentFrame);
qDebug() << "Initialized";
return trackedObjects.size() - 1;
}
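/* Usage sketch (Characteristics and Selection are project types; their
 * construction is shown only schematically here):
 *
 *   Selection pos;                  // x, y, width, height of the object
 *   Characteristics look;           // appearance of the anonymizing mark
 *   int id = tracker.add_object(look, "face 1", pos,
 *                               timestamp, timePos, frameNo);
 *   if (id < 0)
 *       qDebug() << "frame at the initial timestamp could not be read";
 */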
bool VideoTracker::get_frame_by_time(QImage &imgFrame, QImage &originalImgFrame, bool includeOriginal, int64_t time, QProgressDialog *progressDialog)
{
if (!player->get_frame_by_time(currentFrame, time))
return false;
Mat result = currentFrame->get_mat_frame()->clone(); // Simple assignment would do only a shallow copy, clone() is needed
if (!track_frame(currentFrame, result, progressDialog))
return false;
if (includeOriginal)
originalImgFrame = Mat2QImage(*currentFrame->get_mat_frame());
imgFrame = Mat2QImage(result);
return true;
}
bool VideoTracker::get_frame_by_timestamp(QImage &imgFrame, QImage &originalImgFrame, bool includeOriginal, int64_t timestamp, QProgressDialog *progressDialog)
{
if (!player->get_frame_by_timestamp(currentFrame, timestamp))
return false;
Mat result = currentFrame->get_mat_frame()->clone(); // Simple assignment would do only a shallow copy, clone() is needed
if (!track_frame(currentFrame, result, progressDialog))
return false;
if (includeOriginal)
originalImgFrame = Mat2QImage(*currentFrame->get_mat_frame());
imgFrame = Mat2QImage(result);
return true;
}
bool VideoTracker::get_frame_by_number(QImage &imgFrame, QImage &originalImgFrame, bool includeOriginal, unsigned long frameNumber, QProgressDialog *progressDialog)
{
if (!player->get_frame_by_number(currentFrame, frameNumber))
return false;
Mat result = currentFrame->get_mat_frame()->clone(); // Simple assignment would do only a shallow copy, clone() is needed
if (!track_frame(currentFrame, result, progressDialog))
return false;
if (includeOriginal)
originalImgFrame = Mat2QImage(*currentFrame->get_mat_frame());
imgFrame = Mat2QImage(result);
return true;
}
bool VideoTracker::get_next_frame(QImage &imgFrame, QImage &originalImgFrame, bool includeOriginal, QProgressDialog *progressDialog)
{
//Mat matFrame(player->get_height(), player->get_width(), CV_8UC3);
// This variable is used by the "track_frame" function to determine whether
// it needs to jump to a particular frame. When lastProcessedTimestamp equals
// previousTimestamp, track_frame does not need to seek, because the currently
// read frame immediately follows the previously tracked one.
int64_t previousTimestamp = currentFrame->get_timestamp();
if (!player->get_next_frame(currentFrame))
return false;
Mat result = currentFrame->get_mat_frame()->clone(); // Simple assignment would do only a shallow copy, clone() is needed
if (!track_frame(currentFrame, result, progressDialog, true, previousTimestamp))
return false;
if (includeOriginal)
originalImgFrame = Mat2QImage(*currentFrame->get_mat_frame());
imgFrame = Mat2QImage(result);
return true;
}
bool VideoTracker::get_previous_frame(QImage &imgFrame, QImage &originalImgFrame, bool includeOriginal, QProgressDialog *progressDialog)
{
if (!player->get_previous_frame(currentFrame))
return false;
Mat result = currentFrame->get_mat_frame()->clone(); // Simple assignment would do only a shallow copy, clone() is needed
if (!track_frame(currentFrame, result, progressDialog))
return false;
if (includeOriginal)
originalImgFrame = Mat2QImage(*currentFrame->get_mat_frame());
imgFrame = Mat2QImage(result);
return true;
}
bool VideoTracker::get_first_frame(QImage &imgFrame, QImage &originalImgFrame, bool includeOriginal, QProgressDialog *progressDialog)
{
player->seek_first_packet();
return get_next_frame(imgFrame, originalImgFrame, includeOriginal, progressDialog);
}
bool VideoTracker::get_current_frame(QImage &imgFrame, QImage &originalImgFrame, bool includeOriginal)
{
Mat result = currentFrame->get_mat_frame()->clone(); // Simple assignment would do only a shallow copy, clone() is needed
if (!track_frame(currentFrame, result, nullptr))
return false;
if (includeOriginal)
originalImgFrame = Mat2QImage(*currentFrame->get_mat_frame());
imgFrame = Mat2QImage(result);
return true;
}
bool VideoTracker::track_frame(VideoFrame const *originalFrame, cv::Mat &result, QProgressDialog *progressDialog, bool previousTimestampSet,
int64_t previousTimestamp)
{
int64_t currentTimestamp = originalFrame->get_timestamp();
if (!trackedObjects.empty())
{
Selection trackedPosition;
//for (TrackedObject *object: trackedObjects)
for (auto object: trackedObjects)
{
if ((object->get_initial_timestamp() > currentTimestamp) ||
((object->is_end_timestamp_set() && object->get_end_timestamp() < currentTimestamp)))
continue; // In this case current timestamp is out of the range of this tracked object
int64_t lastProcessedTimestamp;
bool lastProcessedTimestampSet = object->get_last_processed_timestamp(lastProcessedTimestamp); // If the frame was already processed but the whole video is not yet processed
if (object->is_all_processed() || (lastProcessedTimestampSet && currentTimestamp <= lastProcessedTimestamp))
{
if (!object->get_position(currentTimestamp, trackedPosition))// Position is already known as this frame has already been processed.
{
return false;
}
}
else if (previousTimestampSet && (lastProcessedTimestamp == previousTimestamp)) // Used only via get_next_frame(): the frame just read is already the right one, so no seeking is needed; much faster
trackedPosition = object->track_next(originalFrame);
/**else if (!object->is_initialized())
{
qDebug() << "ERROR - VideoTracker: tracking frame when trackedObject not initialized";
return false;
}*/
else
{
if (progressDialog)
{
progressDialog->show();
}
if (lastProcessedTimestampSet)
{
// Seek to the last processed position. This frame itself won't be used, but it allows using "get_next_frame".
// It might be possible to skip this frame and seek directly to the desired (next) one.
// However, that would be more complicated and would not make it much faster.
if (!player->get_frame_by_timestamp(tempFrame, lastProcessedTimestamp))
return false;
if (!player->get_next_frame(tempFrame)) // No more frames => all processed for this object
{
object->set_all_processed(true);
continue;
}
}
else
{ // So far no frame is processed, therefore lastProcessedTimestamp is not set
if (!player->get_frame_by_timestamp(tempFrame, object->get_initial_timestamp()))
return false;
}
while (true)
{
qApp->processEvents(); // Keeps progress bar active
trackedPosition = object->track_next(tempFrame);
if (tempFrame->get_timestamp() >= currentTimestamp)
break;
if (progressDialog)
{
if (progressDialog->wasCanceled()) // User cancelled the progress dialog
throw UserCanceledException();
}
qApp->processEvents(); // Keeps progress bar active
if (!player->get_next_frame(tempFrame))
return false; // Could not reach desired frame
}
//while (object->get_last_processed_timestamp() < currentTimestamp);
assert(currentTimestamp == tempFrame->get_timestamp());
if (progressDialog)
{
if (progressDialog->wasCanceled()) // User cancelled the progress dialog
throw UserCanceledException();
}
}
if (!object->draw_mark(result, *originalFrame->get_mat_frame(), currentTimestamp))
return false;
}
//progressDialog->reset();
return true;
}
return true;
}
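/* The logic above, in short: for each object whose range covers the current
 * timestamp, either (a) the position was already computed and is just looked
 * up, (b) the current frame directly follows the last processed one and can
 * be tracked in place, or (c) tracking resumes from the last processed frame
 * and runs forward until it reaches the current one. */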
bool VideoTracker::track_object(unsigned int objectID, QProgressDialog *progressDialog)
{
auto object = trackedObjects[objectID];
if (object->is_all_processed())
{
return true; // This object is processed throughout all its range
}
else
{
int64_t endTimestamp = object->get_end_timestamp();
int64_t lastProcessedTimestamp;
bool lastProcessedTimestampSet = object->get_last_processed_timestamp(lastProcessedTimestamp); // If the frame was already processed but the whole video is not yet processed
if (lastProcessedTimestampSet)
{
// Seek to the last processed position. This frame itself won't be used, but it allows using "get_next_frame".
// It might be possible to skip this frame and seek directly to the desired (next) one.
// However, that would be more complicated and would not make it much faster.
if (!player->get_frame_by_timestamp(tempFrame, lastProcessedTimestamp))
{
//progressDialog->reset();
return false;
}
if (!player->get_next_frame(tempFrame)) // No more frames => all processed for this object
{
object->set_all_processed(true);
return true;
}
}
else
{ // So far no frame is processed, therefore lastProcessedTimestamp is not set
if (!player->get_frame_by_timestamp(tempFrame, object->get_initial_timestamp()))
{
//progressDialog->reset();
return false;
}
}
while (true)
{
qApp->processEvents(); // Keeps progress bar active
object->track_next(tempFrame); // Also sets object->allProcessed
if (object->is_end_timestamp_set() && (tempFrame->get_timestamp() >= endTimestamp))
break;
if (progressDialog->wasCanceled()) // User canceled the progress dialog
{
// Reads the last tracked frame; used to know where tracking was aborted
player->get_current_frame(currentFrame);
throw UserCanceledException();
}
qApp->processEvents(); // Keeps progress bar active
if (!player->get_next_frame(tempFrame))
{
if (object->is_end_timestamp_set())
{
qDebug() << "Warning-track_all(): Cannot read more frames but object->endTimestamp is higher";
}
object->set_all_processed(true);
break;
}
}
}
return true;
}
bool VideoTracker::track_all(QProgressDialog *progressDialog)
{
if (!trackedObjects.empty())
{
progressDialog->show();
unsigned int i;
for (i = 0; i < trackedObjects.size(); i++)
{
if (!track_object(i, progressDialog))
{ // Tracking is not successful, do not track other objects
qDebug() << "ERROR-track_all(): track_object() returned false";
return false;
}
}
progressDialog->cancel();
//progressDialog->reset();
return true;
}
qDebug() << "Warning-track_all(): No objects to track";
return true;
}
QImage VideoTracker::Mat2QImage(Mat const &src) const
{
Mat temp; // conversion buffer with the same layout, BGR converted to RGB
cvtColor(src, temp, CV_BGR2RGB);
QImage dest((const uchar *) temp.data, temp.cols, temp.rows, temp.step, QImage::Format_RGB888);
dest.bits(); // enforce deep copy, see documentation
// of QImage::QImage ( const uchar * data, int width, int height, Format format )
return dest;
}
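/* Note on the deep copy above: the QImage constructor only wraps the Mat's
 * buffer, which dies with `temp`; calling bits() forces a detach. An
 * equivalent, perhaps more explicit variant would be:
 *
 *   return QImage((const uchar *) temp.data, temp.cols, temp.rows,
 *                 temp.step, QImage::Format_RGB888).copy();
 */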
/**
bool VideoTracker::set_frame_position(int const &position)
{
return player->set_frame_position(position);
}
bool VideoTracker::set_time_position(int const &position)
{
return player->set_time_position(position);
}
*/
double VideoTracker::get_fps() const
{
return player->get_fps();
}
unsigned long VideoTracker::get_frame_count() const
{
return player->get_frame_count();
}
unsigned long VideoTracker::get_total_time() const
{
// return player->get_frame_count() / player->get_fps();
return player->get_total_time();
}
int64_t VideoTracker::get_frame_timestamp() const
{
return currentFrame->get_timestamp();
}
unsigned long VideoTracker::get_time_position() const
{
return currentFrame->get_time_position();
}
unsigned long VideoTracker::get_frame_number() const
{
return currentFrame->get_frame_number();
}
unsigned int VideoTracker::get_objects_count() const
{
return trackedObjects.size();
}
void VideoTracker::create_output(std::string const &filename, QProgressDialog &fileProgressDialog,
QProgressDialog *trackingProgressDialog, std::string inFileExtension)
{
qDebug() << "filename" <<QString::fromStdString(filename);
assert(currentFrame != nullptr);
qDebug() << "Creating output: Tracking started";
try
{
if (!track_all(trackingProgressDialog))
{
qDebug() << "ERROR - Creating output: Tracking unsuccessful";
throw OutputException();
//return false;
}
} catch (UserCanceledException const &) {
// Cancel the second progress bar and propagate the exception
fileProgressDialog.cancel();
throw;
}
fileProgressDialog.setValue(0);
//trackingProgressDialog->cancel();
qDebug() << "Creating output: Tracking successful";
qDebug() << "Creating output: Writer initialization";
AVWriter *writer = new AVWriter();
if (!writer->initialize_output(filename, player->get_video_stream(), player->get_audio_stream(),
player->get_format_name(), inFileExtension))
{
//qDebug() << "Error: Writer not correctly initialized";
throw OutputException();
//return false;
}
qDebug() << "Creating output: Writer successfully initialized";
AVPacket packet;
bool isAudio = false;
//int i = 0;
qDebug() << "Creating output: Seeking first packet";
if (!player->seek_first_packet())
{
qDebug() << "ERROR-create_output: seek_first_packet";
throw OutputException();
//return false;
}
while (player->read_frame(packet, false/*onlyVideoPackets*/, &isAudio))
{ //packet needs to be freed only when contains audio, otherwise is freed already in read_frame()
if (isAudio)
{
if (!writer->write_audio_packet(packet))
{
av_free_packet(&packet);
throw OutputException();
}
else
av_free_packet(&packet);
}
else
{
if (!player->get_current_frame(currentFrame))
{
qDebug() << "Error: creating output: cannot get frame.";
throw OutputException();
}
// Mat result = currentFrame->get_mat_frame()->clone(); // Simple assignment would only make a shallow copy
VideoFrame result(*currentFrame); // The copy constructor deep-copies matFrame
// All trajectories are counted with track_all();
// track_frame now only returns those previously counted trajectories.
if (!track_frame(currentFrame, *(result.matFrame)))
break;
if (!writer->write_video_frame(result))
{
qDebug() << "Error: write_video_frame";
throw OutputException();
}
qDebug() << currentFrame->get_time_position() << "/" << get_total_time();
fileProgressDialog.setValue(currentFrame->get_time_position());
if (fileProgressDialog.wasCanceled())
{ // todo: close file or delete?
writer->close_output();
delete writer;
throw UserCanceledException();
}
}
}
if (!writer->write_last_frames()) // There might be frames left in the buffer
qDebug() << "WARNING: Creating output: write_last_frames() not correct";
qDebug() << "Creating output: Frames successfully written";
qDebug() << "Creating output: Closing output file";
if (!writer->close_output())
{
qDebug() << "Error: Writer not correctly closed";
throw OutputException();
//return false;
}
// fileProgressDialog.setValue(fileProgressDialog.maximum()); // In case video was shorter than the claimed length
delete writer;
writer = nullptr;
player->seek_first_packet(); // Return video to the first position
qDebug() << "Creating output: Output file successfully closed";
//return true;
}
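/* Usage sketch mirroring the exceptions this function throws:
 *
 *   try {
 *       tracker.create_output(outName, fileDlg, &trackDlg, ".avi");
 *   } catch (UserCanceledException const &) {
 *       // user pressed Cancel in one of the progress dialogs
 *   } catch (OutputException const &) {
 *       // tracking or writing the output file failed
 *   }
 */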
Characteristics VideoTracker::get_object_appearance(unsigned int objectID) const
{
return trackedObjects[objectID]->get_appearance();
}
std::map<int64_t, TrajectorySection> const &VideoTracker::get_object_trajectory_sections(unsigned int objectID) const
{
return trackedObjects[objectID]->get_trajectory_sections();
}
std::map<int64_t, TrajectoryEntry> const &VideoTracker::get_object_trajectory(unsigned int objectID) const
{
return trackedObjects[objectID]->get_trajectory();
}
bool VideoTracker::set_object_trajectory_section(unsigned int objectID, int64_t newTimestamp, Selection position,
unsigned long timePosition, unsigned long frameNumber)
{
return trackedObjects[objectID]->set_trajectory_section(newTimestamp, position, timePosition, frameNumber); //todo check if exists
}
bool VideoTracker::change_object_trajectory_section(unsigned int objectID, int64_t oldTimestamp, int64_t newTimestamp,
Selection position, unsigned long timePosition, unsigned long frameNumber)
{
return trackedObjects[objectID]->change_trajectory_section(oldTimestamp, newTimestamp, position, timePosition, frameNumber); //todo check if exists
}
bool VideoTracker::delete_object_trajectory_section(unsigned int objectID, int64_t timestamp)
{
return trackedObjects[objectID]->delete_trajectory_section(timestamp); //todo check if exists
}
bool VideoTracker::change_object_end_frame(unsigned int objectID, bool set, int64_t timestamp,
unsigned long timePosition, unsigned long frameNumber)
{
return trackedObjects[objectID]->change_end_frame(set, timestamp, timePosition, frameNumber); //todo test if object exists
}
bool VideoTracker::get_object_end(unsigned int objectID, int64_t &timestamp, unsigned long &timePosition, unsigned long &frameNumber)
{
if (!trackedObjects[objectID]->is_end_timestamp_set())
return false;
timestamp = trackedObjects[objectID]->get_end_timestamp();
timePosition = trackedObjects[objectID]->get_end_time_position();
frameNumber = trackedObjects[objectID]->get_end_frame_number();
return true;
}
bool VideoTracker::change_object_appearance(unsigned int objectID, Characteristics const &newAppearance)
{
trackedObjects[objectID]->change_appearance(newAppearance);
return true;
}
std::string VideoTracker::get_object_name(unsigned int objectID)
{
return trackedObjects[objectID]->get_name();
}
void VideoTracker::set_object_name(unsigned int objectID, std::string newName)
{
trackedObjects[objectID]->set_name(newName);
}
void VideoTracker::delete_object(unsigned int objectID)
{
assert(trackedObjects.size() > objectID);
trackedObjects.erase(trackedObjects.begin()+objectID);
}
std::vector<std::string> VideoTracker::get_all_objects_names()
{
std::vector<std::string> objectNames;
for (unsigned int i = 0; i < trackedObjects.size(); i++)
objectNames.push_back(get_object_name(i));
return objectNames;
}
void VideoTracker::erase_object_trajectories_to_comply()
{
for (auto object: trackedObjects)
{
object->erase_trajectory_to_comply();
}
}

Sources/videotracker.h Normal file
@ -0,0 +1,408 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by Ministry of the Interior of the Czech republic.
//
//------------------------------------------------------------------------------
/*!
@file videotracker.h
@brief Header file
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#ifndef VIDEOTRACKER_H
#define VIDEOTRACKER_H
#include <opencv/cv.h>
#include <opencv/cvaux.h>
#include <opencv/cxcore.h>
#include <opencv/highgui.h>
#include <QObject>
#include <QProgressDialog>
#include <QApplication>
#include <memory>
#include "ffmpegplayer.h"
#include "trackingalgorithm.h"
#include "trackedobject.h"
#include "videoframe.h"
#include "selection.h"
#include <cereal/types/vector.hpp>
#include <cereal/types/memory.hpp> // for shared_ptr
struct OutputException : public std::exception{};
struct UserCanceledException : public std::exception{};
class VideoTracker
{
public:
// CEREAL serialization
template<class Archive>
void serialize(Archive &archive)
{
archive(CEREAL_NVP(trackedObjects));
}
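/* Serialization sketch: the project includes cereal's vector and shared_ptr
 * support below; the JSON archive used here is an assumption, any cereal
 * archive type works the same way.
 *
 *   #include <cereal/archives/json.hpp>
 *   std::ofstream os("project.anp");       // file name is hypothetical
 *   cereal::JSONOutputArchive ar(os);
 *   ar(CEREAL_NVP(tracker));               // invokes VideoTracker::serialize()
 */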
/**
* Constructor
*/
VideoTracker();
/**
* Constructor
* @param videoAddr Path to a video file
* @param progressDialog QT progress dialog for displaying information about video opening
*/
explicit VideoTracker(std::string const &videoAddr, QProgressDialog const *progressDialog);
/**
* Destructor
*/
~VideoTracker();
/**
* Loads a new video file.
* @param videoAddr Path to a video file
* @param progressDialog QT progress dialog for displaying information about video opening
*/
void load_video(std::string const &videoAddr, QProgressDialog const *progressDialog);
/**
* Returns frames per second value of the video.
* @return Frames per second
*/
double get_fps() const;
/**
* Returns information about number of frames.
* @return Number of frames
*/
unsigned long get_frame_count() const;
/**
* Returns information about video length.
* @return Video length in milliseconds
*/
unsigned long get_total_time() const;
/**
* Returns timestamp of the current frame.
* @return Timestamp
*/
int64_t get_frame_timestamp() const;
/**
* Returns time position of the current frame.
* @return Time position in milliseconds
*/
unsigned long get_time_position() const;
/**
* Returns frame number of the current frame.
* @return Frame number
*/
unsigned long get_frame_number() const;
/**
* Returns number of tracked objects
* @return Number of tracked objects
*/
unsigned int get_objects_count() const;
/**
* Tracks a new object.
* @param objectAppearance Object appearance
* @param objectName Object name
* @param initialPosition Initial object position
* @param initialTimestamp Initial timestamp
* @param initialTimePosition Initial time position
* @param initialFrameNumber Initial frame number
* @param endTimestampSet Is end timestamp set?
* @param endTimestamp End timestamp
* @param endTimePosition End time position
* @param endFrameNumber End frame number
* @return Object ID; the first object has ID 0. If the frame at initialTimestamp cannot be read, -1 is returned.
*/
int add_object(const Characteristics &objectAppearance, const std::string &objectName,
Selection initialPosition, int64_t initialTimestamp, unsigned long initialTimePosition,
unsigned long initialFrameNumber, bool endTimestampSet=false, int64_t endTimestamp=0,
unsigned long endTimePosition=0, unsigned long endFrameNumber=0);
/**
* Reads a frame by its time position and returns it.
* @param imgFrame Altered read frame (contains tracked objects) converted to be displayed in QT
* @param originalImgFrame Read frame converted to be displayed in QT.
* @param includeOriginal Should the original frame be read? If not, originalImgFrame does not contain a valid frame.
* @param time Time position
* @param progressDialog QT progress dialog for showing an information about tracking objects process
* @return True if successful
*/
bool get_frame_by_time(QImage &imgFrame, QImage &originalImgFrame, bool includeOriginal, int64_t time, QProgressDialog *progressDialog);
/**
* Reads a frame by its timestamp and returns it.
* @param imgFrame Altered read frame (contains tracked objects) converted to be displayed in QT
* @param originalImgFrame Read frame converted to be displayed in QT.
* @param includeOriginal Should the original frame be read? If not, originalImgFrame does not contain a valid frame.
* @param timestamp Timestamp
* @param progressDialog QT progress dialog for showing an information about tracking objects process
* @return True if successful
*/
bool get_frame_by_timestamp(QImage &imgFrame, QImage &originalImgFrame, bool includeOriginal, int64_t timestamp, QProgressDialog *progressDialog);
/**
* Reads a frame by its frame number (index) and returns it.
* @param imgFrame Altered read frame (contains tracked objects) converted to be displayed in QT
* @param originalImgFrame Read frame converted to be displayed in QT.
* @param includeOriginal Should the original frame be read? If not, originalImgFrame does not contain a valid frame.
* @param frameNumber Frame number (index)
* @param progressDialog QT progress dialog for showing an information about tracking objects process
* @return True if successful
*/
bool get_frame_by_number(QImage &imgFrame, QImage &originalImgFrame, bool includeOriginal, unsigned long frameNumber, QProgressDialog *progressDialog);
/**
* Reads the frame following the current one and returns it.
* @param imgFrame Altered read frame (contains tracked objects) converted to be displayed in QT
* @param originalImgFrame Read frame converted to be displayed in QT.
* @param includeOriginal Should the original frame be read? If not, originalImgFrame does not contain a valid frame.
* @param progressDialog QT progress dialog for showing an information about tracking objects process
* @return True if successful
*/
bool get_next_frame(QImage &imgFrame, QImage &originalImgFrame, bool includeOriginal, QProgressDialog *progressDialog);
/**
* Reads the frame preceding the current one and returns it.
* @param imgFrame Altered read frame (contains tracked objects) converted to be displayed in QT
* @param originalImgFrame Read frame converted to be displayed in QT.
* @param includeOriginal Should be original frame read? If not, originalImgFrame does not contain a valid frame.
* @param progressDialog QT progress dialog for showing an information about tracking objects process
* @return True if successful
*/
bool get_previous_frame(QImage &imgFrame, QImage &originalImgFrame, bool includeOriginal, QProgressDialog *progressDialog);
/**
* Gets the current frame and returns it.
* @param imgFrame Altered read frame (contains tracked objects) converted to be displayed in QT
* @param originalImgFrame Read frame converted to be displayed in QT.
* @param includeOriginal Should the original frame be read? If not, originalImgFrame does not contain a valid frame.
* @return True if successful
*/
bool get_current_frame(QImage &imgFrame, QImage &originalImgFrame, bool includeOriginal);
/**
* Reads the first frame and returns it.
* @param imgFrame Altered read frame (contains tracked objects) converted to be displayed in QT
* @param originalImgFrame Read frame converted to be displayed in QT.
* @param includeOriginal Should the original frame be read? If not, originalImgFrame does not contain a valid frame.
* @param progressDialog QT progress dialog for showing an information about tracking objects process
* @return True if successful
*/
bool get_first_frame(QImage &imgFrame, QImage &originalImgFrame, bool includeOriginal, QProgressDialog *progressDialog);
/**
* Creates an output media file including all tracked objects.
* Throws UserCanceledException when the user clicks the "Cancel" button in a progress dialog.
* Note that there are two progress dialogs: one lives in track_all() and displays an indefinite
* progress bar while the objects' positions are being computed; the second one (fileProgressDialog)
* shows the percentage of the output file already written. When the first one (in track_all())
* is canceled, the second one is canceled too.
* @param filename Output filename
* @param fileProgressDialog QT progress dialog for showing an information about creating the file progress
* @param trackingProgressDialog QT progress dialog for showing an information about tracking objects process
* @param inFileExtension Input file extension
*/
void create_output(std::string const &filename, QProgressDialog &fileProgressDialog,
QProgressDialog *trackingProgressDialog, std::string inFileExtension);
/**
* Returns appearance of the object.
* @param objectID Object ID
* @return Object appearance
*/
Characteristics get_object_appearance(unsigned int objectID) const;
/**
* Returns trajectory sections of the object.
* @param objectID Object ID
* @return Trajectory section of the object
*/
std::map<int64_t, TrajectorySection> const &get_object_trajectory_sections(unsigned int objectID) const;
/**
* Returns computed trajectory of the object.
* @param objectID Object ID
* @return Trajectory of the object
*/
std::map<int64_t, TrajectoryEntry> const &get_object_trajectory(unsigned int objectID) const;
/**
* Changes appearance of the object.
* @param objectID Object ID
* @param newAppearance New appearance of the object
*/
bool change_object_appearance(unsigned int objectID, Characteristics const &newAppearance);
/**
* Rewinds the video to its beginning by seeking its first packet.
*/
void seek_first_packet();
// int64_t get_object_end_timestamp(unsigned int objectID, bool &isSet);
/**
* Return information about the end of the object.
* @param objectID Object ID
* @param timestamp Frame timestamp
* @param timePosition Time position
* @param frameNumber Frame number (index)
* @return True if end is set, false if it is not.
*/
bool get_object_end(unsigned int objectID, int64_t &timestamp, unsigned long &timePosition, unsigned long &frameNumber);
/**
* Changes a trajectory section of the object.
* @param objectID Object ID
* @param oldTimestamp Old timestamp of the section
* @param newTimestamp New timestamp of the section
* @param position Object position at the first frame of the section
* @param timePosition Time position of the section
* @param frameNumber Frame number of the section
* @return True if successful
*/
bool change_object_trajectory_section(unsigned int objectID, int64_t oldTimestamp, int64_t newTimestamp, Selection position, unsigned long timePosition, unsigned long frameNumber);
/**
* Changes the end frame of the object. If set==false, the end frame will be unset and the object
* will be tracked till the end of the video. newTimestamp and timePosition are valid only if set==true.
* @param objectID Object ID
* @param set Set / Unset last frame
* @param newTimestamp Timestamp of the last frame
* @param timePosition Time position of the last frame
* @param frameNumber Frame number of the last frame
* @return False if the given timestamp is lower than the timestamp of the beginning of tracking.
*/
bool change_object_end_frame(unsigned int objectID, bool set, int64_t newTimestamp=0, unsigned long timePosition=0, unsigned long frameNumber=0);
/**
* Deletes a trajectory section of the object
* @param objectID Object ID
* @param timestamp Trajectory section to be deleted begins at this timestamp
* @return True if successful
*/
bool delete_object_trajectory_section(unsigned int objectID, int64_t timestamp);
/**
* Sets a trajectory section of the object.
* @param objectID Object ID
* @param newTimestamp New timestamp of the section
* @param position Object position at the first frame of the section
* @param timePosition Time position of the section
* @param frameNumber Frame number of the section
* @return True if successful
*/
bool set_object_trajectory_section(unsigned int objectID, int64_t newTimestamp, Selection position, unsigned long timePosition, unsigned long frameNumber);
/**
* Returns name of the object.
* @param objectID Object ID
* @return Object name
*/
std::string get_object_name(unsigned int objectID);
/**
* Returns names of all objects.
* @return Names of all objects
*/
std::vector<std::string> get_all_objects_names();
/**
* Sets name of the object.
* @param objectID Object ID
* @param newName New object name
* @return True if successful
*/
void set_object_name(unsigned int objectID, std::string newName);
/**
* Removes the object
* @param objectID Object ID
*/
void delete_object(unsigned int objectID);
/**
* Computes the whole trajectory of the tracked object.
* @param objectID Object ID
* @param progressDialog QT progress dialog for showing an information about tracking object process
* @return True if successful
*/
bool track_object(unsigned int objectID, QProgressDialog *progressDialog);
/**
* Erases a part of the computed trajectory. This is necessary after deserialization (with CEREAL)
* as track_next() initializes correct sections only when the trajectory's last frame is the last
* frame of the previous section.
* @param objectID ObjectID
*/
void erase_object_trajectories_to_comply();
private:
/**
* Converts cv::Mat format (OpenCV frame) to QImage (QT)
* @param src Source frame in cv::Mat
* @return Frame for QT
*/
QImage Mat2QImage(cv::Mat const &src) const;
// previousTimestamp can be used only when previousTimestampSet==true
/**
* Draws tracking mark to the result cv::Mat. If current frame was not yet processed, processes tracking till this frame.
* @param originalFrame Original frame
* @param result Altered frame; with drawn tracked objects
* @param progressDialog QT progress dialog for showing an information about tracking objects process
* @param previousTimestampSet Used only with get_next_frame(). True - the frame passed in immediately follows the one given in the previous call.
* @param previousTimestamp Timestamp of the preceding frame. This is used only if previousTimestampSet==true.
* @return Returns true if successful
*/
bool track_frame(VideoFrame const *originalFrame, cv::Mat &result, QProgressDialog *progressDialog=nullptr, bool previousTimestampSet=false, int64_t previousTimestamp=0);
/**
* Tracks all frames. This is called when an output media file is being created.
* @param progressDialog QT progress dialog for showing an information about tracking objects process
* @return True if successful
*/
bool track_all(QProgressDialog *progressDialog);
private:
QApplication *qApplication; // Is used for updating progress bars
std::shared_ptr<TrackedObject> a;
FFmpegPlayer *player;
VideoFrame *currentFrame;
VideoFrame *tempFrame;
std::vector<std::shared_ptr<TrackedObject>> trackedObjects;
};
#endif // VIDEOTRACKER_H

Sources/videowidget.cpp Normal file
@ -0,0 +1,45 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by Ministry of the Interior of the Czech republic.
//
//------------------------------------------------------------------------------
/*!
@file videowidget.cpp
@brief Implementation of methods ...
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#include "videowidget.h"
VideoWidget::VideoWidget(QWidget *parent) : QWidget(parent)
{
}
VideoWidget::~VideoWidget()
{
}
void VideoWidget::resizeEvent(QResizeEvent *event)
{
emit resized();
QWidget::resizeEvent(event);
}
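/* Usage sketch: a parent widget can react to the resize, e.g. to rescale the
 * displayed frame (connection is Qt5-style; the slot name is hypothetical):
 *
 *   connect(videoWidget, &VideoWidget::resized,
 *           this, &MainWindow::rescale_displayed_frame);
 */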

Sources/videowidget.h Normal file
@ -0,0 +1,61 @@
//------------------------------------------------------------------------------
//
// Project: Anonymizer
//
// Brno University of Technology
// Faculty of Information Technology
//
//------------------------------------------------------------------------------
//
// This project was financially supported by project VG20102015006 funds
// provided by Ministry of the Interior of the Czech republic.
//
//------------------------------------------------------------------------------
/*!
@file videowidget.h
@brief Header file
@details Details
@authors Martin Borek (mborekcz@gmail.com)
@authors Filip Orsag (orsag@fit.vutbr.cz)
@date 2014-2015
@note This project was supported by MV CR project VG20102015006.
@copyright BUT OPEN SOURCE LICENCE (see License.txt)
*/
#ifndef VIDEOWIDGET_H
#define VIDEOWIDGET_H
#include <QWidget>
class VideoWidget : public QWidget
{
Q_OBJECT
public:
/**
* Constructor
* @param parent Parent widget
*/
explicit VideoWidget(QWidget *parent = 0);
/**
* Destructor
*/
~VideoWidget();
/**
* When the widget is resized, the resized() signal is emitted.
* @param event
*/
void resizeEvent(QResizeEvent *event);
signals:
/**
* This signal is emitted when the widget is resized.
*/
void resized();
public slots:
};
#endif // VIDEOWIDGET_H