2018
Gonçalves, Gabriel Resende; Diniz, Matheus Alves; Laroca, Rayson; Menotti, David; Schwartz, William Robson. Real-time Automatic License Plate Recognition Through Deep Multi-Task Networks. In: Conference on Graphics, Patterns and Images (SIBGRAPI), pp. 1-8, 2018. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/paper.pdf
Caetano Junior, Carlos Antonio; dos Santos, Jefersson A.; Schwartz, William Robson. Statistical Measures from Co-occurrence of Codewords for Action Recognition. In: International Conference on Computer Vision Theory and Applications (VISAPP), pp. 1-8, 2018. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/VISAPP_2018_CarlosCaetano.pdf
Kloss, Ricardo Barbosa; Jordao, Artur; Schwartz, William Robson. Face Verification Strategies for Employing Deep Models. In: 13th IEEE International Conference on Automatic Face & Gesture Recognition, pp. 258-262, 2018. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/Face-Verification-Strategies-for-Employing-Deep-Models.pdf
Jordao, Artur; Kloss, Ricardo Barbosa; Schwartz, William Robson. Latent HyperNet: Exploring all Layers from Convolutional Neural Networks. In: IEEE International Joint Conference on Neural Networks (IJCNN), pp. 1-7, 2018. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/Latent-HyperNet-Exploring-the-Layers.pdf
Colque, Rensso Victor Hugo Mora; Caetano Junior, Carlos Antonio; de Melo, Victor Hugo Cunha; Chavez, Guillermo Camara; Schwartz, William Robson. Novel Anomalous Event Detection based on Human-object Interactions. In: International Conference on Computer Vision Theory and Applications (VISAPP), pp. 1-8, 2018. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/VISAPP_2018_92.pdf
Bastos, Igor Leonardo Oliveira; de Melo, Victor Hugo Cunha; Gonçalves, Gabriel Resende; Schwartz, William Robson. MORA: A Generative Approach to Extract Spatiotemporal Information Applied to Gesture Recognition. In: 15th International Conference on Advanced Video and Signal-based Surveillance (AVSS), pp. 1-6, 2018. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/MORA_.pdf
Kloss, Ricardo Barbosa; Jordao, Artur; Schwartz, William Robson. Boosted Projection: An Ensemble of Transformation Models. In: 22nd Iberoamerican Congress on Pattern Recognition (CIARP), pp. 331-338, 2018. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/Boosted-Projection-An-Ensemble-of-Transformation-Models.pdf
Reis, Renan Oliveira; Dias, Igor Henrique; Schwartz, William Robson. Neural network control for active cameras using master-slave setup. In: International Conference on Advanced Video and Signal-based Surveillance (AVSS), pp. 1-6, 2018. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/renan_avss_2018-1-1.pdf
Nazare Junior, Antonio Carlos; de Costa, Filipe Oliveira; Schwartz, William Robson. Content-Based Multi-Camera Video Alignment using Accelerometer Data. In: 15th IEEE International Conference on Advanced Video and Signal Based Surveillance (AVSS), pp. 1-6, 2018. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/2018_avss_svsync_camera_ready.pdf
Jordao, Artur; Kloss, Ricardo; Yamada, Fernando; Schwartz, William Robson. Pruning Deep Neural Networks using Partial Least Squares. arXiv e-prints, 2018. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/03/1810.07610.pdf
2017
Bastos, Igor Leonardo Oliveira; Soares, Larissa Rocha; Schwartz, William Robson. Pyramidal Zernike Over Time: A spatiotemporal feature descriptor based on Zernike Moments. In: Iberoamerican Congress on Pattern Recognition (CIARP), pp. 77-85, 2017. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/PZOT_camera_ready.pdf
Colque, Rensso Victor Hugo Mora; Caetano Junior, Carlos Antonio; de Andrade, Matheus Toledo Lustosa; Schwartz, William Robson. Histograms of Optical Flow Orientation and Magnitude and Entropy to Detect Anomalous Events in Videos. IEEE Transactions on Circuits and Systems for Video Technology, 27(3), pp. 673-682, 2017. http://dx.doi.org/10.1109/TCSVT.2016.2637778
Caetano Junior, Carlos Antonio; de Melo, Victor Hugo Cunha; dos Santos, Jefersson Alex; Schwartz, William Robson. Activity Recognition based on a Magnitude-Orientation Stream Network. In: Conference on Graphics, Patterns and Images (SIBGRAPI), pp. 1-8, 2017. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/paper_2017_SIBGRAPI_Caetano.pdf
2016
Jordao, Artur; Sena, Jessica; Schwartz, William Robson. A Late Fusion Approach to Combine Multiple Pedestrian Detectors. In: IAPR International Conference on Pattern Recognition (ICPR), pp. 1-6, 2016. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/A-Late-Fusion-Approach-to-Combine-Multiple.pdf
Gonçalves, Gabriel Resende; Menotti, David; Schwartz, William Robson. License Plate Recognition based on Temporal Redundancy. In: IEEE International Conference on Intelligent Transportation Systems (ITSC), pp. 1-5, 2016. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/paper_2016_ITSC.pdf
Gonçalves, Gabriel Resende; da Silva, Sirlene Pio Gomes; Menotti, David; Schwartz, William Robson. Benchmark for License Plate Character Segmentation. Journal of Electronic Imaging, 25(5), pp. 1-5, 2016, ISSN: 1017-9909. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/JEI-2016-Benchmark.pdf
Vareto, Rafael Henrique; de Costa, Filipe Oliveira; Schwartz, William Robson. Face Identification in Large Galleries. In: Workshop on Face Processing Applications, pp. 1-4, 2016. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/paper_2016_WFPA.pdf
de Prates, Raphael Felipe Carvalho; Dutra, Cristianne Rodrigues Santos; Schwartz, William Robson. Predominant Color Name Indexing Structure for Person Re-Identification. In: IEEE International Conference on Image Processing (ICIP), 2016. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/paper_2016_ICIP_Prates.pdf
Abstract: The automation of surveillance systems is important to allow real-time analysis of critical events, crime investigation and prevention. A crucial step in surveillance systems is person re-identification (Re-ID), which aims at maintaining the identity of agents across non-overlapping camera networks. Most works in the literature compare a test sample against the entire gallery, restricting scalability. We address this problem by employing multiple indexing lists obtained from color name descriptors extracted from part-based models using our proposed Predominant Color Name (PCN) indexing structure. PCN is a flexible indexing structure that relates features to gallery images without the need for labelled training images and can be integrated with existing supervised and unsupervised person Re-ID frameworks. Experimental results demonstrate that the proposed approach outperforms indexation based on unsupervised clustering methods such as k-means and c-means. Furthermore, PCN reduces the computational effort with minimal performance degradation: when indexing 50% and 75% of the gallery images, we observed reductions in the AUC of 0.01 and 0.08, respectively, compared to indexing the entire gallery.
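A minimal sketch of the indexing idea summarized in the abstract above, assuming each gallery image is described by a single color-name histogram (the paper uses part-based models and a more elaborate ranking step); all names below are hypothetical and not taken from the authors' code.

import numpy as np
from collections import defaultdict

COLOR_NAMES = ["black", "blue", "brown", "grey", "green",
               "orange", "pink", "purple", "red", "white", "yellow"]

def predominant_colors(color_name_hist, top_k=2):
    # Indices of the top-k color names in the descriptor.
    return np.argsort(color_name_hist)[::-1][:top_k]

def build_index(gallery_descriptors, top_k=2):
    # Map each predominant color name to the gallery images that exhibit it.
    index = defaultdict(set)
    for image_id, hist in enumerate(gallery_descriptors):
        for color_idx in predominant_colors(hist, top_k):
            index[color_idx].add(image_id)
    return index

def candidate_list(index, probe_descriptor, top_k=2):
    # Retrieve only the gallery images sharing a predominant color with the probe.
    candidates = set()
    for color_idx in predominant_colors(probe_descriptor, top_k):
        candidates |= index.get(color_idx, set())
    return sorted(candidates)

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    gallery = rng.random((1000, len(COLOR_NAMES)))   # toy color-name histograms
    probe = rng.random(len(COLOR_NAMES))
    index = build_index(gallery)
    print(len(candidate_list(index, probe)), "of", len(gallery), "gallery images kept")

Only the shortlist returned by candidate_list would then be scored by the actual Re-ID matcher, which is where the reported reduction in computational effort comes from.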
de Prates, Raphael Felipe Carvalho; Oliveira, Marina Santos; Schwartz, William Robson. Kernel Partial Least Squares for Person Re-Identification. In: IEEE International Conference on Advanced Video and Signal-Based Surveillance (AVSS), 2016. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/egpaper_for_DoubleBlindReview.pdf
Abstract: Person re-identification (Re-ID) keeps the same identity for a person as they move across an area covered by non-overlapping surveillance cameras. Re-ID is a challenging task due to appearance changes caused by different camera viewpoints, occlusion and illumination conditions. While robust and discriminative descriptors are obtained by combining texture, shape and color features in a high-dimensional representation, achieving both accuracy and efficiency demands dimensionality reduction methods. In this paper, we propose variations of Kernel Partial Least Squares (KPLS) that simultaneously reduce the dimensionality and increase the discriminative power. The Cross-View KPLS (X-KPLS) and KPLS Mode A capture cross-view discriminative information and are successful for unsupervised and supervised Re-ID. Experimental results demonstrate that X-KPLS presents equal or higher matching results when compared to other methods in the literature on PRID450S.
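To illustrate the cross-view matching setup described above, here is a sketch that uses scikit-learn's linear PLSCanonical (a Mode-A PLS) as a stand-in for the paper's kernel variants, on synthetic two-camera data; it is not the authors' method or code, only the general pattern of projecting both views into a shared latent space and ranking by distance.

import numpy as np
from sklearn.cross_decomposition import PLSCanonical

# Toy cross-view data: descriptors of the same 200 people seen by two cameras.
rng = np.random.default_rng(42)
latent = rng.normal(size=(200, 10))
cam_a = latent @ rng.normal(size=(10, 500)) + 0.1 * rng.normal(size=(200, 500))
cam_b = latent @ rng.normal(size=(10, 500)) + 0.1 * rng.normal(size=(200, 500))

# Fit a linear Mode-A PLS between the two camera views on training identities.
pls = PLSCanonical(n_components=10)
pls.fit(cam_a[:150], cam_b[:150])

# Project test probes (camera A) and gallery (camera B) into the shared latent
# space and rank gallery identities by Euclidean distance.
probe_lat, gallery_lat = pls.transform(cam_a[150:], cam_b[150:])
dists = np.linalg.norm(probe_lat[:, None, :] - gallery_lat[None, :, :], axis=2)
rank1 = (dists.argmin(axis=1) == np.arange(dists.shape[0])).mean()
print(f"toy rank-1 matching rate: {rank1:.2f}")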
Goncalves, Gabriel Resende. License Plate Recognition based on Temporal Redundancy. Masters Thesis, Federal University of Minas Gerais, 2016. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/03/dissertation_2016_Gabriel.pdf
Abstract: Recognition of vehicle license plates is an important task applied to a myriad of real scenarios. Most approaches in the literature first detect an on-track vehicle, locate the license plate, perform a segmentation of its characters and then recognize the characters using an Optical Character Recognition (OCR) approach. However, these approaches perform the tasks using only a single frame of each vehicle in the video, so their recognition rates might be reduced by noise present in that particular frame. In this work, we instead propose an approach that automatically detects the vehicle on the road and identifies (locates/recognizes) its license plate based on temporally redundant information, rather than selecting a single frame to perform the recognition. We also propose two post-processing steps that can be employed to improve the accuracy of the system by querying a license plate database (e.g., the Department of Motor Vehicles database containing a list of all issued license plates and car models). Experimental results demonstrate that the proposed temporal redundancy approach improves the vehicle recognition rate by 15.5 percentage points (p.p.), an increase of 23.38% over the baseline results. An additional 7.8 p.p. are achieved with the two post-processing approaches, leading to a final recognition rate of 89.6% on a dataset with 5,200 frame images of 300 vehicles recorded at the Federal University of Minas Gerais (UFMG). In addition, this work also proposes a novel benchmark, designed specifically to evaluate character segmentation techniques, composed of a dataset of 2,000 Brazilian license plates (14,000 alphanumeric symbols) and an evaluation protocol based on a novel measure, the Jaccard-Centroid coefficient.
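The abstract does not spell out how the per-frame readings are fused, so the sketch below illustrates the temporal-redundancy idea with a simple character-wise majority vote over the OCR outputs of one vehicle track; the plate strings and the function are made up for illustration and are not the dissertation's actual combination scheme.

from collections import Counter

def combine_plate_readings(per_frame_plates):
    # Majority-vote each character position across the plate strings read
    # in the individual frames of one vehicle track.
    readings = [p for p in per_frame_plates if p]          # drop frames with no reading
    if not readings:
        return ""
    length = Counter(len(p) for p in readings).most_common(1)[0][0]
    readings = [p for p in readings if len(p) == length]   # keep the dominant length
    final = []
    for pos in range(length):
        votes = Counter(p[pos] for p in readings)
        final.append(votes.most_common(1)[0][0])
    return "".join(final)

# Example: noisy per-frame OCR outputs for one tracked vehicle.
frames = ["HDB2841", "H0B2841", "HDB2B41", "HDB2841", "HDB284l"]
print(combine_plate_readings(frames))   # -> "HDB2841"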
Jordao, Artur. The Good, the Fast and the Better Pedestrian Detector. Masters Thesis, Federal University of Minas Gerais, 2016. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/03/dissertation_2016_ArturJordao.pdf
Abstract: Pedestrian detection is a well-known problem in Computer Vision, mostly because of its direct applications in surveillance, transit safety and robotics. In the past decade, several efforts have been made to improve detection in terms of accuracy, speed and feature enhancement. In this work, we propose and analyze techniques focusing on these points. First, we develop an accurate oblique random forest (oRF) associated with Partial Least Squares (PLS). The method uses PLS to find, at each node of a decision tree, a decision surface that better splits the samples presented to it, based on a purity criterion. To measure the advantages provided by PLS in the oRF, we compare the proposed method with an oRF based on SVM. Second, we evaluate and compare filtering approaches that reduce the search space and keep only potential regions of interest to be presented to detectors, speeding up the detection process. Experimental results demonstrate that the evaluated filters are able to discard a large number of detection windows without compromising accuracy. Finally, we propose a novel approach to extract powerful features from the scene. The method combines the results of distinct pedestrian detectors by reinforcing human hypotheses while suppressing a significant number of false positives that lack spatial consensus when multiple detectors are considered. Our proposed approach, referred to as Spatial Consensus (SC), outperforms all previously published state-of-the-art pedestrian detection methods.
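A simplified sketch of the late-fusion intuition in the last part of the abstract (not the thesis' exact Spatial Consensus formulation): a detection survives only when boxes from enough detectors agree on roughly the same region. Box coordinates, thresholds and function names are illustrative assumptions.

import numpy as np

def iou(a, b):
    # Intersection over union of two boxes given as (x1, y1, x2, y2).
    x1, y1 = max(a[0], b[0]), max(a[1], b[1])
    x2, y2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / (area_a + area_b - inter + 1e-9)

def spatial_consensus(detections_per_detector, iou_thr=0.5, min_support=2):
    # Keep a detection only if boxes from at least `min_support` detectors
    # (including its own) overlap it above the IoU threshold.
    kept = []
    for d, boxes in enumerate(detections_per_detector):
        for box in boxes:
            support = sum(
                any(iou(box, other) >= iou_thr for other in other_boxes)
                for other_boxes in detections_per_detector
            )
            if support >= min_support:
                kept.append((d, box))
    return kept

detectors = [
    [(10, 10, 60, 120), (200, 40, 240, 130)],   # detector A
    [(12, 8, 58, 118)],                          # detector B agrees on A's first box
    [(300, 10, 340, 90)],                        # detector C fires alone (likely false positive)
]
print(spatial_consensus(detectors))   # only the mutually supported boxes remain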
Dutra, Cristianne Rodrigues Santos. Técnicas Otimizadas para Reidentificação de Pessoas (Optimized Techniques for Person Re-Identification). Masters Thesis, Federal University of Minas Gerais, 2016. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/thesis_2016_Cristianne.pdf
2015
dos Santos Junior, Cassio Elias. Partial Least Squares for Face Hashing. Masters Thesis, Federal University of Minas Gerais, 2015. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/dissertation_2015_Cassio.pdf
Abstract: Face identification is an important research topic due to its applications in areas such as surveillance, forensics and human-computer interaction. In the past few years, a myriad of methods for face identification has been proposed in the literature, with only a few among them focusing on scalability. In this work, we propose a simple but efficient approach for scalable face identification based on partial least squares (PLS) and random independent hash functions inspired by locality-sensitive hashing (LSH), resulting in the PLS for hashing (PLSH) approach. The original PLSH approach is further extended using feature selection to reduce the computational cost of evaluating the PLS-based hash functions, resulting in the state-of-the-art extended PLSH (ePLSH) approach. The proposed approach is evaluated on the FERET and FRGCv1 datasets. The results show a significant reduction in the number of subjects evaluated during face identification (reduced to 0.3% of the gallery), providing average speedups of up to 233 times compared to evaluating all subjects in the face gallery and 58 times compared to previous works in the literature.
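A rough sketch of the PLSH idea from the abstract above, assuming scikit-learn's PLSRegression as the regression model behind each hash function; the random partitioning, voting and shortlist size are simplified relative to the dissertation, and every identifier below is hypothetical.

import numpy as np
from sklearn.cross_decomposition import PLSRegression

def train_plsh(gallery_feats, gallery_ids, n_hash=50, n_components=5, seed=0):
    # Each hash function learns, via PLS regression, to separate a random
    # half of the gallery subjects (+1) from the other half (-1).
    rng = np.random.default_rng(seed)
    subjects = np.unique(gallery_ids)
    models = []
    for _ in range(n_hash):
        positives = set(rng.choice(subjects, size=len(subjects) // 2, replace=False))
        targets = np.where(np.isin(gallery_ids, list(positives)), 1.0, -1.0)
        pls = PLSRegression(n_components=n_components).fit(gallery_feats, targets)
        models.append((pls, positives))
    return models

def shortlist(models, probe_feat, subjects, keep=0.1):
    # Vote for the subjects on the predicted side of each hash function and
    # return the top fraction of the gallery to be matched exhaustively.
    votes = {s: 0.0 for s in subjects}
    for pls, positives in models:
        score = float(pls.predict(probe_feat.reshape(1, -1))[0, 0])
        for s in (positives if score > 0 else set(subjects) - positives):
            votes[s] += abs(score)
    ranked = sorted(votes, key=votes.get, reverse=True)
    return ranked[: max(1, int(keep * len(subjects)))]

# Usage sketch:
# models = train_plsh(gallery_feats, gallery_ids)
# candidates = shortlist(models, probe_feat, np.unique(gallery_ids))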
dos Santos Junior, Cassio Elias; Kijak, E.; Gravier, G.; Schwartz, William Robson. Learning to Hash Faces Using Large Feature Vectors. In: 13th International Workshop on Content-Based Multimedia Indexing (CBMI), pp. 1-6, IEEE, 2015. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/2015-Learning_to_Hash_Faces_Using_Large_Feature_Vectors.pdf
Pinto, A.; Pedrini, H.; Schwartz, William Robson; Rocha, A. Face Spoofing Detection Through Visual Codebooks of Spectral Temporal Cubes. IEEE Transactions on Image Processing, 24(12), pp. 4726-4740, 2015, ISSN: 1057-7149. http://dx.doi.org/10.1109/TIP.2015.2466088
Pessoa, Ramon F.; Schwartz, William Robson; dos Santos, Jefersson A. A Study on Low-Cost Representations for Image Feature Extraction on Mobile Devices. In: 14th Iberoamerican Congress on Pattern Recognition (CIARP), pp. 1-8, 2015. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/paper_2015_CIARP_Pessoa.pdf
de Prates, Raphael Felipe Carvalho; Schwartz, William Robson. CBRA: Color-Based Ranking Aggregation for Person Re-Identification. In: IEEE International Conference on Image Processing (ICIP), pp. 1-5, 2015. http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/paper_2015_ICIP_Prates.pdf
2018 |
Gabriel Resende Gonçalves; Matheus Alves Diniz; Rayson Laroca; David Menotti; William Robson Schwartz Real-time Automatic License Plate Recognition Through Deep Multi-Task Networks Inproceedings Conference on Graphic, Patterns and Images (SIBGRAPI), pp. 1-8, 2018. Links | BibTeX | Tags: Automatic License Plate Recognition, Deep Learning, DeepEyes, GigaFrames, Multi-Task Learning, Sense-ALPR @inproceedings{Goncalves:2018:SIBGRAPI, title = {Real-time Automatic License Plate Recognition Through Deep Multi-Task Networks}, author = {Gabriel Resende Gonçalves and Matheus Alves Diniz and Rayson Laroca and David Menotti and William Robson Schwartz}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/paper.pdf}, year = {2018}, date = {2018-09-04}, booktitle = {Conference on Graphic, Patterns and Images (SIBGRAPI)}, pages = {1-8}, keywords = {Automatic License Plate Recognition, Deep Learning, DeepEyes, GigaFrames, Multi-Task Learning, Sense-ALPR}, pubstate = {published}, tppubtype = {inproceedings} } |
Carlos Antonio Caetano Junior; Jefersson A dos Santos; William Robson Schwartz Statistical Measures from Co-occurrence of Codewords for Action Recognition Inproceedings VISAPP 2018 - International Conference on Computer Vision Theory and Applications, pp. 1-8, 2018. Links | BibTeX | Tags: Action Recognition, Activity Recognition, DeepEyes, GigaFrames, Spatiotemporal Features @inproceedings{Caetano:2018:VISAPP, title = {Statistical Measures from Co-occurrence of Codewords for Action Recognition}, author = {Carlos Antonio Caetano Junior and Jefersson A dos Santos and William Robson Schwartz}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/VISAPP_2018_CarlosCaetano.pdf}, year = {2018}, date = {2018-01-27}, booktitle = {VISAPP 2018 - International Conference on Computer Vision Theory and Applications}, pages = {1-8}, keywords = {Action Recognition, Activity Recognition, DeepEyes, GigaFrames, Spatiotemporal Features}, pubstate = {published}, tppubtype = {inproceedings} } |
Ricardo Barbosa Kloss; Artur Jordao; William Robson Schwartz Face Verification Strategies for Employing Deep Models Inproceedings 13th IEEE International Conference on Automatic Face & Gesture Recognition, pp. 258-262, 2018. Links | BibTeX | Tags: Artificial Neural Networks, Face Verification, GigaFrames, Metric Learning, Transfer Learning @inproceedings{Kloss:2018:FG, title = {Face Verification Strategies for Employing Deep Models}, author = {Ricardo Barbosa Kloss and Artur Jordao and William Robson Schwartz}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/Face-Verification-Strategies-for-Employing-Deep-Models.pdf}, year = {2018}, date = {2018-01-01}, booktitle = {13th IEEE International Conference on Automatic Face & Gesture Recognition}, pages = {258-262}, keywords = {Artificial Neural Networks, Face Verification, GigaFrames, Metric Learning, Transfer Learning}, pubstate = {published}, tppubtype = {inproceedings} } |
Artur Jordao; Ricardo Barbosa Kloss; William Robson Schwartz Latent hypernet: Exploring all Layers from Convolutional Neural Networks Inproceedings IEEE International Joint Conference on Neural Networks (IJCNN), pp. 1-7, 2018. Links | BibTeX | Tags: Activity Recognition Based on Wearable Sensors, DeepEyes, GigaFrames, Partial Least Squares, Wearable Sensors @inproceedings{Jordao:2018b:IJCNN, title = {Latent hypernet: Exploring all Layers from Convolutional Neural Networks}, author = {Artur Jordao and Ricardo Barbosa Kloss and William Robson Schwartz}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/Latent-HyperNet-Exploring-the-Layers.pdf}, year = {2018}, date = {2018-01-01}, booktitle = {IEEE International Joint Conference on Neural Networks (IJCNN)}, pages = {1-7}, keywords = {Activity Recognition Based on Wearable Sensors, DeepEyes, GigaFrames, Partial Least Squares, Wearable Sensors}, pubstate = {published}, tppubtype = {inproceedings} } |
Rensso Victor Hugo Mora Colque; Carlos Antonio Caetano Junior; Victor Hugo Cunha de Melo; Guillermo Camara Chavez; William Robson Schwartz Novel Anomalous Event Detection based on Human-object Interactions Inproceedings VISAPP 2018 - International Conference on Computer Vision Theory and Applications, pp. 1-8, 2018. Links | BibTeX | Tags: Anomalous Event Detection, Contextual Information, DeepEyes, GigaFrames, Human-Object Interaction @inproceedings{Colque:2018:VISAPP, title = {Novel Anomalous Event Detection based on Human-object Interactions}, author = {Rensso Victor Hugo Mora Colque and Carlos Antonio Caetano Junior and Victor Hugo Cunha de Melo and Guillermo Camara Chavez and William Robson Schwartz}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/VISAPP_2018_92.pdf}, year = {2018}, date = {2018-01-01}, booktitle = {VISAPP 2018 - International Conference on Computer Vision Theory and Applications}, pages = {1-8}, keywords = {Anomalous Event Detection, Contextual Information, DeepEyes, GigaFrames, Human-Object Interaction}, pubstate = {published}, tppubtype = {inproceedings} } |
Igor Leonardo Oliveira Bastos; Victor Hugo Cunha de Melo; Gabriel Resende Gonçalves; William Robson Schwartz MORA: A Generative Approach to Extract Spatiotemporal Information Applied to Gesture Recognition Inproceedings 15th International Conference on Advanced Video and Signal-based Surveillance (AVSS), pp. 1-6, 2018. Links | BibTeX | Tags: Autoencoders, DeepEyes, Gesture Recognition, GigaFrames, Recurrent Models @inproceedings{Bastos:2018:AVSS, title = {MORA: A Generative Approach to Extract Spatiotemporal Information Applied to Gesture Recognition}, author = {Igor Leonardo Oliveira Bastos and Victor Hugo Cunha de Melo and Gabriel Resende Gonçalves and William Robson Schwartz}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/MORA_.pdf}, year = {2018}, date = {2018-01-01}, booktitle = {15th International Conference on Advanced Video and Signal-based Surveillance (AVSS)}, pages = {1-6}, keywords = {Autoencoders, DeepEyes, Gesture Recognition, GigaFrames, Recurrent Models}, pubstate = {published}, tppubtype = {inproceedings} } |
Ricardo Barbosa Kloss; Artur Jordao; William Robson Schwartz Boosted Projection An Ensemble of Transformation Models Inproceedings 22nd Iberoamerican Congress on Pattern Recognition (CIARP), pp. 331-338, 2018. Links | BibTeX | Tags: Computer vision, DeepEyes, Dimensionality Reduction, Ensemble Partial Least Squares, GigaFrames, Machine Learning @inproceedings{Kloss:2018:CIARP, title = {Boosted Projection An Ensemble of Transformation Models}, author = {Ricardo Barbosa Kloss and Artur Jordao and William Robson Schwartz}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/Boosted-Projection-An-Ensemble-of-Transformation-Models.pdf}, year = {2018}, date = {2018-01-01}, booktitle = {22nd Iberoamerican Congress on Pattern Recognition (CIARP)}, pages = {331-338}, keywords = {Computer vision, DeepEyes, Dimensionality Reduction, Ensemble Partial Least Squares, GigaFrames, Machine Learning}, pubstate = {published}, tppubtype = {inproceedings} } |
Renan Oliveira Reis; Igor Henrique Dias; William Robson Schwartz Neural network control for active cameras using master-slave setup Inproceedings International Conference on Advanced Video and Signal-based Surveillance (AVSS), pp. 1-6, 2018. Links | BibTeX | Tags: Active Camera, DeepEyes, GigaFrames, Neural network control for active cameras using master-slave setup, SMS @inproceedings{Reis:2018:AVSS, title = {Neural network control for active cameras using master-slave setup}, author = {Renan Oliveira Reis and Igor Henrique Dias and William Robson Schwartz}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/renan_avss_2018-1-1.pdf}, year = {2018}, date = {2018-01-01}, booktitle = {International Conference on Advanced Video and Signal-based Surveillance (AVSS)}, pages = {1-6}, keywords = {Active Camera, DeepEyes, GigaFrames, Neural network control for active cameras using master-slave setup, SMS}, pubstate = {published}, tppubtype = {inproceedings} } |
Antonio Carlos Nazare Junior; Filipe Oliveira de Costa; William Robson Schwartz Content-Based Multi-Camera Video Alignment using Accelerometer Data Inproceedings Advanced Video and Signal Based Surveillance (AVSS), 2018 15th IEEE International Conference on, pp. 1-6, 2018. Links | BibTeX | Tags: Camera Synchronization, DeepEyes, GigaFrames, SensorCap, Sensors, SMS @inproceedings{Nazare:2018:AVSS, title = {Content-Based Multi-Camera Video Alignment using Accelerometer Data}, author = {Antonio Carlos Nazare Junior and Filipe Oliveira de Costa and William Robson Schwartz}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/2018_avss_svsync_camera_ready.pdf}, year = {2018}, date = {2018-01-01}, booktitle = {Advanced Video and Signal Based Surveillance (AVSS), 2018 15th IEEE International Conference on}, pages = {1-6}, keywords = {Camera Synchronization, DeepEyes, GigaFrames, SensorCap, Sensors, SMS}, pubstate = {published}, tppubtype = {inproceedings} } |
Artur Jordao; Ricardo Kloss; Fernando Yamada; William Robson Schwartz Pruning Deep Neural Networks using Partial Least Squares Journal Article ArXiv e-prints, 2018. Links | BibTeX | Tags: DeepEyes, GigaFrames, Neural Networks Optimization @article{Jordao:2018:arXivb, title = {Pruning Deep Neural Networks using Partial Least Squares}, author = {Artur Jordao and Ricardo Kloss and Fernando Yamada and William Robson Schwartz}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/03/1810.07610.pdf}, year = {2018}, date = {2018-01-01}, journal = {ArXiv e-prints}, keywords = {DeepEyes, GigaFrames, Neural Networks Optimization}, pubstate = {published}, tppubtype = {article} } |
2017 |
Igor Leonardo Oliveira Bastos; Larissa Rocha Soares; William Robson Schwartz Pyramidal Zernike Over Time: A spatiotemporal feature descriptor based on Zernike Moments Inproceedings Iberoamerican Congress on Pattern Recognition (CIARP 2017), pp. 77-85, 2017. Links | BibTeX | Tags: Activity Recognition, DeepEyes, Feature Extraction, GigaFrames, Zernike Moments @inproceedings{Bastos:2017:CIARP, title = {Pyramidal Zernike Over Time: A spatiotemporal feature descriptor based on Zernike Moments}, author = {Igor Leonardo Oliveira Bastos and Larissa Rocha Soares and William Robson Schwartz}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/PZOT_camera_ready.pdf}, year = {2017}, date = {2017-11-07}, booktitle = {Iberoamerican Congress on Pattern Recognition (CIARP 2017)}, pages = {77-85}, keywords = {Activity Recognition, DeepEyes, Feature Extraction, GigaFrames, Zernike Moments}, pubstate = {published}, tppubtype = {inproceedings} } |
Rensso Victor Hugo Mora Colque; Carlos Antonio Caetano Junior; Matheus Toledo Lustosa de Andrade; William Robson Schwartz Histograms of Optical Flow Orientation and Magnitude and Entropy to Detect Anomalous Events in Videos Journal Article IEEE Transactions on Circuits and Systems for Video Technology, 27 (3), pp. 673-682, 2017. Links | BibTeX | Tags: Anomalous Event Detection, DeepEyes, Feature Extraction, GigaFrames @article{Colque:2016:TCSVT, title = {Histograms of Optical Flow Orientation and Magnitude and Entropy to Detect Anomalous Events in Videos}, author = {Rensso Victor Hugo Mora Colque and Carlos Antonio Caetano Junior and Matheus Toledo Lustosa de Andrade and William Robson Schwartz}, url = {http://dx.doi.org/10.1109/TCSVT.2016.2637778}, year = {2017}, date = {2017-01-01}, journal = {IEEE Transactions on Circuits and Systems for Video Technology}, volume = {27}, number = {3}, pages = {673-682}, keywords = {Anomalous Event Detection, DeepEyes, Feature Extraction, GigaFrames}, pubstate = {published}, tppubtype = {article} } |
Carlos Antonio Caetano Junior; Victor Hugo Cunha de Melo; Jefersson Alex dos Santos; William Robson Schwartz Activity Recognition based on a Magnitude-Orientation Stream Network Inproceedings Conference on Graphics, Patterns and Images (SIBGRAPI), pp. 1-8, 2017. Links | BibTeX | Tags: Activity Recognition, Deep Learning, GigaFrames @inproceedings{Caetano:2017:SIBGRAPI, title = {Activity Recognition based on a Magnitude-Orientation Stream Network}, author = {Carlos Antonio Caetano Junior and Victor Hugo Cunha de Melo and Jefersson Alex dos Santos and William Robson Schwartz}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/paper_2017_SIBGRAPI_Caetano.pdf}, year = {2017}, date = {2017-01-01}, booktitle = {Conference on Graphics, Patterns and Images (SIBGRAPI)}, pages = {1-8}, keywords = {Activity Recognition, Deep Learning, GigaFrames}, pubstate = {published}, tppubtype = {inproceedings} } |
2016 |
Artur Jordao; Jessica Sena; William Robson Schwartz A Late Fusion Approach to Combine Multiple Pedestrian Detectors Inproceedings IAPR International Conference on Pattern Recognition (ICPR), pp. 1-6, 2016. Links | BibTeX | Tags: DeepEyes, Featured Publication, GigaFrames, Pedestrian Detection @inproceedings{Correia:2016:ICPR, title = {A Late Fusion Approach to Combine Multiple Pedestrian Detectors}, author = {Artur Jordao and Jessica Sena and William Robson Schwartz}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/A-Late-Fusion-Approach-to-Combine-Multiple.pdf}, year = {2016}, date = {2016-12-13}, booktitle = {IAPR International Conference on Pattern Recognition (ICPR)}, pages = {1-6}, keywords = {DeepEyes, Featured Publication, GigaFrames, Pedestrian Detection}, pubstate = {published}, tppubtype = {inproceedings} } |
Gabriel Resende Gonçalves; David Menotti; William Robson Schwartz License Plate Recognition based on Temporal Redundancy Inproceedings IEEE International Conference on Intelligent Transportation Systems (ITSC), pp. 1-5, 2016. Links | BibTeX | Tags: Automatic License Plate Recognition, DeepEyes, GigaFrames @inproceedings{Goncalves:2016:ITSC, title = {License Plate Recognition based on Temporal Redundancy}, author = {Gabriel Resende Gonçalves and David Menotti and William Robson Schwartz}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/paper_2016_ITSC.pdf}, year = {2016}, date = {2016-11-04}, booktitle = {IEEE International Conference on Intelligent Transportation Systems (ITSC)}, pages = {1-5}, keywords = {Automatic License Plate Recognition, DeepEyes, GigaFrames}, pubstate = {published}, tppubtype = {inproceedings} } |
Gabriel Resende Gonçalves; Sirlene Pio Gomes da Silva; David Menotti; William Robson Schwartz Benchmark for License Plate Character Segmentation Journal Article Journal of Electronic Imaging, 25 (5), pp. 1-5, 2016, ISBN: 1017-9909. Links | BibTeX | Tags: Automatic License Plate Recognition, Benchmark, Character Segmentation, DeepEyes, Featured Publication, GigaFrames, Jaccard Coefficient, Novel Dataset, Sense SegPlate @article{2016:JEI:Gabriel, title = {Benchmark for License Plate Character Segmentation}, author = {Gabriel Resende Gonçalves and Sirlene Pio Gomes da Silva and David Menotti and William Robson Schwartz}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/JEI-2016-Benchmark.pdf}, isbn = {1017-9909}, year = {2016}, date = {2016-10-24}, journal = {Journal of Electronic Imaging}, volume = {25}, number = {5}, pages = {1-5}, keywords = {Automatic License Plate Recognition, Benchmark, Character Segmentation, DeepEyes, Featured Publication, GigaFrames, Jaccard Coefficient, Novel Dataset, Sense SegPlate}, pubstate = {published}, tppubtype = {article} } |
Rafael Henrique Vareto; Filipe Oliveira de Costa; William Robson Schwartz Face Identification in Large Galleries Inproceedings Workshop on Face Processing Applications, pp. 1-4, 2016. Links | BibTeX | Tags: DeepEyes, Face Identification, Face Recognition, GigaFrames, VER+ @inproceedings{Vareto:2016:WFPA, title = {Face Identification in Large Galleries}, author = {Rafael Henrique Vareto and Filipe Oliveira de Costa and William Robson Schwartz}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/paper_2016_WFPA.pdf}, year = {2016}, date = {2016-10-02}, booktitle = {Workshop on Face Processing Applications}, pages = {1-4}, keywords = {DeepEyes, Face Identification, Face Recognition, GigaFrames, VER+}, pubstate = {published}, tppubtype = {inproceedings} } |
Raphael Felipe Carvalho de Prates; Cristianne Rodrigues Santos Dutra; William Robson Schwartz Predominant Color Name Indexing Structure for Person Re-Identification Inproceedings IEEE International Conference on Image Processing (ICIP), 2016. Resumo | Links | BibTeX | Tags: GigaFrames, Person Re-Identification, VER+ @inproceedings{Prates2016ICIP, title = {Predominant Color Name Indexing Structure for Person Re-Identification}, author = {Raphael Felipe Carvalho de Prates and Cristianne Rodrigues Santos Dutra and William Robson Schwartz}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/paper_2016_ICIP_Prates.pdf}, year = {2016}, date = {2016-09-25}, booktitle = {IEEE International Conference on Image Processing (ICIP)}, abstract = {The automation of surveillance systems is important to allow real-time analysis of critical events, crime investigation and prevention. A crucial step in the surveillance systems is the person re-identification (Re-ID) which aims at maintaining the identity of agents in non-overlapping camera networks. Most of the works in literature compare a test sample against the entire gallery, restricting the scalability. We address this problem employing multiple indexing lists obtained by color name descriptors extracted from partbased models using our proposed Predominant Color Name (PCN) indexing structure. PCN is a flexible indexing structure that relates features to gallery images without the need of labelled training images and can be integrated with existing supervised and unsupervised person Re-ID frameworks. Experimental results demonstrate that the proposed approach outperforms indexation based on unsupervised clustering methods such as k-means and c-means. Furthermore, PCN reduces the computational efforts with a minimum performance degradation. For instance, when indexing 50% and 75% of the gallery images, we observed a reduction in AUC curve of 0.01 and 0.08, respectively, when compared to indexing the entire gallery.}, keywords = {GigaFrames, Person Re-Identification, VER+}, pubstate = {published}, tppubtype = {inproceedings} } The automation of surveillance systems is important to allow real-time analysis of critical events, crime investigation and prevention. A crucial step in the surveillance systems is the person re-identification (Re-ID) which aims at maintaining the identity of agents in non-overlapping camera networks. Most of the works in literature compare a test sample against the entire gallery, restricting the scalability. We address this problem employing multiple indexing lists obtained by color name descriptors extracted from partbased models using our proposed Predominant Color Name (PCN) indexing structure. PCN is a flexible indexing structure that relates features to gallery images without the need of labelled training images and can be integrated with existing supervised and unsupervised person Re-ID frameworks. Experimental results demonstrate that the proposed approach outperforms indexation based on unsupervised clustering methods such as k-means and c-means. Furthermore, PCN reduces the computational efforts with a minimum performance degradation. For instance, when indexing 50% and 75% of the gallery images, we observed a reduction in AUC curve of 0.01 and 0.08, respectively, when compared to indexing the entire gallery. |
Raphael Felipe Carvalho de Prates; Marina Santos Oliveira; William Robson Schwartz Kernel Partial Least Squares for Person Re-Identification Inproceedings IEEE International Conference on Advanced Video and Signal-Based Surveillance (AVSS), 2016. Abstract | Links | BibTeX | Tags: DeepEyes, Featured Publication, GigaFrames, HAR-HEALTH, Kernel Partial Least Squares, Kernel Partial Least Squares for Person Re-Identification, Person Re-Identification @inproceedings{Prates2016AVSS, title = {Kernel Partial Least Squares for Person Re-Identification}, author = {Raphael Felipe Carvalho de Prates and Marina Santos Oliveira and William Robson Schwartz}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/egpaper_for_DoubleBlindReview.pdf}, year = {2016}, date = {2016-09-25}, booktitle = {IEEE International Conference on Advanced Video and Signal-Based Surveillance (AVSS)}, abstract = {Person re-identification (Re-ID) keeps the same identity for a person as they move along an area with non-overlapping surveillance cameras. Re-ID is a challenging task due to appearance changes caused by different camera viewpoints, occlusion and illumination conditions. While robust and discriminative descriptors are obtained by combining texture, shape and color features in a high-dimensional representation, achieving both accuracy and efficiency demands dimensionality reduction methods. In this paper, we propose variations of Kernel Partial Least Squares (KPLS) that simultaneously reduce the dimensionality and increase the discriminative power. The Cross-View KPLS (X-KPLS) and KPLS Mode A capture cross-view discriminative information and are successful for unsupervised and supervised Re-ID. Experimental results demonstrate that X-KPLS presents equal or higher matching results when compared to other methods in the literature on the PRID450S dataset.}, keywords = {DeepEyes, Featured Publication, GigaFrames, HAR-HEALTH, Kernel Partial Least Squares, Kernel Partial Least Squares for Person Re-Identification, Person Re-Identification}, pubstate = {published}, tppubtype = {inproceedings} } Person re-identification (Re-ID) keeps the same identity for a person as they move along an area with non-overlapping surveillance cameras. Re-ID is a challenging task due to appearance changes caused by different camera viewpoints, occlusion and illumination conditions. While robust and discriminative descriptors are obtained by combining texture, shape and color features in a high-dimensional representation, achieving both accuracy and efficiency demands dimensionality reduction methods. In this paper, we propose variations of Kernel Partial Least Squares (KPLS) that simultaneously reduce the dimensionality and increase the discriminative power. The Cross-View KPLS (X-KPLS) and KPLS Mode A capture cross-view discriminative information and are successful for unsupervised and supervised Re-ID. Experimental results demonstrate that X-KPLS presents equal or higher matching results when compared to other methods in the literature on the PRID450S dataset. |
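For readers unfamiliar with how kernel PLS can relate two camera views, the snippet below is a rough, illustrative approximation using scikit-learn (PLS regression between RBF-kernelized views, followed by cosine matching in the predicted space); it is not the authors' X-KPLS or KPLS Mode A implementation, and all array names and parameter values are assumptions.

import numpy as np
from sklearn.metrics.pairwise import rbf_kernel, cosine_similarity
from sklearn.cross_decomposition import PLSRegression

rng = np.random.default_rng(0)
Xa_train = rng.normal(size=(200, 500))   # toy camera-A descriptors (same identities as Xb_train)
Xb_train = rng.normal(size=(200, 500))   # toy camera-B descriptors

Ka = rbf_kernel(Xa_train, Xa_train, gamma=1e-3)   # kernel trick: represent each sample by its
Kb = rbf_kernel(Xb_train, Xb_train, gamma=1e-3)   # similarities to the training set

pls = PLSRegression(n_components=30)
pls.fit(Ka, Kb)                                   # low-dimensional mapping from view A to view B

def match(probe_a, gallery_b):
    """Rank camera-B gallery descriptors for one camera-A probe."""
    ka = rbf_kernel(probe_a[None, :], Xa_train, gamma=1e-3)
    kb_gallery = rbf_kernel(gallery_b, Xb_train, gamma=1e-3)
    pred_b = pls.predict(ka)                      # probe expressed in the camera-B kernel space
    scores = cosine_similarity(pred_b, kb_gallery)[0]
    return np.argsort(scores)[::-1]               # best match first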
Gabriel Resende Goncalves License Plate Recognition based on Temporal Redundancy Masters Thesis Federal University of Minas Gerais, 2016. Abstract | Links | BibTeX | Tags: Automatic License Plate Recognition, DeepEyes, GigaFrames @mastersthesis{Goncalves:2016:MSc, title = {License Plate Recognition based on Temporal Redundancy}, author = {Gabriel Resende Goncalves}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/03/dissertation_2016_Gabriel.pdf}, year = {2016}, date = {2016-08-26}, school = {Federal University of Minas Gerais}, abstract = {Recognition of vehicle license plates is an important task applied to a myriad of real scenarios. Most approaches in the literature first detect an on-track vehicle, locate the license plate, perform a segmentation of its characters and then recognize the characters using an Optical Character Recognition (OCR) approach. However, these approaches perform these tasks using only a single frame of each vehicle in the video. Therefore, such techniques might have their recognition rates reduced due to noise present in that particular frame. In this work, on the other hand, we propose an approach that automatically detects the vehicle on the road and identifies (locates/recognizes) its license plate based on temporally redundant information instead of selecting a single frame to perform the recognition. We also propose two post-processing steps that can be employed to improve the accuracy of the system by querying a license plate database (e.g., the Department of Motor Vehicles database containing a list of all issued license plates and car models). Experimental results demonstrate that it is possible to improve the vehicle recognition rate by 15.5 percentage points (p.p.) over the baseline results (an increase of 23.38%) using our proposed temporal redundancy approach. Furthermore, an additional 7.8 p.p. are achieved using the two post-processing approaches, leading to a final recognition rate of 89.6% on a dataset with 5,200 frame images of 300 vehicles recorded at the Federal University of Minas Gerais (UFMG). In addition, this work also proposes a novel benchmark, designed specifically to evaluate character segmentation techniques, composed of a dataset of 2,000 Brazilian license plates (resulting in 14,000 alphanumeric symbols) and an evaluation protocol considering a novel evaluation measure, the Jaccard-Centroid coefficient.}, keywords = {Automatic License Plate Recognition, DeepEyes, GigaFrames}, pubstate = {published}, tppubtype = {mastersthesis} } Recognition of vehicle license plates is an important task applied to a myriad of real scenarios. Most approaches in the literature first detect an on-track vehicle, locate the license plate, perform a segmentation of its characters and then recognize the characters using an Optical Character Recognition (OCR) approach. However, these approaches perform these tasks using only a single frame of each vehicle in the video. Therefore, such techniques might have their recognition rates reduced due to noise present in that particular frame. In this work, on the other hand, we propose an approach that automatically detects the vehicle on the road and identifies (locates/recognizes) its license plate based on temporally redundant information instead of selecting a single frame to perform the recognition. We also propose two post-processing steps that can be employed to improve the accuracy of the system by querying a license plate database (e.g., the Department of Motor Vehicles database containing a list of all issued license plates and car models). Experimental results demonstrate that it is possible to improve the vehicle recognition rate by 15.5 percentage points (p.p.) over the baseline results (an increase of 23.38%) using our proposed temporal redundancy approach. Furthermore, an additional 7.8 p.p. are achieved using the two post-processing approaches, leading to a final recognition rate of 89.6% on a dataset with 5,200 frame images of 300 vehicles recorded at the Federal University of Minas Gerais (UFMG). In addition, this work also proposes a novel benchmark, designed specifically to evaluate character segmentation techniques, composed of a dataset of 2,000 Brazilian license plates (resulting in 14,000 alphanumeric symbols) and an evaluation protocol considering a novel evaluation measure, the Jaccard-Centroid coefficient. |
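The core of the temporal-redundancy idea in the dissertation above is to fuse the per-frame OCR outputs of one tracked vehicle instead of trusting a single frame. Below is a minimal sketch assuming equally long plate strings and simple per-position majority voting; the dissertation also studies other fusion schemes, and the function name and example strings here are hypothetical.

from collections import Counter

def combine_plate_readings(per_frame_readings):
    """Majority-vote each character position across the frames of one tracked vehicle.

    per_frame_readings: list of equally long OCR strings, e.g. ["ABC1234", "A8C1234", ...]
    """
    length = len(per_frame_readings[0])
    plate = []
    for pos in range(length):
        votes = Counter(reading[pos] for reading in per_frame_readings
                        if len(reading) == length)
        plate.append(votes.most_common(1)[0][0])
    return "".join(plate)

# e.g. combine_plate_readings(["ABC1234", "A8C1234", "ABC1Z34"]) -> "ABC1234"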
Artur Jordao The Good, the Fast and the Better Pedestrian Detector Masters Thesis Federal University of Minas Gerais, 2016. Abstract | Links | BibTeX | Tags: DeepEyes, DET, GigaFrames, Pedestrian Detection, VER+ @mastersthesis{Jordao:2016:MSc, title = {The Good, the Fast and the Better Pedestrian Detector}, author = {Artur Jordao}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/03/dissertation_2016_ArturJordao.pdf}, year = {2016}, date = {2016-06-24}, school = {Federal University of Minas Gerais}, abstract = {Pedestrian detection is a well-known problem in Computer Vision, mostly because of its direct applications in surveillance, transit safety and robotics. In the past decade, several efforts have been made to improve detection in terms of accuracy, speed and feature enhancement. In this work, we propose and analyze techniques focusing on these points. First, we develop an accurate oblique random forest (oRF) associated with Partial Least Squares (PLS). The method uses PLS to find, at each node of a decision tree, a decision surface that better splits the samples presented to it, based on a purity criterion. To measure the advantages provided by PLS on the oRF, we compare the proposed method with an oRF based on SVM. Second, we evaluate and compare filtering approaches that reduce the search space and keep only potential regions of interest to be presented to detectors, speeding up the detection process. Experimental results demonstrate that the evaluated filters are able to discard a large number of detection windows without compromising accuracy. Finally, we propose a novel approach to extract powerful features regarding the scene. The method combines the results of distinct pedestrian detectors by reinforcing the human hypothesis, while suppressing a significant number of false positives due to the lack of spatial consensus when multiple detectors are considered. Our proposed approach, referred to as Spatial Consensus (SC), outperforms all previously published state-of-the-art pedestrian detection methods.}, keywords = {DeepEyes, DET, GigaFrames, Pedestrian Detection, VER+}, pubstate = {published}, tppubtype = {mastersthesis} } Pedestrian detection is a well-known problem in Computer Vision, mostly because of its direct applications in surveillance, transit safety and robotics. In the past decade, several efforts have been made to improve detection in terms of accuracy, speed and feature enhancement. In this work, we propose and analyze techniques focusing on these points. First, we develop an accurate oblique random forest (oRF) associated with Partial Least Squares (PLS). The method uses PLS to find, at each node of a decision tree, a decision surface that better splits the samples presented to it, based on a purity criterion. To measure the advantages provided by PLS on the oRF, we compare the proposed method with an oRF based on SVM. Second, we evaluate and compare filtering approaches that reduce the search space and keep only potential regions of interest to be presented to detectors, speeding up the detection process. Experimental results demonstrate that the evaluated filters are able to discard a large number of detection windows without compromising accuracy. Finally, we propose a novel approach to extract powerful features regarding the scene. The method combines the results of distinct pedestrian detectors by reinforcing the human hypothesis, while suppressing a significant number of false positives due to the lack of spatial consensus when multiple detectors are considered. Our proposed approach, referred to as Spatial Consensus (SC), outperforms all previously published state-of-the-art pedestrian detection methods. |
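To make the spatial-consensus idea concrete, the sketch below keeps a detection window only when enough independent detectors fired at spatially overlapping locations. It is an illustrative simplification of the published Spatial Consensus method, not its actual scoring; the thresholds and names are chosen arbitrarily.

def iou(a, b):
    """Intersection-over-union of two boxes given as (x1, y1, x2, y2)."""
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / float(area_a + area_b - inter) if inter else 0.0

def spatial_consensus(detections_per_detector, min_agreements=2, iou_thr=0.5):
    """Keep a window only if at least `min_agreements` detectors fired near it.

    detections_per_detector: list (one entry per detector) of lists of boxes.
    """
    kept = []
    for d, boxes in enumerate(detections_per_detector):
        for box in boxes:
            votes = sum(
                any(iou(box, other) >= iou_thr for other in other_boxes)
                for o, other_boxes in enumerate(detections_per_detector) if o != d
            )
            if votes + 1 >= min_agreements:   # +1 counts the detector that proposed the box
                kept.append(box)
    return kept

A standard non-maximum suppression pass over the kept boxes would normally follow, since agreeing detectors contribute near-duplicate windows.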
Cristianne Rodrigues Santos Dutra Técnicas Otimizadas para Reidentificação de Pessoas Masters Thesis Federal University of Minas Gerais, 2016. Links | BibTeX | Tags: DeepEyes, GigaFrames, Person Re-Identification, VER+ @mastersthesis{Dutra:2016:MSc, title = {Técnicas Otimizadas para Reidentificação de Pessoas}, author = {Cristianne Rodrigues Santos Dutra}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/thesis_2016_Cristianne.pdf}, year = {2016}, date = {2016-01-01}, school = {Federal University of Minas Gerais}, keywords = {DeepEyes, GigaFrames, Person Re-Identification, VER+}, pubstate = {published}, tppubtype = {mastersthesis} } |
2015 |
Cassio Elias dos Santos Junior Partial Least Squares for Face Hashing Masters Thesis Federal University of Minas Gerais, 2015. Abstract | Links | BibTeX | Tags: DeepEyes, Face Identification, Face Recognition, GigaFrames, Indexing Structure, Locality Sensitive Hashing, Partial Least Squares, VER+ @mastersthesis{Santos:2015:MSc, title = {Partial Least Squares for Face Hashing}, author = {Cassio Elias dos Santos Junior}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/dissertation_2015_Cassio.pdf}, year = {2015}, date = {2015-08-24}, school = {Federal University of Minas Gerais}, abstract = {Face identification is an important research topic due to its applications in areas such as surveillance, forensics and human-computer interaction. In the past few years, a myriad of methods for face identification has been proposed in the literature, with just a few among them focusing on scalability. In this work, we propose a simple but efficient approach for scalable face identification based on partial least squares (PLS) and random independent hash functions inspired by locality-sensitive hashing (LSH), resulting in the PLS for hashing (PLSH) approach. The original PLSH approach is further extended using feature selection to reduce the computational cost of evaluating the PLS-based hash functions, resulting in the state-of-the-art extended PLSH approach (ePLSH). The proposed approach is evaluated on the FERET and FRGCv1 datasets. The results show a significant reduction in the number of subjects evaluated during face identification (reduced to 0.3% of the gallery), providing average speedups of up to 233 times compared to evaluating all subjects in the face gallery and 58 times compared to previous works in the literature.}, keywords = {DeepEyes, Face Identification, Face Recognition, GigaFrames, Indexing Structure, Locality Sensitive Hashing, Partial Least Squares, VER+}, pubstate = {published}, tppubtype = {mastersthesis} } Face identification is an important research topic due to its applications in areas such as surveillance, forensics and human-computer interaction. In the past few years, a myriad of methods for face identification has been proposed in the literature, with just a few among them focusing on scalability. In this work, we propose a simple but efficient approach for scalable face identification based on partial least squares (PLS) and random independent hash functions inspired by locality-sensitive hashing (LSH), resulting in the PLS for hashing (PLSH) approach. The original PLSH approach is further extended using feature selection to reduce the computational cost of evaluating the PLS-based hash functions, resulting in the state-of-the-art extended PLSH approach (ePLSH). The proposed approach is evaluated on the FERET and FRGCv1 datasets. The results show a significant reduction in the number of subjects evaluated during face identification (reduced to 0.3% of the gallery), providing average speedups of up to 233 times compared to evaluating all subjects in the face gallery and 58 times compared to previous works in the literature. |
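The PLSH scheme summarized above can be pictured as follows: each hash function is a PLS regression trained to separate a random half of the gallery subjects from the other half, and at query time the probe votes for the subjects on whichever side it falls. The sketch below is a simplified illustration under that reading, not the thesis code; the class, parameter names and default values are assumptions.

import numpy as np
from sklearn.cross_decomposition import PLSRegression

class PLSHashing:
    """Simplified PLS-for-hashing sketch: one PLS model per random balanced subject split."""

    def __init__(self, n_hash_models=50, n_components=10, seed=0):
        self.n_hash_models = n_hash_models
        self.n_components = n_components      # must not exceed the feature dimensionality
        self.rng = np.random.default_rng(seed)
        self.models, self.positive_sets = [], []

    def fit(self, gallery_feats, gallery_subjects):
        self.subjects = np.unique(gallery_subjects)
        for _ in range(self.n_hash_models):
            positives = set(self.rng.choice(self.subjects, size=len(self.subjects) // 2,
                                            replace=False))
            targets = np.array([1.0 if s in positives else -1.0 for s in gallery_subjects])
            model = PLSRegression(n_components=self.n_components).fit(gallery_feats, targets)
            self.models.append(model)
            self.positive_sets.append(positives)
        return self

    def candidate_list(self, probe_feat, top=10):
        votes = {s: 0.0 for s in self.subjects}
        for model, positives in zip(self.models, self.positive_sets):
            score = float(model.predict(probe_feat[None, :]).ravel()[0])
            side = positives if score > 0 else set(self.subjects) - positives
            for s in side:
                votes[s] += abs(score)        # weight each vote by the regression response
        # only the most-voted subjects are passed on to the full face identifier
        return sorted(votes, key=votes.get, reverse=True)[:top]

Only the few most-voted subjects are then evaluated with the full face identification model, which is where the reported speedups come from.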
Cassio Elias dos Santos Junior; E Kijak; G Gravier; William Robson Schwartz Learning to Hash Faces Using Large Feature Vectors Inproceedings Content-Based Multimedia Indexing (CBMI), 13th International Workshop on, pp. 1–6, IEEE, 2015. Links | BibTeX | Tags: Face Identification, Face Recognition, GigaFrames, Indexing Structure, Locality Sensitive Hashing, Partial Least Squares, SmartView, VER+ @inproceedings{santos2015learning, title = {Learning to Hash Faces Using Large Feature Vectors}, author = {Cassio Elias dos Santos Junior and E Kijak and G Gravier and William Robson Schwartz}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/2015-Learning_to_Hash_Faces_Using_Large_Feature_Vectors.pdf}, year = {2015}, date = {2015-01-01}, booktitle = {Content-Based Multimedia Indexing (CBMI), 13th International Workshop on}, pages = {1--6}, publisher = {IEEE}, keywords = {Face Identification, Face Recognition, GigaFrames, Indexing Structure, Locality Sensitive Hashing, Partial Least Squares, SmartView, VER+}, pubstate = {published}, tppubtype = {inproceedings} } |
A Pinto; H Pedrini; William Robson Schwartz; A Rocha Face Spoofing Detection Through Visual Codebooks of Spectral Temporal Cubes Journal Article Image Processing, IEEE Transactions on, 24 (12), pp. 4726-4740, 2015, ISSN: 1057-7149. Links | BibTeX | Tags: DET, GigaFrames, Spoofing Detection @article{TIP:2015:Pinto, title = {Face Spoofing Detection Through Visual Codebooks of Spectral Temporal Cubes}, author = {A Pinto and H Pedrini and William Robson Schwartz and A Rocha}, url = {http://dx.doi.org/10.1109/TIP.2015.2466088}, issn = {1057-7149}, year = {2015}, date = {2015-01-01}, journal = {Image Processing, IEEE Transactions on}, volume = {24}, number = {12}, pages = {4726-4740}, keywords = {DET, GigaFrames, Spoofing Detection}, pubstate = {published}, tppubtype = {article} } |
Ramon F Pessoa; William Robson Schwartz; Jefersson A dos Santos A Study on Low-Cost Representations for Image Feature Extraction on Mobile Devices Inproceedings 14th Iberoamerican Congress on Pattern Recognition (CIARP), pp. 1-8, 2015. Links | BibTeX | Tags: DET, Feature Extraction, GigaFrames @inproceedings{Pessoa:2015:CIARP, title = {A Study on Low-Cost Representations for Image Feature Extraction on Mobile Devices}, author = {Ramon F Pessoa and William Robson Schwartz and Jefersson A dos Santos}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/paper_2015_CIARP_Pessoa.pdf}, year = {2015}, date = {2015-01-01}, booktitle = {14th Iberoamerican Congress on Pattern Recognition (CIARP)}, pages = {1-8}, keywords = {DET, Feature Extraction, GigaFrames}, pubstate = {published}, tppubtype = {inproceedings} } |
Raphael Felipe Carvalho de Prates; William Robson Schwartz CBRA: Color-Based Ranking Aggregation for Person Re-Identification Inproceedings IEEE International Conference on Image Processing (ICIP), pp. 1-5, 2015. Links | BibTeX | Tags: CBRA, GigaFrames, Person Re-Identification, Ranking Aggregation, SmartView, VER+ @inproceedings{Prates:2015:ICB, title = {CBRA: Color-Based Ranking Aggregation for Person Re-Identification}, author = {Raphael Felipe Carvalho de Prates and William Robson Schwartz}, url = {http://smartsenselab.dcc.ufmg.br/wp-content/uploads/2019/02/paper_2015_ICIP_Prates.pdf}, year = {2015}, date = {2015-01-01}, booktitle = {IEEE International Conference on Image Processing (ICIP)}, pages = {1-5}, keywords = {CBRA, GigaFrames, Person Re-Identification, Ranking Aggregation, SmartView, VER+}, pubstate = {published}, tppubtype = {inproceedings} } |