%%%%%%%%%%%%
%% 2024 %%
%%%%%%%%%%%%
@misc{ssrn-2024,
title = {Automatic Transformer-Based Grading of Multiple Retinal Inflammatory Signs on Fluorescein Angiography},
url = {https://papers.ssrn.com/abstract=4960069},
doi = {10.2139/ssrn.4960069},
abstract = {Background: Grading fluorescein angiography ({FA}) in the context of uveitis is complex, often leading to the oversight of retinal inflammation in clinical studies. This study aims to develop an automated method for grading retinal inflammation.},
number = {4960069},
author = {Amiot, Victor and Jimenez-del-Toro, Oscar and Guex-Crosier, Yan and Ott, Muriel and Bogaciu, Teodora-Elena and Banerjee, Shalini and Howell, Jeremy and Amstutz, Christoph and Chiquet, Christophe and Bergin, Ciara and Meloni, Ilenia and Tomasoni, Mattia and Hoogewoud, Florence and Anjos, André},
year = {2024},
month = 9,
day = 24,
keywords = {capillaropathy, Deep Learning, disease grading, fluorescein angiography, inter-grader agreement, macular edema, optic disc hyperfluorescence, ordinal classification index, papillitis, retinal inflammation, transformers, Uveitis, vascular leakage, vasculitis},
}
@inproceedings{miccai-2024,
author = {Queiroz Neto, Dilermando and Anjos, Andr{\'{e}} and Berton, Lilian},
keywords = {Fairness, Foundation Model, Medical Image},
month = 10,
title = {Using Backbone Foundation Model for Evaluating Fairness in Chest Radiography Without Demographic Data},
booktitle = {Proceedings of the International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI)},
year = {2024},
abstract = {Ensuring consistent performance across diverse populations and incorporating fairness into machine learning models are crucial for advancing medical image diagnostics and promoting equitable healthcare. However, many databases do not provide protected attributes or contain unbalanced representations of demographic groups, complicating the evaluation of model performance across different demographics and the application of bias mitigation techniques that rely on these attributes. This study aims to investigate the effectiveness of using the backbone of Foundation Models as an embedding extractor for creating groups that represent protected attributes, such as gender and age. We propose utilizing these groups in different stages of bias mitigation, including pre-processing, in-processing, and evaluation. Using databases in in-distribution and out-of-distribution scenarios, we find that the method can create groups that represent gender in both databases and reduce the difference across the gender attribute by 4.44\% in-distribution and by 6.16\% out-of-distribution. However, the model lacks robustness in handling age attributes, underscoring the need for more fundamentally fair and robust Foundation models. These findings suggest a role in promoting fairness assessment in scenarios where we lack knowledge of attributes, contributing to the development of more equitable medical diagnostics.},
pdf = {https://publications.idiap.ch/attachments/papers/2024/QueirozNeto_CVPR_2024.pdf}
}
@inproceedings{eccv-2024,
author = {Queiroz Neto, Dilermando and Carlos, Anderson and Fatoretto, Ma{\'{\i}}ra and Nakayama, Luis Filipe and Anjos, Andr{\'{e}} and Berton, Lilian},
projects = {FAIRMI},
month = 10,
title = {Does Data-Efficient Generalization Exacerbate Bias in Foundation Models?},
booktitle = {Proceedings of the 18th European Conference on Computer Vision (ECCV)},
year = {2024},
abstract = {Foundation models have emerged as robust models with label efficiency in diverse domains. In medical imaging, these models contribute to the advancement of medical diagnoses due to the difficulty in obtaining labeled data. However, it is unclear whether using a large amount of unlabeled data, biased by the presence of sensitive attributes during pre-training, influences the fairness of the model. This research examines the bias in the Foundation model (RetFound) when it is applied to fine-tune the Brazilian Multilabel Ophthalmological Dataset (BRSET), which has a different population than the pre-training dataset. The model evaluation, in comparison with supervised learning, shows that the Foundation Model has the potential to reduce the gap between the maximum AUC and minimum AUC evaluations across gender and age groups. However, in a data-efficient generalization, the model increases the bias when the data amount decreases. These findings suggest that when deploying a Foundation Model in real-life scenarios with limited data, the possibility of fairness issues should be considered.},
pdf = {https://publications.idiap.ch/attachments/papers/2024/QueirozNeto_ECCV_2024.pdf}
}
@inproceedings{euvip-2024-2,
author = {Jimenez-del-Toro, Oscar and Aberle, Christoph and Schaer, Roger and Bach, Michael and Flouris, Kyriakos and Konukoglu, Ender and Stieltjes, Bram and Obmann, Markus M. and Anjos, Andr{\'{e}} and M{\"{u}}ller, Henning and Depeursinge, Adrien},
month = 9,
title = {Comparing Stability and Discriminatory Power of Hand-crafted Versus Deep Radiomics: A 3D-Printed Anthropomorphic Phantom Study},
booktitle = {Proceedings of the 12th European Workshop on Visual Information Processing},
year = {2024},
abstract = {Radiomics have the ability to comprehensively quantify human tissue characteristics in medical imaging studies. However, standard radiomic features are highly unstable due to their sensitivity to scanner and reconstruction settings. We present an evaluation framework for the extraction of 3D deep radiomics features using a pre-trained neural network on real computed tomography (CT) scans for tissue characterization. We compare both the stability and discriminative power of the proposed 3D deep learning radiomic features versus standard hand-crafted radiomic features using 8 image acquisition protocols with a 3D-printed anthropomorphic phantom containing 4 classes of liver lesions and normal tissue. Even when the deep learning model was trained on an external dataset and for a different tissue characterization task, the resulting generic deep radiomics are at least twice more stable on 8 CT parameter variations than any category of hand-crafted features. Moreover, the 3D deep radiomics were also discriminative for the tissue characterization between 4 classes of liver tissue and lesions, with an average discriminative power of 93.5\%.},
pdf = {https://publications.idiap.ch/attachments/papers/2024/Jimenez-del-Toro_EUVIP2024_2024.pdf}
}
@inproceedings{euvip-2024-1,
author = {G{\"{u}}ler, {\"{O}}zg{\"{u}}r and G{\"{u}}nther, Manuel and Anjos, Andr{\'{e}}},
month = 9,
title = {Refining Tuberculosis Detection in CXR Imaging: Addressing Bias in Deep Neural Networks via Interpretability},
booktitle = {Proceedings of the 12th European Workshop on Visual Information Processing},
year = {2024},
abstract = {Automatic classification of active tuberculosis from chest X-ray images has the potential to save lives, especially in low- and mid-income countries where skilled human experts can be scarce. Given the lack of available labeled data to train such systems and the unbalanced nature of publicly available datasets, we argue that the reliability of deep learning models is limited, even if they can be shown to obtain perfect classification accuracy on the test data. One way of evaluating the reliability of such systems is to ensure that models use the same regions of input images for predictions as medical experts would. In this paper, we show that pre-training a deep neural network on a large-scale proxy task, as well as using mixed objective optimization network (MOON), a technique to balance different classes during pre-training and fine-tuning, can improve the alignment of decision foundations between models and experts, as compared to a model directly trained on the target dataset. At the same time, these approaches keep perfect classification accuracy according to the area under the receiver operating characteristic curve (AUROC) on the test set, and improve generalization on an independent, unseen dataset. For the purpose of reproducibility, our source code is made available online.},
pdf = {https://publications.idiap.ch/attachments/papers/2024/Guler_EUVIP24_2024.pdf}
}
@article{mvr-2024,
author = {Mautuit, Thibaud and Cunnac, Pierre and Truffer, Fr{\'{e}}d{\'{e}}ric and Anjos, Andr{\'{e}} and Dufrane, Rebecca and Ma{\^{\i}}tre, Gilbert and Geiser, Martial and Chiquet, Christophe},
month = 1,
title = {Absolute retinal blood flow in healthy eyes and in eyes with retinal vein occlusion},
journal = {Microvascular Research},
volume = {152},
year = {2024},
issn = {0026-2862},
doi = {10.1016/j.mvr.2023.104648},
abstract = {Purpose: To non-invasively measure retinal venous blood flow
(RBF) in healthy subjects and patients with retinal venous occlusion (RVO).
Methods: The prototype named AO-LDV (Adaptive Optics Laser Doppler
Velocimeter), which combines a new absolute laser Doppler velocimeter with
an adaptive optics fundus camera (rtx1, Imagine Eyes{\textregistered},
Orsay, France), was studied for the measurement of absolute RBF as a
function of retinal vessel diameters and simultaneous measurement of red
blood cell velocity. RBF was measured in healthy subjects (n = 15) and
patients with retinal venous occlusion (RVO, n = 6). We also evaluated two
software packages for the measurement of retinal vessel diameters: software 1
(automatic vessel detection, profile analysis) and software 2 (based on the
use of deep neural networks for semantic segmentation of vessels, using a
M2u-Net architecture).
Results: Software 2 provided a higher rate of automatic retinal vessel
measurement (99.5 \% of 12,320 AO images) than software 1 (64.9 \%) and
wider measurements (75.5 ± 15.7 μm vs 70.9 ± 19.8 μm, p < 0.001). For
healthy subjects (n = 15), all the retinal veins in one eye were measured
to obtain the total RBF. In healthy subjects, the total RBF was 37.8 ± 6.8
μl/min. There was a significant linear correlation between retinal vessel
diameter and maximal velocity (slope = 0.1016; p < 0.001; $r^2$ = 0.8597) and
a significant power curve correlation between retinal vessel diameter and
blood flow ($3.63 \times 10^{-5} \times D^{2.54}$; p < 0.001; $r^2$ = 0.7287). No significant
relationship was found between total RBF and systolic and diastolic blood
pressure, ocular perfusion pressure, heart rate, or hematocrit. For RVO
patients (n = 6), a significant decrease in RBF was noted in occluded veins
(3.51 ± 2.25 μl/min) compared with the contralateral healthy eye (11.07 ±
4.53 μl/min). For occluded vessels, the slope between diameter and velocity
was 0.0195 (p < 0.001; $r^2$ = 0.6068) and the relation between diameter and
flow was $Q = 9.91 \times 10^{-6} \times D^{2.41}$ (p < 0.01; $r^2$ = 0.2526).
Conclusion: This AO-LDV prototype offers a new opportunity to study RBF in humans
and to evaluate treatment in retinal vein diseases.},
}
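%
% The abstract above reports power-law fits between retinal vein diameter D
% (in um) and venous blood flow Q (in ul/min): Q = 3.63e-5 * D^2.54 in healthy
% eyes and Q = 9.91e-6 * D^2.41 in occluded veins. The commented Python sketch
% below merely evaluates those published fits for a few diameters; the function
% name and example diameters are illustrative choices, not taken from the paper.
%
%   def venous_flow_ul_per_min(diameter_um: float, occluded: bool = False) -> float:
%       """Evaluate the published diameter-to-flow power-law fit."""
%       a, b = (9.91e-6, 2.41) if occluded else (3.63e-5, 2.54)
%       return a * diameter_um ** b
%
%   if __name__ == "__main__":
%       for d in (60.0, 75.5, 90.0):  # 75.5 um is the reported mean diameter
%           print(f"D = {d:5.1f} um -> Q ~ {venous_flow_ul_per_min(d):.2f} ul/min "
%                 f"(occluded: {venous_flow_ul_per_min(d, occluded=True):.2f})")
%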
%%%%%%%%%%%%
%% 2023 %%
%%%%%%%%%%%%
@inproceedings{cbms-2023,
author = {Amiot, Victor AND Jimenez-del-Toro, Oscar AND Eyraud, Pauline AND Guex-Crosier, Yan AND Bergin, Ciara AND Anjos, André AND Hoogewoud, Florence AND Tomasoni, Mattia},
title = {Fully Automatic Grading of Retinal Vasculitis on Fluorescein Angiography Time-lapse from Real-world Data in Clinical Settings},
booktitle = {2023 IEEE 36th International Symposium on Computer-Based Medical Systems (CBMS)},
year = {2023},
month = 6,
doi = {10.1109/CBMS58004.2023.00301},
abstract = {The objective of this study is to showcase a pipeline able to perform fully automated grading of retinal inflammation based on a standardised, clinically-validated grading scale. The application of such a scale has so far been hindered by the amount of time required to (manually) apply it in clinical settings. Our dataset includes 3,205 fluorescein angiography images from 148 patients and 242 eyes from the uveitis department of Jules Gonin Eye Hospital. The data was automatically extracted from a medical device, in hospital settings. Images were graded by a medical expert. We focused specifically on one type of inflammation, namely retinal vasculitis. Our pipeline comprises both learning-based models (Pasa model with F1 score = 0.81, AUC = 0.86), and an intensity-based approach to serve as a baseline (F1 score = 0.57, AUC = 0.66). A recall of up to 0.833 computed in an independent test set is comparable to the scores obtained by available state-of-the-art approaches. Here we present the first fully automated pipeline for the grading of retinal vasculitis from raw medical images that is applicable to real-world clinical data.},
}
@article{ijtld-2023,
title = {The rise of artificial intelligence reading of chest X-rays for enhanced TB diagnosis and elimination},
doi = {10.5588/ijtld.22.0687},
abstract = {We provide an overview of the latest evidence on computer-aided detection (CAD) software for automated interpretation of chest radiographs (CXRs) for TB detection. CAD is a useful tool that can assist in rapid and consistent CXR interpretation for TB. CAD can achieve high sensitivity TB detection among people seeking care with symptoms of TB and in population-based screening, and has accuracy on par with human readers. However, implementation challenges remain. Due to diagnostic heterogeneity between settings and sub-populations, users need to select threshold scores rather than use pre-specified ones, but some sites may lack the resources and data to do so. Efficient standardisation is further complicated by frequent updates and new CAD versions, which also challenges implementation and comparison. CAD has not been validated for TB diagnosis in children and its accuracy for identifying non-TB abnormalities remains to be evaluated. A number of economic and political issues also remain to be addressed through regulation for CAD to avoid furthering health inequities. Although CAD-based CXR analysis has proven remarkably accurate for TB detection in adults, the above issues need to be addressed to ensure that the technology meets the needs of high-burden settings and vulnerable sub-populations.},
journal = {INT J TUBERC LUNG DIS},
volume = {27},
journaltitle = {International Journal of Tuberculosis and Lung Disease},
author = {Geric, C. AND Qin, Z. Z. AND Denkinger, C. M. AND Kik, S. V. AND Marais, B. AND Anjos, André AND David, P.-M. AND Khan, F. A. AND Trajman, A.},
year = {2023},
month = 5,
keywords = {computer-aided detection; chest radiology; pulmonary disease; tuberculosis; AI technology},
}
%%%%%%%%%%%%
%% 2022 %%
%%%%%%%%%%%%
@article{elsevier-csal-2022,
title = {Towards lifelong human assisted speaker diarization},
issn = {0885-2308},
doi = {10.1016/j.csl.2022.101437},
abstract = {This paper introduces the resources necessary to develop and evaluate human assisted lifelong learning speaker diarization systems. It describes the {ALLIES} corpus and associated protocols, especially designed for diarization of a collection of audio recordings across time. This dataset is compared to existing corpora and the performances of three baseline systems, based on x-vectors, i-vectors and {VBxHMM}, are reported for reference. Those systems are then extended to include an active correction process that efficiently guides a human annotator to improve the automatically generated hypotheses. An open-source simulated human expert is provided to ensure reproducibility of the human assisted correction process and its fair evaluation. An exhaustive evaluation of the human assisted correction shows the high potential of this approach. The {ALLIES} corpus, a baseline system including the active correction module and all evaluation tools are made freely available to the scientific community.},
journal = {Computer Speech \& Language},
journaltitle = {Computer Speech \& Language},
author = {Shamsi, Meysam and Larcher, Anthony and Barrault, Loic and Meignier, Sylvain and Prokopalo, Yevheni and Tahon, Marie and Mehrish, Ambuj and Petitrenaud, Simon and Galibert, Olivier and Gaist, Samuel and Anjos, André and Marcel, Sebastien and Costa-jussà, Marta R.},
year = {2022},
month = 7,
date = {2022-07-27},
pdf = {https://www.idiap.ch/~aanjos/papers/elsevier-csal-2022.pdf},
keywords = {Evaluation, Human assisted learning, Lifelong learning, Speaker diarization},
}
@inproceedings{union-2022,
author = {Raposo, Geoffrey and Trajman, Anete and Anjos, Andr{\'{e}}},
month = 11,
title = {Pulmonary Tuberculosis Screening from Radiological Signs on Chest X-Ray Images Using Deep Models},
booktitle = {Union World Conference on Lung Health},
year = {2022},
addendum = {(Issued from master thesis supervision)},
date = {2022-11-01},
organization = {The Union},
abstract = {Background: The World Health Organization has recently recommended the use of computer-aided detection (CAD) systems for screening pulmonary tuberculosis (PT) in Chest X-Ray images. Previous CAD models are based on direct image to probability detection techniques - and do not generalize well (from training to validation databases). We propose a method that overcomes these limitations by using radiological signs as intermediary proxies for PT detection.
Design/Methods: We developed a multi-class deep learning model, mapping images to 14 radiological signs such as cavities, infiltration, nodules, and fibrosis, using the National Institute of Health (NIH) CXR14 dataset, which contains 112,120 images. Using three public PTB datasets (Montgomery County - MC, Shenzen - CH, and Indian - IN), summing up 955 images, we developed a second model mapping F probabilities to PTB diagnosis (binary labels). We evaluated this approach for its generalization capabilities against direct models, learnt directly from PTB training data or by transfer learning via cross-folding and cross-database experiments. The area under the specificity vs. sensitivity curve (AUC) considering all folds was used to summarize the performance of each approach.
Results: The AUC for intra-dataset tests baseline direct detection deep models achieved 0.95 (MC), 0.95 (CH) and 0.91 (IN), with up to 35\% performance drop on a cross-dataset evaluation scenario. Our proposed approach achieved AUC of 0.97 (MC), 0.90 (CH), and 0.93 (IN), with at most 11\% performance drop on a cross-dataset evaluation (Table/figures). In most tests, the difference was less than 5\%.
Conclusions: A two-step CAD model based on radiological signs offers an adequate base for the development of PT screening systems and is more generalizable than a direct model. Unlike commercially available CADS, our model is completely reproducible and available open source at https://pypi.org/project/bob.med.tb/.}
}
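%
% The entry above describes a two-step screening approach: a first deep model
% maps a chest X-ray to probabilities for 14 radiological signs, and a second
% model maps those probabilities to a binary tuberculosis decision. The
% commented sketch below illustrates only the second step with scikit-learn on
% random stand-in data; the array names, the logistic-regression choice and the
% train/test split are assumptions for illustration, not the authors'
% implementation (their code is in the bob.med.tb package referenced above).
%
%   import numpy as np
%   from sklearn.linear_model import LogisticRegression
%   from sklearn.metrics import roc_auc_score
%
%   rng = np.random.default_rng(0)
%   sign_probs = rng.random((200, 14))        # stand-in radiological-sign probabilities
%   tb_labels = rng.integers(0, 2, size=200)  # stand-in binary PTB labels
%
%   second_stage = LogisticRegression(max_iter=1000)
%   second_stage.fit(sign_probs[:150], tb_labels[:150])
%   scores = second_stage.predict_proba(sign_probs[150:])[:, 1]
%   print("held-out AUC:", roc_auc_score(tb_labels[150:], scores))
%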
@article{nsr-2022,
title = {State-of-the-art retinal vessel segmentation with minimalistic models},
volume = {12},
rights = {2022 The Author(s)},
issn = {2045-2322},
url = {https://www.nature.com/articles/s41598-022-09675-y},
pdf = {https://www.nature.com/articles/s41598-022-09675-y.pdf},
doi = {10.1038/s41598-022-09675-y},
abstract = {The segmentation of retinal vasculature from eye fundus images is a fundamental task in retinal image analysis. Over recent years, increasingly complex approaches based on sophisticated Convolutional Neural Network architectures have been pushing performance on well-established benchmark datasets. In this paper, we take a step back and analyze the real need of such complexity. We first compile and review the performance of 20 different techniques on some popular databases, and we demonstrate that a minimalistic version of a standard U-Net with several orders of magnitude less parameters, carefully trained and rigorously evaluated, closely approximates the performance of current best techniques. We then show that a cascaded extension (W-Net) reaches outstanding performance on several popular datasets, still using orders of magnitude less learnable weights than any previously published work. Furthermore, we provide the most comprehensive cross-dataset performance analysis to date, involving up to 10 different databases. Our analysis demonstrates that the retinal vessel segmentation is far from solved when considering test images that differ substantially from the training data, and that this task represents an ideal scenario for the exploration of domain adaptation techniques. In this context, we experiment with a simple self-labeling strategy that enables moderate enhancement of cross-dataset performance, indicating that there is still much room for improvement in this area. Finally, we test our approach on Artery/Vein and vessel segmentation from {OCTA} imaging problems, where we again achieve results well-aligned with the state-of-the-art, at a fraction of the model complexity available in recent literature. Code to reproduce the results in this paper is released.},
addendum = {(Issued from internship supervision)},
pages = {6174},
number = {1},
journal = {Nature Scientific Reports},
journaltitle = {Scientific Reports},
shortjournal = {Sci Rep},
author = {Galdran, Adrian and Anjos, André and Dolz, José and Chakor, Hadi and Lombaert, Hervé and Ayed, Ismail Ben},
year = {2022},
month = 4,
date = {2022-04-13},
langid = {english},
note = {Number: 1; Publisher: Nature Publishing Group},
keywords = {Biomedical engineering, Computer science, Machine learning},
}
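%
% The abstract above argues that very small U-Nets, and a cascaded W-Net built
% from two of them, suffice for retinal vessel segmentation. The commented
% PyTorch sketch below shows one plausible way to wire such a cascade (the
% second network sees the input image concatenated with the first network's
% sigmoid output); the exact architecture, widths and class names are
% assumptions for illustration, not the authors' released code.
%
%   import torch
%   import torch.nn as nn
%
%   def block(cin, cout):
%       return nn.Sequential(nn.Conv2d(cin, cout, 3, padding=1), nn.ReLU(True),
%                            nn.Conv2d(cout, cout, 3, padding=1), nn.ReLU(True))
%
%   class TinyUNet(nn.Module):
%       """A deliberately small two-level U-Net (a few thousand parameters)."""
%       def __init__(self, cin, width=8):
%           super().__init__()
%           self.enc1, self.enc2 = block(cin, width), block(width, 2 * width)
%           self.dec, self.head = block(3 * width, width), nn.Conv2d(width, 1, 1)
%           self.pool = nn.MaxPool2d(2)
%           self.up = nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False)
%       def forward(self, x):
%           e1 = self.enc1(x)
%           e2 = self.enc2(self.pool(e1))
%           return self.head(self.dec(torch.cat([e1, self.up(e2)], dim=1)))
%
%   class TinyWNet(nn.Module):
%       """Two chained tiny U-Nets; both logit maps can be supervised."""
%       def __init__(self):
%           super().__init__()
%           self.first, self.second = TinyUNet(3), TinyUNet(4)
%       def forward(self, x):
%           p1 = self.first(x)
%           return p1, self.second(torch.cat([x, torch.sigmoid(p1)], dim=1))
%
%   model = TinyWNet()
%   print(sum(p.numel() for p in model.parameters()), "parameters")
%   print(model(torch.randn(1, 3, 64, 64))[1].shape)
%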
%%%%%%%%%%%%
%% 2021 %%
%%%%%%%%%%%%
@inproceedings{cbic-2021,
title = {Development of a lung segmentation algorithm for analog imaged chest X-Ray: preliminary results},
url = {https://sbic.org.br/eventos/cbic_2021/cbic2021-123/},
pdf = {https://www.idiap.ch/~aanjos/papers/cbic-2021.pdf},
doi = {10.21528/CBIC2021-123},
shorttitle = {Development of a lung segmentation algorithm for analog imaged chest X-Ray},
addendum = {(Issued from internship supervision)},
abstract = {Analog X-Ray radiography is still used in many underdeveloped regions around the world. To allow these populations to benefit from advances in automatic computer-aided detection (CAD) systems, X-Ray films must be digitized. Unfortunately, this procedure may introduce imaging artefacts, which may severely impair the performance of such systems.
This work investigates the impact digitized images may have on deep neural networks trained for lung (semantic) segmentation of digital X-ray samples. While three public datasets for lung segmentation evaluation exist for digital samples, none are available for digitized data. To this end, a U-Net-style architecture was trained on publicly available data, and used to predict lung segmentation on a newly annotated set of digitized images.
Using typical performance metrics such as the area under the precision-recall curve (AUPRC), our results show that the model is capable of identifying lung regions in digital X-rays with high intra-dataset (AUPRC: 0.99) and cross-dataset (AUPRC: 0.99) efficiency on unseen test data. When challenged against digitized data, the performance is substantially degraded (AUPRC: 0.90).
Our analysis also suggests that typical performance markers, the maximum F1 score and the AUPRC, are not informative enough to characterize segmentation problems in test images, since pixels are not independent: due to the natural connectivity of lungs in images, a lung pixel tends to be surrounded by other lung pixels.
This work is reproducible. Source code, evaluation protocols and baseline results are available at: https://pypi.org/project/bob.ip.binseg/.},
eventtitle = {Congresso Brasileiro de Inteligência Computacional},
pages = {1--8},
booktitle = {Anais do 15. Congresso Brasileiro de Inteligência Computacional},
year = {2021},
month = 10,
publisher = {{SBIC}},
author = {Renzo, Matheus A. and Fernandez, Natália and Baceti, André A. and Moura Junior, Natanael Nunes and Anjos, André},
}
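%
% The entry above evaluates lung segmentation with the area under the
% precision-recall curve (AUPRC) and the maximum F1 score computed over pixels.
% The commented sketch below shows how such pixel-wise figures can be computed
% with scikit-learn; the random masks are stand-ins, and this is not the
% evaluation code shipped with the bob.ip.binseg package referenced above.
%
%   import numpy as np
%   from sklearn.metrics import average_precision_score, precision_recall_curve
%
%   rng = np.random.default_rng(1)
%   truth = (rng.random((256, 256)) > 0.7).astype(int)                # ground-truth mask
%   probs = np.clip(truth * 0.8 + rng.normal(0.2, 0.2, truth.shape), 0, 1)
%
%   auprc = average_precision_score(truth.ravel(), probs.ravel())
%   prec, rec, _ = precision_recall_curve(truth.ravel(), probs.ravel())
%   f1 = 2 * prec * rec / np.clip(prec + rec, 1e-12, None)
%   print(f"AUPRC = {auprc:.3f}, maximum F1 = {f1.max():.3f}")
%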
%%%%%%%%%%%%
%% 2020 %%
%%%%%%%%%%%%
@misc{arxiv-2020,
title = {The Little W-Net That Could: State-of-the-Art Retinal Vessel Segmentation with Minimalistic Models},
author = {Galdran, Adrian and Anjos, André and Dolz, José and Chakor, Hadi and Lombaert, Hervé and Ayed, Ismail Ben},
year = {2020},
month = 9,
doi = {10.48550/arXiv.2009.01907},
eprinttype = {arxiv},
eprint = {2009.01907},
archivePrefix = {arXiv},
primaryClass = {cs.CV},
journaltitle = {{arXiv}:2009.01907 [cs, eess] (submitted to Nature Scientific Reports)},
url = {https://arxiv.org/abs/2009.01907},
pdf = {https://arxiv.org/pdf/2009.01907},
abstract = {The segmentation of the retinal vasculature from eye fundus images represents one of the most fundamental tasks in retinal image analysis. Over recent years, increasingly complex approaches based on sophisticated Convolutional Neural Network architectures have been slowly pushing performance on well-established benchmark datasets. In this paper, we take a step back and analyze the real need of such complexity. Specifically, we demonstrate that a minimalistic version of a standard UNet with several orders of magnitude less parameters, carefully trained and rigorously evaluated, closely approximates the performance of current best techniques. In addition, we propose a simple extension, dubbed W-Net, which reaches outstanding performance on several popular datasets, still using orders of magnitude less learnable weights than any previously published approach. Furthermore, we provide the most comprehensive cross-dataset performance analysis to date, involving up to 10 different databases. Our analysis demonstrates that the retinal vessel segmentation problem is far from solved when considering test images that differ substantially from the training data, and that this task represents an ideal scenario for the exploration of domain adaptation techniques. In this context, we experiment with a simple self-labeling strategy that allows us to moderately enhance cross-dataset performance, indicating that there is still much room for improvement in this area. Finally, we also test our approach on the Artery/Vein segmentation problem, where we again achieve results well-aligned with the state-of-the-art, at a fraction of the model complexity in recent literature. All the code to reproduce the results in this paper is released.},
}
@article{compbiomed-2020,
title = {Competitive neural layer-based method to identify people with high risk for diabetic foot},
volume = {120},
url = {https://www.sciencedirect.com/science/article/pii/S0010482520301244},
pdf = {https://www.idiap.ch/~aanjos/papers/compbiomed-2020.pdf},
doi = {10.1016/j.compbiomed.2020.103744},
abstract = {Background and objective: To automatically identify patients with diabetes mellitus (DM) who have high risk of developing diabetic foot, via an unsupervised machine learning technique. Methods: We collected a new database containing 54 known risk factors from 250 patients diagnosed with diabetes mellitus. The database also contained a separate validation cohort composed of 73 subjects, where the perceived risk was annotated by expert nurses. A competitive neuron layer-based method was used to automatically split training data into two risk groups. Results: We found that one of the groups was composed of patients with higher risk of developing diabetic foot. The dominant variables that described group membership via our method agreed with the findings from other studies, and indicated a greater risk for developing such a condition. Our method was validated on the available test data, reaching 71\% sensitivity, 100\% specificity, and 90\% accuracy. Conclusions: Unsupervised learning may be deployed to screen patients with diabetes mellitus, pointing out high-risk individuals who require priority follow-up in the prevention of diabetic foot with very high accuracy. The proposed method is automatic and does not require clinical examinations to perform risk assessment, being solely based on the information of a questionnaire answered by patients. Our study found that discriminant variables for predicting risk group membership are highly correlated with expert opinion.},
journal = {Computers in Biology and Medicine},
author = {Ferreira, Ana Cl\'audia Barbosa Hon\'orio and Ferreira, Danton Diego and Oliveira, Henrique Ceretta and Resende, Igor Carvalho de and Anjos, Andr\'e and Lopes, Maria Helena Baena de Moraes},
month = 5,
year = {2020},
keywords = {Artificial neural network, Diabetes mellitus, Diabetic foot},
}
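%
% The abstract above uses a competitive neural layer to split patients into two
% risk groups from 54 questionnaire-based risk factors. The commented sketch
% below implements a generic winner-take-all competitive layer with two
% prototype neurons on random stand-in data; the learning rate, epoch count and
% synthetic data are assumptions, not the study's configuration.
%
%   import numpy as np
%
%   def competitive_two_group_split(x, lr=0.05, epochs=50, seed=0):
%       """Winner-take-all competitive layer with two prototype neurons."""
%       rng = np.random.default_rng(seed)
%       protos = x[rng.choice(len(x), size=2, replace=False)].astype(float)
%       for _ in range(epochs):
%           for sample in x[rng.permutation(len(x))]:
%               winner = np.argmin(np.linalg.norm(protos - sample, axis=1))
%               protos[winner] += lr * (sample - protos[winner])  # move the winner
%       dists = np.linalg.norm(x[:, None, :] - protos[None], axis=2)
%       return np.argmin(dists, axis=1)  # group index per patient
%
%   risk_factors = np.random.default_rng(2).random((250, 54))    # stand-in data
%   print(np.bincount(competitive_two_group_split(risk_factors)))
%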
%%%%%%%%%%%%
%% 2019 %%
%%%%%%%%%%%%
@misc{arxiv-2019,
title = {On the Evaluation and Real-World Usage Scenarios of Deep Vessel Segmentation for Retinography},
author = {Laibacher, Tim and Anjos, Andr\'e},
addendum = {(Issued from internship supervision)},
year = {2019},
month = 9,
eprint = {1909.03856},
archivePrefix = {arXiv},
primaryClass = {cs.CV},
doi = {10.48550/arXiv.1909.03856},
url = {https://arxiv.org/abs/1909.03856},
pdf = {https://arxiv.org/pdf/1909.03856},
journaltitle = {{arXiv}:1909.03856 [cs] (submitted to IEEE International Symposium on Biomedical Imaging 2021)},
abstract = {We identify and address three research gaps in the field of vessel segmentation for retinography. The first focuses on the task of inference on high-resolution fundus images for which only a limited set of ground-truth data is publicly available. Notably, we highlight that simple rescaling and padding or cropping of lower resolution datasets is surprisingly effective. We further explore the effectiveness of semi-supervised learning for better domain adaptation in this context. Our results show competitive performance on a set of common public retina datasets, using a small and light-weight neural network. For HRF, the only very high-resolution dataset currently available, we reach comparable, if not superior, state-of-the-art performance by solely relying on training images from lower-resolution datasets. The second topic we address concerns the lack of standardisation in evaluation metrics. We investigate the variability of the F1-score on the existing datasets and report results for recently published architectures. Our evaluation shows that most reported results are actually comparable to each other in performance. Finally, we address the issue of reproducibility by open-sourcing the complete framework used to produce results shown here.},
}
@patent{3dfv-patent-2019,
author = {Sonna Momo, Lambert and Cerqueira Torres, Luciano and Marcel, S\'ebastien and Anjos, Andr\'e and Liebling, Michael and Shajkofci, Adrian and Amoos, Serge and Woeffray, Alain and Sierro, Alexandre and Roduit, Pierre and Ferrez, Pierre and Bonvin, Lucas},
title = {Method and Device for Biometric Vascular Recognition and/or Identification},
year = {2019},
month = 8,
day = 8,
number = {WO/2019/150254},
type = {Patent},
filing_num = {PCT/IB2019/050708},
yearfiled = {2019},
monthfiled = 1,
dayfiled = 29,
pat_refs = {P\&TS SA (AG, LTD.); Av. J.-J. Rousseau 4 P.O. Box 2848 2001 Neuchâtel, CH},
abstract = {The invention concerns a method and a biometric acquisition device for biometric vascular recognition and/or identification. The method comprises a step of capturing a plurality of veins images (116, 117, 118) of supposed subcutaneous veins (21) of a same inspecting portion (20) of a presented entity (2) from various converging orientations (113, 114, 115). The method further comprises a step of determining whether said entity is a spoof, based on the estimated likelihood that said supposed subcutaneous veins within said plurality of veins images (116, 117, 118) are likely projections of solid veins (120).},
url = {https://patentscope.wipo.int/search/en/detail.jsf?docId=WO2019150254}
}
@article{tifs-2019-2,
author = {George, Anjith and Mostaani, Zohreh and Geissenbuhler, David and Nikisins, Olegs and Anjos, Andr{\'{e}} and Marcel, S{\'{e}}bastien},
title = {Biometric Face Presentation Attack Detection with Multi-Channel Convolutional Neural Network},
journal = {IEEE Transactions on Information Forensics and Security},
month = 5,
year = {2019},
doi = {10.1109/TIFS.2019.2916652},
pdf = {https://www.idiap.ch/~aanjos/papers/tifs-2019-2.pdf},
abstract = {Face recognition is a mainstream biometric authentication method. However, vulnerability to presentation attacks (a.k.a spoofing) limits its usability in unsupervised applications. Even though there are many methods available for tackling presentation attacks (PA), most of them fail to detect sophisticated attacks such as silicone masks.
As the quality of presentation attack instruments improves over time, achieving reliable PA detection with visual spectra alone remains very challenging. We argue that analysis in multiple channels might help to address this issue. In this context, we propose a multi-channel Convolutional Neural Network based approach for presentation attack detection (PAD).
We also introduce the new Wide Multi-Channel presentation Attack (WMCA) database for face PAD which contains a wide variety of 2D and 3D presentation attacks for both impersonation and obfuscation attacks. Data from different channels such as color, depth, near-infrared and thermal are available to advance the research in face PAD. The proposed method was compared with feature-based approaches and found to outperform the baselines achieving an ACER of 0.3\% on the introduced dataset. The database and the software to reproduce the results are made available publicly.},
}
@incollection{hopad-2019,
title = {Recent Advances in Face Presentation Attack Detection},
author = {Bhattacharjee, Sushil and Mohammadi, Amir and Anjos, Andr{\'{e}} and Marcel, S{\'{e}}bastien},
editor = "Marcel, S{\'{e}}bastien AND Nixon, Mark AND Fierrez, Julian AND Evans, Nicholas",
edition = "2nd edition (in press)",
booktitle = "Handbook of Biometric Anti-Spoofing",
publisher = "Springer-Verlag",
year = "2019",
month = 1,
pages = "207--228",
isbn = "ISBN 978-3-319-92627-8",
doi = "10.1007/978-3-319-92627-8_10",
abstract = "The undeniable convenience of face-recognition (FR) based biomet- rics has made it an attractive tool for access control in various applications, from immigration-control to remote banking. Widespread adoption of face biometrics, however, depends on the how secure such systems are perceived to be. One particular vulnerability of FR systems comes from presentation attacks (PA), where a subject A attempts to impersonate another subject B, by presenting, for example, a photograph of B to the biometric sensor (i.e., the camera). PAs are the most likely forms of attacks on face biometric systems, as the camera is the only component of the biometric system that is exposed to the outside world. Robust presentation attack detection (PAD) methods are necessary to construct secure FR based access control systems. The first edition of the Handbook of Biometric Anti-spoofing included two chapters on face-PAD. In this chapter we present the significant advances in face-PAD research since the publication of the first edition of this book. In addition to PAD methods designed to work with color images, we also discuss advances in face-PAD methods using other imaging modalities, namely, near-infrared (NIR) and thermal imaging. This chapter also presents a number of recently published datasets for face-PAD experiments.",
}
@incollection{hopad-2019-2,
title = "Evaluation Methodologies for Biometric Presentation Attack Detection",
author = {Chingovska, Ivana and Mohammadi, Amir and Anjos, Andr{\'{e}} and Marcel, S{\'{e}}bastien},
editor = "Marcel, S{\'{e}}bastien AND Nixon, Mark AND Fierrez, Julian AND Evans, Nicholas",
addendum = {(Issued from Ph.D co-supervision)},
edition = "2nd edition (in press)",
booktitle = "Handbook of Biometric Anti-Spoofing",
publisher = "Springer-Verlag",
year = "2019",
month = 1,
pages = "457--480",
isbn = "ISBN 978-3-319-92627-8",
doi = "10.1007/978-3-319-92627-8_20",
abstract = "Presentation attack detection (PAD, also known as anti-spoofing) systems, regardless of the technique, biometric mode or degree of independence of external equipment, are most commonly treated as binary classification systems. The two classes that they differentiate are bona-fide and presentation attack samples. From this perspective, their evaluation is equivalent to the established evaluation standards for the binary classification systems. However, PAD systems are designed to operate in conjunction with recognition systems and as such can affect their performance. From the point of view of a recognition system, the presentation attacks are a separate class that they need to be detected and rejected. As the problem of presentation attack detection grows to this pseudo-ternary status, the evaluation methodologies for the recognition systems need to be revised and updated. Consequentially, the database requirements for presentation attack databases become more specific. The focus of this chapter is the task of biometric verification and its scope is three-fold: firstly, it gives the definition of the presentation attack detection problem from the two perspectives. Secondly, it states the database requirements for a fair and unbiased evaluation. Finally, it gives an overview of the existing evaluation techniques for presentation attacks detection systems and verification systems under presentation attacks.",
}
@incollection{hopad-2019-3,
title = "An Introduction to Vein Presentation Attacks and Detection",
author = "Anjos, Andr{\'{e}} and Tome, Pedro and Marcel, S{\'{e}}bastien",
editor = "Marcel, S{\'{e}}bastien AND Nixon, Mark AND Fierrez, Julian AND Evans, Nicholas",
edition = "2nd edition (in press)",
booktitle = "Handbook of Biometric Anti-Spoofing",
publisher = "Springer-Verlag",
year = "2019",
month = 1,
pages = "419--438",
isbn = "ISBN 978-3-319-92627-8",
doi = "10.1007/978-3-319-92627-8_18",
abstract = "The domain of presentation attacks (PA), including vulnerability studies and detection (PAD) remains very much unexplored by available scientific literature in biometric vein recognition. Contrary to other modalities that use visual spectral sensors for capturing biometric samples, vein biometrics is typically implemented with near-infrared imaging. The use of invisible light spectra challenges the cre- ation PA instruments, but does not render it impossible. In this chapter, we provide an overview of current landscape for PA manufacturing in possible attack vectors for vein recognition, describe existing public databases and baseline techniques to counter such attacks. The reader will also find material to reproduce experiments and findings for fingervein recognition systems. We provide this material with the hope it will be extended to other vein recognition systems and improved in time.",
}
@article{tifs-2019,
author = {de Freitas Pereira, Tiago and Anjos, André and Marcel, Sébastien},
month = 12,
title = {Heterogeneous Face Recognition Using Domain Specific Units},
journal = {IEEE Transactions on Information Forensics and Security},
year = {2019},
addendum = {(Issued from Ph.D co-supervision)},
doi = {10.1109/TIFS.2018.2885284},
url = "https://publications.idiap.ch/index.php/publications/show/3963",
pdf = "https://www.idiap.ch/~aanjos/papers/ieee-tifs-2018.pdf",
abstract = {The task of Heterogeneous Face Recognition consists in matching face images that are sensed in different domains, such as sketches to photographs (visual spectra images), thermal images to photographs or near-infrared images to photographs. In this work we suggest that high level features of Deep Convolutional Neural Networks trained on visual spectra images are potentially domain independent and can be used to encode faces sensed in different image domains. A generic framework for Heterogeneous Face Recognition is proposed by adapting Deep Convolutional Neural Networks low level features in, so called, “Domain Specific Units”. The adaptation using Domain Specific Units allows the learning of shallow feature detectors specific for each new image domain. Furthermore, it handles its transformation to a generic face space shared between all image domains. Experiments carried out with four different face databases covering three different image domains show substantial improvements, in terms of recognition rate, surpassing the state-of-the-art for most of them. This work is made reproducible: all the source code, scores and trained models of this approach are made publicly available.},
}
%%%%%%%%%%%%
%% 2018 %%
%%%%%%%%%%%%
@inproceedings{iwbf-2018,
author = "Nikisins, Olegs and Eglitis, Teodors and Anjos, André and Marcel, Sébastien",
month = 6,
title = "Fast cross-correlation based wrist vein recognition algorithm with rotation and translation compensation",
booktitle = "Sixth International Workshop on Biometrics and Forensics",
year = "2018",
url = "https://publications.idiap.ch/index.php/publications/show/3835",
pdf = "https://www.idiap.ch/~aanjos/papers/iwbf-2018.pdf",
doi= "10.1109/IWBF.2018.8401550",
abstract = "Most of the research on vein biometrics addresses the problems of either palm or finger vein recognition with a considerably smaller emphasis on wrist vein modality. This paper paves the way to a better understanding of capabilities and challenges in the field of wrist vein verification. This is achieved by introducing and discussing a fully automatic cross-correlation based wrist vein verification technique. Overcoming the limitations of ordinary cross-correlation, the proposed system is capable of compensating for scale, translation and rotation between vein patterns in a computationally efficient way. Introduced comparison algorithm requires only two cross-correlation operations to compensate for both translation and rotation, moreover the well known property of log-polar transformation of Fourier magnitudes is not involved in any form. To emphasize the veins, a two-layer Hessian-based vein enhancement approach with adaptive brightness normalization is introduced, improving the connectivity and the stability of extracted vein patterns. The experiments on the publicly available PUT Vein wrist database give promising results with FNMR of 3.75\% for FMR of 0.1\%. In addition we make this research reproducible providing the source code and instructions to replicate all findings in this work.",
}
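%
% The entry above compensates translation and rotation between wrist vein
% patterns using only two cross-correlation operations. The commented sketch
% below shows the translation part only, via standard FFT-based phase
% correlation on a synthetic pattern; it is a generic illustration of that
% building block, not the authors' algorithm (which also handles rotation
% without log-polar transforms).
%
%   import numpy as np
%
%   def phase_correlation_shift(a, b):
%       """Estimate the (row, col) shift that aligns image b onto image a."""
%       cross = np.fft.fft2(a) * np.conj(np.fft.fft2(b))
%       corr = np.fft.ifft2(cross / np.maximum(np.abs(cross), 1e-12))
%       shift = np.array(np.unravel_index(np.argmax(np.abs(corr)), corr.shape), float)
%       dims = np.array(a.shape, float)
%       shift[shift > dims / 2] -= dims[shift > dims / 2]        # wrap to signed shifts
%       return shift
%
%   rng = np.random.default_rng(4)
%   pattern = (rng.random((128, 128)) > 0.9).astype(float)       # synthetic "veins"
%   moved = np.roll(pattern, (5, -3), axis=(0, 1))
%   print(phase_correlation_shift(pattern, moved))               # ~ [-5.  3.]
%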
@patent{beat-patent-2018,
author = {Marcel, Sébastien AND Anjos, André AND Abbet, Philip},
title = {Method and internet-connected server for reviewing a computer-executable experiment},
year = {2018},
month = 5,
day = 15,
number = {US9973503B2},
type = {Patent},
location = {US},
filing_num = {14/970,333},
yearfiled = {2015},
monthfiled = 12,
dayfiled = 15,
pat_refs = {P\&TS SA (AG, LTD.); Av. J.-J. Rousseau 4 P.O. Box 2848 2001 Neuchâtel, CH},
abstract = {An internet-connected server comprising a first module for authorizing a user to access the server for: setting up, on the server, a given configuration for conducting a computer-executable experiment, wherein the given configuration comprises at least an executable instruction and a parameter or input data; executing, on the server, the computer-executable experiment with the given configuration so to produce a numerical result; certifying, on the server, the numerical result so to produce a certified result; and generating, on the server, a certification identifier of the certified result. The internet-connected server further comprises a second module for authorizing a reviewer for: providing the server with the certification identifier; and requesting and/or accessing, on the server, the certified numerical result on the basis of the provided certification identifier.},
url = {https://patft.uspto.gov/netacgi/nph-Parser?Sect2=PTO1&Sect2=HITOFF&p=1&u=/netahtml/PTO/search-bool.html&r=1&f=G&l=50&d=PALL&RefSrch=yes&Query=PN/9973503}
}
@inproceedings{icb-2018,
author = "Nikisins, Olegs and Mohammadi, Amir and Anjos, André and Marcel, Sébastien",
month = 2,
title = "On Effectiveness of Anomaly Detection Approaches against Unseen Presentation Attacks in Face Anti-Spoofing",
booktitle = "The 11th IAPR International Conference on Biometrics (ICB 2018)",
year = "2018",
url = "https://publications.idiap.ch/index.php/publications/show/3793",
pdf = "https://www.idiap.ch/~aanjos/papers/icb-2018.pdf",
doi= "10.1109/ICB2018.2018.00022",
abstract = "While face recognition systems got a significant boost in terms of recognition performance in recent years, they are known to be vulnerable to presentation attacks. Up to date, most of the research in the field of face anti-spoofing or presentation attack detection was considered as a two-class classification task: features of bona-fide samples versus features coming from spoofing attempts. The main focus has been on boosting the anti-spoofing performance for databases with identical types of attacks across both training and evaluation subsets. However, in realistic applications the types of attacks are likely to be unknown, potentially occupying a broad space in the feature domain. Therefore, a failure to generalize on unseen types of attacks is one of the main potential challenges in existing anti-spoofing approaches. First, to demonstrate the generalization issues of two-class anti-spoofing systems we establish new evaluation protocols for existing publicly available databases. Second, to unite the data collection efforts of various institutions we introduce a challenging Aggregated database composed of 3 publicly available datasets: Replay-Attack, Replay-Mobile and MSU MFSD, reporting the performance on it. Third, considering existing limitations we propose a number of systems approaching a task of presentation attack detection as an anomaly detection, or a one-class classification problem, using only bona-fide features in the training stage. Using less training data, hence requiring less effort in the data collection, the introduced approach demonstrates a better generalization properties against previously unseen types of attacks on the proposed Aggregated database.",
}
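%
% The entry above treats presentation attack detection as a one-class (anomaly
% detection) problem trained on bona-fide features only. The commented sketch
% below illustrates that idea with a scikit-learn OneClassSVM on Gaussian
% stand-in features; the feature dimensionality and nu value are assumptions,
% not the systems evaluated in the paper.
%
%   import numpy as np
%   from sklearn.pipeline import make_pipeline
%   from sklearn.preprocessing import StandardScaler
%   from sklearn.svm import OneClassSVM
%
%   rng = np.random.default_rng(3)
%   bona_fide_train = rng.normal(0.0, 1.0, (300, 64))    # bona-fide features only
%   test_bona_fide = rng.normal(0.0, 1.0, (50, 64))
%   test_attacks = rng.normal(2.5, 1.0, (50, 64))         # "unseen" attack type
%
%   detector = make_pipeline(StandardScaler(), OneClassSVM(nu=0.05, gamma="scale"))
%   detector.fit(bona_fide_train)                          # no attack samples used
%
%   # decision_function: larger values mean "more bona-fide-like"
%   print("bona-fide mean score:", detector.decision_function(test_bona_fide).mean())
%   print("attack    mean score:", detector.decision_function(test_attacks).mean())
%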
%%%%%%%%%%%%
%% 2017 %%
%%%%%%%%%%%%
@misc{arxiv-2017-2,
author = "Heusch, Guillaume and Anjos, Andr{\'{e}} and Marcel, S{\'{e}}bastien",
title = "A reproducible study on remote heart rate measurement",
journal = "arXiv",
year = "2017",
month = 9,
archivePrefix = "arXiv",
eprint = "1709.00962",
primaryClass = "cs-se",
addendum = {(Issued from project co-supervision)},
url = "https://arxiv.org/abs/1709.00962",
doi = {10.48550/arXiv.1709.00962},
abstract = "This paper studies the problem of reproducible research in remote photoplethysmography (rPPG). Most of the work published in this domain is assessed on privately-owned databases, making it difficult to evaluate proposed algorithms in a standard and principled manner. As a consequence, we present a new, publicly available database containing a relatively large number of subjects recorded under two different lighting conditions. Also, three state-of-the-art rPPG algorithms from the literature were selected, implemented and released as open source free software. After a thorough, unbiased experimental evaluation in various settings, it is shown that none of the selected algorithms is precise enough to be used in a real-world scenario.",
}
@patent{beat-patent-2017,
author = {Anjos, André AND Marcel, Sébastien},
title = {A data-network connected server, a device, a platform and a method for conducting computer-executable experiments},
year = {2017},
month = 12,
day = 28,
number = {WO/2017/221049},
type = {Patent},
location = {CH},
filing_num = {PCT/IB2016/053683},
yearfiled = {2016},
monthfiled = 6,
dayfiled = 21,
pat_refs = {P\&TS SA (AG, LTD.); Av. J.-J. Rousseau 4 P.O. Box 2848 2001 Neuchâtel, CH},
abstract = {The invention concerns a platform (1), a server (10, 10') and a client device (20) for conducting computer-executable experiments. The server comprises a restricted-access memory module (11,11') for locally storing a data structure with numerical values whose access is restricted to authorized devices and/or users. The server is provided with an instruction receiving module (12,12') for receiving a list of executable instructions for conducting a computer-executable experiment based on numerical values with restricted access from the client device being not authorized to accessing numerical values with restricted access. The server comprises an execution module (13,13') for conducting the experiment so to produce a numerical result; and a communication module (12,12') for transmitting the result to the client device and/or to the user of the client device.},
url = {https://patentscope.wipo.int/search/en/detail.jsf?docId=WO2017221049}
}
@inproceedings{icml-2017-2,
author = "Anjos, André and Günther, Manuel and de Freitas Pereira, Tiago and Korshunov, Pavel and Mohammadi, Amir and Marcel, Sébastien",
month = 8,
title = "Continuously Reproducing Toolchains in Pattern Recognition and Machine Learning Experiments",
booktitle = "Thirty-fourth International Conference on Machine Learning",
year = "2017",
location = "Sidney, Australia",
url = "https://publications.idiap.ch/index.php/publications/show/3666",
pdf = "https://www.idiap.ch/~aanjos/papers/icml-2017-2.pdf",
poster = "https://www.idiap.ch/~aanjos/posters/icml-2017-2.pdf",
abstract = "Pattern recognition and machine learning research work often contains experimental results on real-world data, which corroborates hypotheses and provides a canvas for the development and comparison of new ideas. Results, in this context, are typically summarized as a set of tables and figures, allowing the comparison of various methods, highlighting the advantages of the proposed ideas. Unfortunately, result reproducibility is often an overlooked feature of original research publications, competitions, or benchmark evaluations. The main reason for such a gap is the complexity on the development of software associated with these reports. Software frameworks are difficult to install, maintain, and distribute, while scientific experiments often consist of many steps and parameters that are difficult to report. The increasingly rising complexity of research challenges make it even more difficult to reproduce experiments and results. In this paper, we emphasize that a reproducible research work should be repeatable, shareable, extensible, and stable, and discuss important lessons we learned in creating, distributing, and maintaining software and data for reproducible research in pattern recognition and machine learning. We focus on a specific use-case of face recognition and describe in details how we can make the recognition experiments reproducible in practice.",
}
@inproceedings{icml-2017-1,
author = "Anjos, André and El Shafey, Laurent and Marcel, Sébastien",
month = 8,
title = "BEAT: An Open-Science Web Platform",
booktitle = "Thirty-fourth International Conference on Machine Learning",
year = "2017",
location = "Sydney, Australia",
url = "https://publications.idiap.ch/index.php/publications/show/3665",
pdf = "https://www.idiap.ch/~aanjos/papers/icml-2017-1.pdf",
poster = "https://www.idiap.ch/~aanjos/posters/icml-2017-1.pdf",
abstract = "With the increased interest in computational sciences, machine learning (ML), pattern recognition (PR) and big data, governmental agencies, academia and manufacturers are overwhelmed by the constant influx of new algorithms and techniques promising improved performance, generalization and robustness. Sadly, result reproducibility is often an overlooked feature accompanying original research publications, competitions and benchmark evaluations. The main reasons behind such a gap arise from natural complications in research and development in this area: the distribution of data may be a sensitive issue; software frameworks are difficult to install and maintain; Test protocols may involve a potentially large set of intricate steps which are difficult to handle.
To bridge this gap, we built an open platform for research in computational sciences related to pattern recognition and machine learning, to help on the development, reproducibility and certification of results obtained in the field. By making use of such a system, academic, governmental or industrial organizations enable users to easily and socially develop processing toolchains, re-use data, algorithms, workflows and compare results from distinct algorithms and/or parameterizations with minimal effort. This article presents such a platform and discusses some of its key features, uses and limitations. We overview a currently operational prototype and provide design insights.",
}
@inproceedings{interspeech-2017,
author = "Cernak, Milos and Komaty, Alain and Mohammadi, Amir and Anjos, André and Marcel, Sébastien",
month = 8,
title = "Bob Speaks Kaldi",
booktitle = "Proceedings of Interspeech",
year = "2017",
url = "https://publications.idiap.ch/index.php/publications/show/3623",
pdf = "https://www.idiap.ch/~aanjos/papers/interspeech-2017.pdf",
abstract = "This paper introduces and demonstrates Kaldi integration into Bob signal-processing and machine learning toolbox. The motivation for this integration is two-fold. Firstly, Bob benefits from using advanced speech processing tools developed in Kaldi. Secondly, Kaldi benefits from using complementary Bob modules, such as modulation-based VAD with an adaptive thresholding. In addition, Bob is designed as an open science tool, and this integration might offer to the Kaldi speech community a framework for better reproducibility of state-of-the-art research results.",
}
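%
% The entry above mentions a modulation-based VAD with adaptive thresholding as one
% of the Bob modules complementing Kaldi. The snippet below is only a minimal,
% illustrative sketch of that general idea (an energy-based VAD whose threshold
% adapts to the recording); it is not the bob.kaldi implementation, and all names
% and parameters are placeholders chosen for the example.
%
% import numpy as np
%
% def simple_vad(signal, rate, frame_ms=25, hop_ms=10, alpha=0.5):
%     """Toy energy-based VAD: frames the signal, computes log-energy per frame,
%     and flags frames above an adaptive threshold placed between the minimum
%     and the mean frame energy."""
%     frame = int(rate * frame_ms / 1000)
%     hop = int(rate * hop_ms / 1000)
%     n_frames = 1 + max(0, (len(signal) - frame) // hop)
%     energies = np.empty(n_frames)
%     for i in range(n_frames):
%         chunk = signal[i * hop:i * hop + frame]
%         energies[i] = 10 * np.log10(np.sum(chunk ** 2) + 1e-12)
%     threshold = energies.min() + alpha * (energies.mean() - energies.min())
%     return energies > threshold  # one boolean speech/non-speech label per frame
%
% if __name__ == "__main__":
%     rate = 16000
%     t = np.arange(rate) / rate
%     tone = np.where((t > 0.3) & (t < 0.7), np.sin(2 * np.pi * 440 * t), 0.0)
%     labels = simple_vad(tone + 0.01 * np.random.randn(rate), rate)
%     print("speech frames:", int(labels.sum()), "out of", labels.size)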
@misc{arxiv-2017,
author = "André Anjos AND Laurent El-Shafey AND Sébastien Marcel",
title = "BEAT: An Open-Source Web-Based Open-Science Platform",
year = "2017",
month = 4,
archivePrefix = "arXiv",
eprint = "1704.02319",
primaryClass = "cs-se",
doi = {10.48550/arXiv.1704.02319},
url = "https://arxiv.org/abs/1704.02319",
abstract = "With the increased interest in computational sciences, machine learning (ML), pattern recognition (PR) and big data, governmental agencies, academia and manufacturers are overwhelmed by the constant influx of new algorithms and techniques promising improved performance, generalization and robustness. Sadly, result reproducibility is often an overlooked feature accompanying original research publications, competitions and benchmark evaluations. The main reasons behind such a gap arise from natural complications in research and development in this area: the distribution of data may be a sensitive issue; software frameworks are difficult to install and maintain; Test protocols may involve a potentially large set of intricate steps which are difficult to handle. Given the raising complexity of research challenges and the constant increase in data volume, the conditions for achieving reproducible research in the domain are also increasingly difficult to meet.
To bridge this gap, we built an open platform for research in computational sciences related to pattern recognition and machine learning, to help on the development, reproducibility and certification of results obtained in the field. By making use of such a system, academic, governmental or industrial organizations enable users to easily and socially develop processing toolchains, re-use data, algorithms, workflows and compare results from distinct algorithms and/or parameterizations with minimal effort. This article presents such a platform and discusses some of its key features, uses and limitations. We overview a currently operational prototype and provide design insights.",
}
%%%%%%%%%%%%
%% 2016 %%
%%%%%%%%%%%%
@incollection{face-spoof-2016,
author = "Chingovska, Ivana and Erdogmus, Nesli and Anjos, Andr{\'{e}} and Marcel, S{\'{e}}bastien",
month = 2,
title = "Face Recognition Systems Under Spoofing Attacks",
booktitle = "Face Recognition Systems Under Spoofing Attacks",
edition = "1st edition",
chapter = "8",
year = "2016",
pages = "165--194",
publisher = "Springer International Publishing",
doi = "10.1007/978-3-319-28501-6_8",
addendum = {(Issued from Ph.D co-supervision)},
abstract = "In this chapter, we give an overview of spoofing attacks and spoofing countermeasures for face recognition systems , with a focus on visual spectrum systems (VIS) in 2D and 3D, as well as near-infrared (NIR) and multispectral systems . We cover the existing types of spoofing attacks and report on their success to bypass several state-of-the-art face recognition systems. The results on two different face spoofing databases in VIS and one newly developed face spoofing database in NIR show that spoofing attacks present a significant security risk for face recognition systems in any part of the spectrum. The risk is partially reduced when using multispectral systems. We also give a systematic overview of the existing anti-spoofing techniques, with an analysis of their advantages and limitations and prospective for future work.",
}
@article{ieee-access-2016,
author = {Morales, Aythami AND Fierrez, Julian AND Tolosana, Ruben AND Ortega-Garcia, Javier AND Galbally, Javier AND Gomez-Barrero, Marta AND Anjos, Andr{\'{e}} AND Marcel, S{\'{e}}bastien},
month = 11,
title = "Keystroke Biometrics Ongoing Competition",
journal = "IEEE Access",
volume = "4",
year = "2016",
pages = "7736--7746",
issn = "2169-3536",
doi = "10.1109/ACCESS.2016.2626718",
pdf = "https://www.idiap.ch/~aanjos/papers/ieee-access-2016.pdf",
abstract = "This paper presents the first Keystroke Biometrics Ongoing Competition (KBOC) organized to establish a reproducible baseline in person authentication using keystroke biometrics. The competition has been developed using the BEAT platform and includes one of the largest keystroke databases publicly available based on a fixed text scenario. The database includes genuine and attacker keystroke sequences from 300 users acquired in 4 different sessions distributed in a four month time span. The sequences correspond to the user's name and surname and therefore each user comprises an individual and personal sequence. As baseline for KBOC we report the results of 31 different algorithms evaluated according to performance and robustness. The systems have achieved EERs as low as 5.32\% and high robustness against multisession variability with drop of performances lower than 1\% for probes separated by months. The entire database is publicly available at the competition website.",
}
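%
% The KBOC entry above reports EERs as low as 5.32%. As a reminder of what that
% figure means, here is a minimal numpy sketch of how an Equal Error Rate can be
% computed from genuine and impostor score sets; it is illustrative only and not
% the competition's evaluation code (which ran on the BEAT platform).
%
% import numpy as np
%
% def eer(genuine, impostor):
%     """Equal Error Rate, assuming higher scores indicate genuine users.
%     Sweeps candidate thresholds and returns the operating point where the
%     false acceptance rate (FAR) and false rejection rate (FRR) are closest."""
%     thresholds = np.sort(np.concatenate([genuine, impostor]))
%     far = np.array([(impostor >= t).mean() for t in thresholds])
%     frr = np.array([(genuine < t).mean() for t in thresholds])
%     i = np.argmin(np.abs(far - frr))
%     return 0.5 * (far[i] + frr[i]), thresholds[i]
%
% if __name__ == "__main__":
%     rng = np.random.default_rng(0)
%     gen = rng.normal(2.0, 1.0, 1000)   # toy genuine keystroke scores
%     imp = rng.normal(0.0, 1.0, 1000)   # toy impostor scores
%     rate, thr = eer(gen, imp)
%     print("EER ~ %.2f%% at threshold %.3f" % (100 * rate, thr))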
%%%%%%%%%%%%
%% 2015 %%
%%%%%%%%%%%%
@article{tifs-2015,
author = "Chingovska, Ivana and Anjos, Andr{\'{e}}",
keywords = "Biometric Verification, Counter-Measures, Counter-Spoofing, Liveness Detection, Replay, Spoofing Attacks",
title = "On the use of client identity information for face anti-spoofing",
journal = "IEEE Transactions on Information Forensics and Security, Special Issue on Biometric Anti-spoofing",
addendum = {(Issued from Ph.D co-supervision)},
volume = "10",
number = "4",
month = 2,
year = "2015",
pages = "787--796",
doi = "10.1109/TIFS.2015.2400392",
pdf = "https://www.idiap.ch/~aanjos/papers/tifs-2015.pdf",
abstract = "With biometrics playing the role of a password which can not be replaced if stolen, the necessity of establishing counter-measures to biometric spoofing attacks has been recognized. Regardless of the biometric mode, the typical approach of anti-spoofing systems is to classify biometric evidence based on features discriminating between real accesses and spoofing attacks. For the first time, to the best of our knowledge, this paper studies the amount of client-specific information within these features and how it affects the performance of anti-spoofing systems. We make use of this information to build two client-specific anti-spoofing solutions, one relying on a generative and another one on a discriminative paradigm. The proposed methods, tested on a set of state-of-the-art anti-spoofing features for the face mode, outperform the client-independent approaches with up to 50\% relative improvement and exhibit better generalization capabilities on unseen types of spoofing attacks.",
}
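%
% The client-specific idea in the entry above (with a generative and a discriminative
% variant) is sketched below in deliberately simplified form: one GMM of real-access
% features per client plus a shared GMM of attack features, scored with a
% log-likelihood ratio. Feature dimensions, GMM sizes and the toy data are
% placeholders, not the paper's actual configuration.
%
% import numpy as np
% from sklearn.mixture import GaussianMixture
%
% def train(real_feats_per_client, attack_feats, n_components=4):
%     """Fit a per-client GMM on real-access features and one shared attack GMM."""
%     attack_gmm = GaussianMixture(n_components, random_state=0).fit(attack_feats)
%     client_gmms = {c: GaussianMixture(n_components, random_state=0).fit(f)
%                    for c, f in real_feats_per_client.items()}
%     return client_gmms, attack_gmm
%
% def llr_score(client_gmms, attack_gmm, client_id, features):
%     """Average log-likelihood ratio; positive values lean towards a real access."""
%     return (client_gmms[client_id].score_samples(features)
%             - attack_gmm.score_samples(features)).mean()
%
% if __name__ == "__main__":
%     rng = np.random.default_rng(1)
%     real = {"client-7": rng.normal(1.0, 1.0, (200, 8))}   # toy features
%     attacks = rng.normal(-1.0, 1.0, (400, 8))
%     gmms, att = train(real, attacks)
%     print("real probe  :", llr_score(gmms, att, "client-7", rng.normal(1.0, 1.0, (20, 8))))
%     print("attack probe:", llr_score(gmms, att, "client-7", rng.normal(-1.0, 1.0, (20, 8))))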
%%%%%%%%%%%%
%% 2014 %%
%%%%%%%%%%%%
@incollection{eob-2014,
author = "Chingovska, Ivana and Anjos, Andr{\'{e}} and Marcel, S{\'{e}}bastien",
editor = "Z.Li, Stan and Jain, Anil",
title = "Anti-spoofing: Evaluation Methodologies",
booktitle = "Encyclopedia of Biometrics",
edition = "2nd edition",
year = "2014",
publisher = "Springer US",
isbn = "978-3-642-27733-7",
doi = "10.1007/978-3-642-27733-7",
addendum = {(Issued from Ph.D co-supervision)},
abstract = "Following the definition of the task of the anti-spoofing systems to discriminate between real accesses and spoofing attacks, anti-spoofing can be regarded as a binary classification problem. The spoofing databases and the evaluation methodologies for anti-spoofing systems most often comply to the standards for binary classification problems. However, the anti-spoofing systems are not destined to work stand-alone, and their main purpose is to protect a verification system from spoofing attacks. In the process of combining the decision of an anti-spoofing and a recognition system, effects on the recognition performance can be expected. Therefore, it is important to analyze the problem of anti-spoofing under the umbrella of biometric recognition systems. This brings certain requirements in the database design, as well as adapted concepts for evaluation of biometric recognition systems under spoofing attacks.",
}
@incollection{eob-2014-2,
author = "Anjos, Andr{\'{e}} AND Chingovska, Ivana AND Marcel, S{\'{e}}bastien",
editor = "Z.Li, Stan AND Jain, Anil",
title = "Anti-Spoofing: Face Databases",
booktitle = "Encyclopedia of Biometrics",
edition = "2nd edition",
year = "2014",
publisher = "Springer US",
isbn = "978-3-642-27733-7",
doi = "10.1007/978-3-642-27733-7_9212-2",
addendum = {(Issued from Ph.D co-supervision)},
abstract = "Datasets for the evaluation of face verification system vulnerabilities to spoofing attacks and for the evaluation of face spoofing countermeasures.",
}
@article{tifs-2014,
author = "Ivana Chingovska AND André Anjos AND Sébastien Marcel",
title = "Biometrics Evaluation Under Spoofing Attacks",
journal = "IEEE Transactions on Information, Forensics and Security",
year = "2014",
month = 8,
volume = "9",
number = "12",
doi = "10.1109/TIFS.2014.2349158",
pdf = "https://www.idiap.ch/~aanjos/papers/tifs-2014.pdf",
addendum = {(Issued from Ph.D co-supervision)},
abstract = "While more accurate and reliable than ever, the trustworthiness of biometric verification systems is compromised by the emergence of spoofing attacks. Responding to this threat, numerous research publications address isolated spoofing detection, resulting in efficient counter-measures for many biometric modes. However, an important, but often overlooked issue regards their engagement into a verification task and how to measure their impact on the verification systems themselves. A novel evaluation framework for verification systems under spoofing attacks, called Expected Performance and Spoofability (EPS) framework, is the major contribution of this paper. Its purpose is to serve for an objective comparison of different verification systems with regards to their verification performance and vulnerability to spoofing, taking into account the system’s application-dependent susceptibility to spoofing attacks and cost of the errors. The convenience of the proposed open-source framework is demonstrated for the face mode, by comparing the security guarantee of four baseline face verification systems before and after they are secured with anti-spoofing algorithms.",
}
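%
% The EPS framework in the entry above builds on a handful of basic error rates for
% a verification system exposed to spoofing. The sketch below computes only those
% basic quantities (FAR, FRR, HTER and the spoof false acceptance rate) at a fixed
% threshold; the actual framework additionally weighs the errors by
% application-dependent priors and costs, which is omitted here.
%
% import numpy as np
%
% def error_rates(genuine, impostor, spoof, threshold):
%     """FAR/FRR on the licit protocol, their average (HTER), and the fraction
%     of spoofing attacks accepted at the same threshold (SFAR)."""
%     far = float((impostor >= threshold).mean())
%     frr = float((genuine < threshold).mean())
%     sfar = float((spoof >= threshold).mean())
%     return far, frr, 0.5 * (far + frr), sfar
%
% if __name__ == "__main__":
%     rng = np.random.default_rng(2)
%     gen = rng.normal(3.0, 1.0, 1000)   # toy verification scores
%     imp = rng.normal(0.0, 1.0, 1000)
%     att = rng.normal(2.0, 1.0, 1000)   # attacks deliberately score high
%     far, frr, hter, sfar = error_rates(gen, imp, att, threshold=1.5)
%     print("FAR=%.3f FRR=%.3f HTER=%.3f SFAR=%.3f" % (far, frr, hter, sfar))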
@incollection{hopad-2014,
title = "Face Anti-Spoofing: Visual Approach",
author = "André Anjos AND Jukka Komulainen AND Sébastien Marcel AND Abdenour Hadid and Matti Pietikainen",
editor = "Marcel, S{\'{e}}bastien AND Nixon, Mark AND Z.Li, Stan",
chapter = "4",
booktitle = "Handbook of Biometric Anti-Spoofing",
publisher = "Springer-Verlag",
pages = "65--82",
year = "2014",
doi = {10.1007/978-1-4471-6524-8_4},
abstract = "User authentication is an important step to protect information and in this regard face biometrics is advantageous. Face biometrics is natural, easy to use and less human-invasive. Unfortunately, recent work revealed that face biometrics is quite vulnerable to spoofing attacks. This chapter presents the different modalities of attacks to visual spectrum face recognition systems. We introduce public datasets for the evaluation of vulnerability of recognition systems and performance of counter-measures. Finally, we build a comprehensive view of anti-spoofing techniques for visual spectrum face recognition and provide an outlook of issues that remain unaddressed.",
}
@incollection{hopad-2014-2,
title = "Evaluation Methodologies",
author = "Ivana Chingovska AND André Anjos AND Sébastien Marcel",
editor = "Marcel, S{\'{e}}bastien AND Nixon, Mark AND Z.Li, Stan",
chapter = "10",
pages = "185--204",
booktitle = "Handbook of Biometric Anti-Spoofing",
publisher = "Springer-Verlag",
year = "2014",
doi = "10.1007/978-1-4471-6524-8_10",
addendum = {(Issued from Ph.D co-supervision)},
abstract = "Following the definition of the task of the anti-spoofing systems to discriminate between real accesses and spoofing attacks, anti-spoofing can be regarded as a binary classification problem. The spoofing databases and the evaluation methodologies for anti-spoofing systems most often comply to the standards for binary classification problems. However the anti-spoofing systems are not destined to work stand-alone, and their main purpose is to protect a verification system from spoofing attacks. In the process of combining the decision of an anti-spoofing and a recognition system, effects on the recognition performance can be expected. Therefore, it is important to analyze the problem of anti-spoofing under the umbrella of biometric recognition systems. This brings certain requirements in the database design, as well as adapted concepts for evaluation of biometric recognition systems under spoofing attacks.",
}
@incollection{hopad-2014-3,
author = "Z.Li, Stan AND Galbally, Javier AND Anjos, Andr{\'{e}} AND Marcel, S{\'{e}}bastien",
editor = "Marcel, S{\'{e}}bastien AND Nixon, Mark AND Z.Li, Stan",
title = "Evaluation Databases",
booktitle = "Handbook of Biometric Anti-Spoofing",
chapter = "Appendix A",
year = "2014",
pages = "247--278",
publisher = "Springer-Verlag",
isbn = "978-1-4471-6523-1",
doi = "10.1007/978-1-4471-6524-8",
}
@article{eurasip-2014,
author = "de Freitas Pereira, Tiago AND Komulainen, Jukka AND Anjos, André AND De Martino, José Mario AND Hadid, Abdenour AND Pietikainen, Matti and Marcel, Sébastien",
title = "Face liveness detection using dynamic texture",
journal = "EURASIP Journal on Image and Video Processing",
year = "2014",
month = 1,
doi = "10.1186/1687-5281-2014-2",
volume = "2014:2",
pdf = "https://www.idiap.ch/~aanjos/papers/eurasip-2014.pdf",
abstract = "User authentication is an important step to protect information, and in this context, face biometrics is potentially advantageous. Face biometrics is natural, intuitive, easy to use, and less human-invasive. Unfortunately, recent work has revealed that face biometrics is vulnerable to spoofing attacks using cheap low-tech equipment. This paper introduces a novel and appealing approach to detect face spoofing using the spatiotemporal (dynamic texture) extensions of the highly popular local binary pattern operator. The key idea of the approach is to learn and detect the structure and the dynamics of the facial micro-textures that characterise real faces but not fake ones. We evaluated the approach with two publicly available databases (Replay-Attack Database and CASIA Face Anti-Spoofing Database). The results show that our approach performs better than state-of-the-art techniques following the provided evaluation protocols of each database.",
}
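%
% The dynamic-texture descriptor referred to above (LBP-TOP) computes LBP
% histograms on the XY, XT and YT planes of a video volume. The sketch below
% illustrates that idea with scikit-image's local_binary_pattern on one central
% plane per orientation only; the multiresolution settings, per-block histograms
% and classifier of the paper are left out.
%
% import numpy as np
% from skimage.feature import local_binary_pattern
%
% def lbp_top_histogram(volume, P=8, R=1):
%     """Concatenated uniform-LBP histograms from the central XY (appearance),
%     XT and YT (motion) planes of a T x H x W grey-level video volume."""
%     t, h, w = volume.shape
%     planes = [volume[t // 2, :, :],   # XY plane
%               volume[:, h // 2, :],   # XT plane
%               volume[:, :, w // 2]]   # YT plane
%     n_bins = P + 2                    # number of 'uniform' LBP codes
%     hists = []
%     for plane in planes:
%         codes = local_binary_pattern(plane, P, R, method="uniform")
%         hist, _ = np.histogram(codes, bins=n_bins, range=(0, n_bins), density=True)
%         hists.append(hist)
%     return np.concatenate(hists)
%
% if __name__ == "__main__":
%     rng = np.random.default_rng(3)
%     video = rng.integers(0, 256, (25, 64, 64), dtype=np.uint8)  # stand-in face crop
%     print(lbp_top_histogram(video).shape)                       # (3 * (P + 2),) == (30,)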
%%%%%%%%%%%%
%% 2013 %%
%%%%%%%%%%%%
@article{iet-biometrics-2013,
author = "André Anjos AND Murali Mohan Chakka AND Sébastien Marcel",
title = "Motion-Based Counter-Measures to Photo Attacks in Face Recognition",
journal = "IET Biometrics",
year = "2013",
month = 7,
pdf = "https://www.idiap.ch/~aanjos/papers/iet-biometrics-2013.pdf",
doi = "10.1049/iet-bmt.2012.0071",
abstract = "Identity spoofing is a contender for high-security face recognition applications. With the advent of social media and globalized search, our face images and videos are wide-spread on the internet and can be potentially used to attack biometric systems without previous user consent. Yet, research to counter these threats is just on its infancy - we lack public standard databases, protocols to measure spoofing vulnerability and baseline methods to detect these attacks. The contributions of this work to the area are three-fold: firstly we introduce a publicly available PHOTO-ATTACK database with associated protocols to measure the effectiveness of counter-measures. Based on the data available, we conduct a study on current state-of-the-art spoofing detection algorithms based on motion analysis, showing they fail under the light of these new dataset. By last, we propose a new technique of counter-measure solely based on foreground/background motion correlation using Optical Flow that outperforms all other algorithms achieving nearly perfect scoring with an equal-error rate of 1.52\% on the available test data. The source code leading to the reported results is made available for the replicability of findings in this article.",
}
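%
% The counter-measure described above correlates foreground (face) and background
% motion. The snippet below sketches that intuition with OpenCV's Farnebäck optical
% flow and a given face bounding box; the windowing, normalization and final scoring
% of the published method are not reproduced, and the box coordinates are made up.
%
% import numpy as np
% import cv2
%
% def motion_correlation(frames, face_box):
%     """Correlation between mean face-region and background flow magnitudes.
%     For a real access the head tends to move independently of the background
%     (low correlation); a hand-held photo moves with the scene (high correlation)."""
%     x, y, w, h = face_box
%     face_motion, bg_motion = [], []
%     for prev, cur in zip(frames[:-1], frames[1:]):
%         flow = cv2.calcOpticalFlowFarneback(prev, cur, None,
%                                             0.5, 3, 15, 3, 5, 1.2, 0)
%         mag = np.linalg.norm(flow, axis=2)
%         mask = np.zeros(mag.shape, dtype=bool)
%         mask[y:y + h, x:x + w] = True
%         face_motion.append(mag[mask].mean())
%         bg_motion.append(mag[~mask].mean())
%     return float(np.corrcoef(face_motion, bg_motion)[0, 1])
%
% if __name__ == "__main__":
%     rng = np.random.default_rng(4)
%     frames = [rng.integers(0, 256, (120, 160), dtype=np.uint8) for _ in range(10)]
%     print("correlation:", motion_correlation(frames, face_box=(60, 40, 40, 40)))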
@inproceedings{cvpr-bw-2013,
author = "Ivana Chingovska AND André Anjos AND Sébastien Marcel",
title = "Anti-spoofing in action: joint operation with a verification system",
booktitle = "Computer Vision and Pattern Recognition Conference - Biometrics Workshop",
year = "2013",
doi = "10.1109/CVPRW.2013.22",
month = 6,
pdf = "https://www.idiap.ch/~aanjos/papers/cvpr-bw-2013.pdf",
abstract = "Besides the recognition task, today’s biometric systems need to cope with additional problem: spoofing attacks. Up to date, academic research considers spoofing as a binary classification problem: systems are trained to discriminate between real accesses and attacks. However, spoofing counter-measures are not designated to operate stand-alone, but as a part of a recognition system they will protect. In this paper, we study techniques for decisionlevel and score-level fusion to integrate a recognition and anti-spoofing systems, using an open-source framework that handles the ternary classification problem (clients, impostors and attacks) transparently. By doing so, we are able to report the impact of different spoofing counter-measures, fusion techniques and thresholding on the overall performance of the final recognition system. For a specific use case covering face verification, experiments show to what extent simple fusion improves the trustworthiness of the system when exposed to spoofing attacks.",
}
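%
% The entry above studies score-level fusion of a verification and an anti-spoofing
% system. The sketch below shows one generic way to do such a fusion (logistic
% regression over the two score streams, with impostors and attacks collapsed into
% the negative class); it is not the open-source framework used in the paper, and
% the toy score distributions are invented for the example.
%
% import numpy as np
% from sklearn.linear_model import LogisticRegression
%
% def fuse(verification_scores, antispoofing_scores, labels):
%     """Fit a logistic-regression fuser; labels: 1 = genuine client,
%     0 = impostor or spoofing attack. decision_function() gives the fused score."""
%     X = np.column_stack([verification_scores, antispoofing_scores])
%     return LogisticRegression().fit(X, labels)
%
% if __name__ == "__main__":
%     rng = np.random.default_rng(5)
%     n = 300
%     # toy scores: clients high on both axes, zero-effort impostors low on the
%     # verification axis, attacks high on verification but low on anti-spoofing
%     clients = np.column_stack([rng.normal(3, 1, n), rng.normal(3, 1, n)])
%     impostors = np.column_stack([rng.normal(0, 1, n), rng.normal(2, 1, n)])
%     attacks = np.column_stack([rng.normal(3, 1, n), rng.normal(0, 1, n)])
%     X = np.vstack([clients, impostors, attacks])
%     y = np.concatenate([np.ones(n), np.zeros(2 * n)])
%     model = fuse(X[:, 0], X[:, 1], y)
%     print("fused scores (client, impostor, attack):",
%           model.decision_function([[3, 3], [0, 2], [3, 0]]))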
@inproceedings{icb-2013-1,
author = "Tiago de Freitas Pereira AND André Anjos AND José Mario De Martino and Sébastien Marcel",
title = "Can face anti-spoofing countermeasures work in a real world scenario?",
booktitle = "International Conference on Biometrics 2013",
month = 6,
year = "2013",
doi = "10.1109/ICB.2013.6612981",
pdf = "https://www.idiap.ch/~aanjos/papers/icb-2013-1.pdf",
abstract = "User authentication is an important step to protect in- formation and in this field face biometrics is advantageous. Face biometrics is natural, easy to use and less human-invasive. Unfortunately, recent work has revealed that face biometrics is vulnerable to spoofing attacks using low-tech equipments. This article assesses how well existing face anti-spoofing countermeasures can work in a more realistic condition. Experiments carried out with two freely available video databases (Replay Attack Database and CASIA Face Anti-Spoofing Database) show low generalization and possible database bias in the evaluated countermeasures. To generalize and deal with the diversity of attacks in a real world scenario we introduce two strategies that show promising results.",
}
@inproceedings{icb-2013-2,
author = "Jukka Komulainen AND Abdenour Hadid AND Matti Pietikäinen AND André Anjos AND Sébastien Marcel",
title = "Complementary Countermeasures for Detecting Scenic Face Spoofing Attacks",
booktitle = "International Conference on Biometrics 2013",
month = 6,
year = "2013",
pdf = "https://www.idiap.ch/~aanjos/papers/icb-2013-2.pdf",
doi = "10.1109/ICB.2013.6612968",
abstract = "The face recognition community has finally started paying more attention to the long-neglected problem of spoofing attacks. The number of countermeasures is gradually increasing and fairly good results have been reported on the publicly available databases. There exists no superior anti-spoofing technique due to the varying nature of attack scenarios and acquisition conditions. Therefore, it is important to find out complementary countermeasures and study how they should be combined in order to construct an easily extensible anti-spoofing framework. In this paper, we address this issue by studying fusion of motion and texture based countermeasures under several types of scenic face attacks. We provide an intuitive way to explore the fusion potential of different visual cues and show that the performance of the individual methods can be vastly improved by performing fusion at score level. The Half-Total Error Rate (HTER) of the best individual countermeasure was decreased from 11.2\% to 5.1\% on the Replay Attack Database. More importantly, we question the idea of using complex classification schemes in individual countermeasures, since nearly same fusion performance is obtained by replacing them with a simple linear one. In this manner, the computational efficiency and also probably the generalization ability of the resulting anti-spoofing framework are increased.",
}
@inproceedings{icb-2013-3,
author = "I. Chingovska AND J. Yang AND Z. Lei AND D. Yi AND S. Z. Li AND O. Kähm AND C. Glaser AND N. Damer AND A. Kuijper AND A. Nouak AND J. Komulainen AND T. Pereira AND S. Gupta AND S. Khandelwal AND S. Bansal AND A. Rai AND T. Krishna AND D. Goyal AND M.-A. Waris AND H. Zhang AND I. Ahmad AND S. Kiranyaz AND M. Gabbouj AND R. Tronci AND M. Pili AND N. Sirena AND F. Roli AND J. Galbally AND J. Fierrez AND A. Pinto AND H. Pedrini AND W. S. Schwartz AND A. Rocha AND A. Anjos AND S. Marcel",
title = "The 2nd Competition on Counter Measures to 2D Face Spoofing Attacks",
booktitle = "International Conference on Biometrics 2013",
month = 6,
year = "2013",
pdf = "https://www.idiap.ch/~aanjos/papers/icb-2013-3.pdf",
doi = "10.1109/ICB.2013.6613026",
abstract = "As a crucial security problem, anti-spoofing in biometrics, and particularly for the face modality, has achieved great progress in the recent years. Still, new threats arrive in form of better, more realistic and more sophisticated spoofing attacks. The objective of the 2nd Competition on Counter Measures to 2D Face Spoofing Attacks is to challenge researchers to create counter measures effectively detecting a variety of attacks. The submitted propositions are evaluated on the Replay-Attack database and the achieved results are presented in this paper.",
}
%%%%%%%%%%%%
%% 2012 %%
%%%%%%%%%%%%
@inproceedings{accv-2012,
author = "Tiago de Freitas Pereira AND André Anjos AND José Mario De Martino AND Sébastien Marcel",
title = "LBP-TOP based countermeasure against facial spoofing attacks",
booktitle = "International Workshop on Computer Vision With Local Binary Pattern Variants",
year = "2012",
doi = "10.1007/978-3-642-37410-4_11",
pdf = "https://www.idiap.ch/~aanjos/papers/accv-2012.pdf",
abstract = "User authentication is an important step to protect informa- tion and in this field face biometrics is advantageous. Face biometrics is natural, easy to use and less human-invasive. Unfortunately, recent work has revealed that face biometrics is vulnerable to spoofing attacks using low-tech cheap equipments. This article presents a countermeasure against such attacks based on the LBP−TOP operator combining both space and time information into a single multiresolution texture descrip- tor. Experiments carried out with the REPLAY ATTACK database show a Half Total Error Rate (HTER) improvement from 15.16\% to 7.60\%.",
}
@inproceedings{acmmm-2012,
author = "André Anjos AND Laurent El Shafey AND Roy Wallace AND Manuel Günther AND Chris McCool AND Sébastien Marcel",
title = "Bob: a free signal processing and machine learning toolbox for researchers",
booktitle = "ACM Multimedia 2012",
year = "2012",
pages = "1449--1452",
pdf = "https://www.idiap.ch/~aanjos/papers/acmmm-2012.pdf",
doi = "10.1145/2393347.2396517",
abstract = "Bob is a free signal processing and machine learning toolbox originally developed by the Biometrics group at Idiap Research Institute, Switzerland. The toolbox is designed to meet the needs of researchers by reducing development time and efficiently processing data. Firstly, Bob provides a researcher-friendly Python environment for rapid development. Secondly, efficient processing of large amounts of multimedia data is provided by fast C++ implementations of identified bottlenecks. The Python environment is integrated seamlessly with the C++ library, which ensures the library is easy to use and extensible. Thirdly, Bob supports reproducible research through its integrated experimental protocols for several databases. Finally, a strong emphasis is placed on code clarity, documentation, and thorough unit testing. Bob is thus an attractive resource for researchers due to this unique combination of ease of use, efficiency, extensibility and transparency. Bob is an open-source library and an ongoing community effort.",
}
@inproceedings{biosig-2012,
author = "Ivana Chingovska AND André Anjos AND Sébastien Marcel",
title = "On the Effectiveness of Local Binary Patterns in Face Anti-spoofing",
booktitle = "IEEE International Conference of the Biometrics Special Interest Group",
year = "2012",
pdf = "https://www.idiap.ch/~aanjos/papers/biosig-2012.pdf",
isbn = "978-3-88579-290-1",
abstract = "Spoofing attacks are one of the security traits that biometric recognition systems are proven to be vulnerable to. When spoofed, a biometric recognition system is bypassed by presenting a copy of the biometric evidence of a valid user. Among all biometric modalities, spoofing a face recognition system is particularly easy to perform: all that is needed is a simple photograph of the user. In this paper, we address the problem of detecting face spoofing attacks. In particular, we inspect the potential of texture features based on Local Binary Patterns (LBP) and their variations on three types of attacks: printed photographs, and photos and videos displayed on electronic screens of different sizes. For this purpose, we introduce REPLAY-ATTACK, a novel publicly available face spoofing database which contains all the mentioned types of attacks. We conclude that LBP, with ~15\% Half Total Error Rate, show moderate discriminability when confronted with a wide set of attack types.",
}
%%%%%%%%%%%%
%% 2011 %%
%%%%%%%%%%%%
@inproceedings{ijcb-2011-2,
author = "André Anjos AND Sébastien Marcel",
title = "Counter-Measures to Photo Attacks in Face Recognition: a public database and a baseline",
booktitle = "International Joint Conference on Biometrics 2011",
year = "2011",
month = 10,
pdf = "https://www.idiap.ch/~aanjos/papers/ijcb-2011-2.pdf",
doi = "10.1109/IJCB.2011.6117503",
abstract = "A common technique to by-pass 2-D face recognition systems is to use photographs of spoofed identities. Unfortunately, research in counter-measures to this type of attack have not kept-up - even if such threats have been known for nearly a decade, there seems to exist no consensus on best practices, techniques or protocols for developing and testing spoofing-detectors for face recognition. We attribute the reason for this delay, partly, to the unavailability of public databases and protocols to study solutions and compare results. To this purpose we introduce the publicly available PRINT-ATTACK database and exemplify how to use its companion protocol with a motion-based algorithm that detects correlations between the person\'s head movements and the scene context. The results are to be used as basis for comparison to other counter-measure techniques. The PRINT-ATTACK database contains 200 videos of real-accesses and 200 videos of spoof attempts using printed photographs of 50 different identities.",
}
@inproceedings{ijcb-2011,
author = "Murali Mohan Chakka AND André Anjos AND Sébastien Marcel AND others",
title = "Competition on Counter Measures to 2-D Facial Spoofing Attacks",
booktitle = "International Joint Conference on Biometrics 2011",
year = "2011",
month = 10,
doi = "10.1109/IJCB.2011.6117509",
pdf = "https://www.idiap.ch/~aanjos/papers/ijcb-2011.pdf",
abstract = "Spoofing identities using photographs is one of the most common techniques to attack 2-D face recognition systems. There seems to exist no comparative studies of different techniques using the same protocols and data. The motivation behind this competition is to compare the performance of different state-of-the-art algorithms on the same database using a unique evaluation method. Six different teams from universities around the world have participated in the contest. Use of one or multiple techniques from motion, texture analysis and liveness detection appears to be the common trend in this competition. Most of the algorithms are able to clearly separate spoof attempts from real accesses. The results suggest the investigation of more complex attacks.",
}
%%%%%%%%%%%%
%% 2009 %%
%%%%%%%%%%%%
@article{cpc-2009,
author = "Torres, R.C. AND Anjos, A. AND Seixas, J.M.",
title = "Automatizing the Online Filter Test Management for a General-Purpose Particle Detector",
journal = "Computer Physics Communications",
year = "2009",
month = 10,
doi = "10.1016/j.cpc.2010.10.003",
OPTvolume = "",
OPTnumber = "",
OPTpages = "",
pdf = "https://www.idiap.ch/~aanjos/papers/cpc-2009.pdf",
abstract = "This paper presents a software environment to automatically configure and run online triggering and dataflow farms for the ATLAS experiment at the Large Hadron Collider (LHC). It provides support for a broad set of users, with distinct knowledge about the online triggering system, ranging from casual testers to final system deployers. This level of automatization improves the overall ATLAS TDAQ work flow for software and hardware tests and speeds-up system modifications and deployment.",
}
@inproceedings{nima-2010,
author = "The ATLAS Collaboration",
title = "ATLAS Trigger and Data Acquisition: capabilities and commissioning",
year = "2010",
volume = "617",
number = "1",
pages = "306--309",
pdf = "https://www.idiap.ch/~aanjos/papers/nima-2010.pdf",
booktitle = "11th Pisa Meeting on Advanced Detectors",
issn = "0168-9002",
doi = "10.1016/j.nima.2009.06.114",
abstract = "The ATLAS trigger system is based on three levels of event selection that selects the physics of interest from an initial bunch crossing rate of 40~MHz to an output rate of sim200~Hz compatible with the offline computing power and storage capacity. During nominal LHC operations at a luminosity of 1034~cm−2s−1, decisions must be taken every 25~ns. The LHC is expected to begin operations with a peak luminosity of 1031~cm−2s−1 with far fewer number of bunches, but quickly ramp up to higher luminosities. Hence, the ATLAS Trigger and Data Acquisition system needs to adapt to the changing beam conditions preserving the interesting physics and detector requirements that may vary with these conditions.",
}
@inproceedings{chep-2009-2,
author = "The ATLAS Collaboration",
title = "Commissioning of the ATLAS High Level Trigger with Single Beam and Cosmic Rays",
booktitle = "Computing in High Energy and Nuclear Physics, Prague, Czech Republic, 21 - 27 Mar 2009",
year = "2009",
OPTvolume = "",
OPTnumber = "",
OPTpages = "",
pdf = "https://www.idiap.ch/~aanjos/papers/chep-2009-2.pdf",
abstract = "ATLAS is one of the two general-purpose detectors at the Large Hadron Collider (LHC). The trigger system is responsible for making the online selection of interesting collision events. At the LHC design luminosity of 10^34 cm^-2s^-1 it will need to achieve a rejection factor of the order of 10^-7 against random proton-proton interactions, while selecting with high efficiency events that are needed for physics analyses. After a first processing level using custom electronics based on FPGAs and ASICs, the trigger selection is made by software running on two processor farms, containing a total of around two thousand multi-core machines. This system is known as the High Level Trigger (HLT). To reduce the network data traffic and the processing time to manageable levels, the HLT uses seeded, step-wise reconstruction, aiming at the earliest possible rejection of background events. The recent LHC startup and short single-beam run provided a \'stress test\' of the system and some initial calibration data. Following this period, ATLAS continued to collect cosmic-ray events for detector alignment and calibration purposes. After giving an overview of the trigger design and its innovative features, this paper focuses on the experience gained from operating the ATLAS trigger with single LHC beams and cosmic-rays.",
}
@inproceedings{chep-2009,
author = "The ATLAS Collaboration",
title = "The ATLAS online High Level Trigger framework: experience reusing offline software components in the ATLAS trigger",
booktitle = "Computing in High Energy and Nuclear Physics, Prague, Czech Republic, 21 - 27 Mar 2009",
year = "2009",
OPTvolume = "",
OPTnumber = "",
OPTpages = "",
pdf = "https://www.idiap.ch/~aanjos/papers/chep-2009.pdf",
abstract = "Event selection in the Atlas High Level Trigger is accomplished to a large extent by reusing software components and event selection algorithms developed and tested in an offline environment. Many of these offline software modules are not specifically designed to run in a heavily multi-threaded online data flow environment. The Atlas High Level Trigger (HLT) framework based on the Gaudi and Atlas Athena frameworks, forms the interface layer, which allows the execution of the HLT selection and monitoring code within the online run control and data flow software. While such an approach provides a unified environment for trigger event selection across all of Atlas, it also poses strict requirements on the reused software components in terms of performance, memory usage and stability. Experience of running the HLT selection software in the different environments and especially on large multi-node trigger farms has been gained in several commissioning periods using preloaded Monte Carlo events, in data taking periods with cosmic events and in a short period with proton beams from LHC. The contribution discusses the architectural aspects of the HLT framework, its performance and its software environment within the Atlas computing, trigger and data flow projects. Emphasis is also put on the architectural implications for the software by the use of multi-core processors in the computing farms and the experiences gained with multi-threading and multi-process technologies.",
}
@inproceedings{tipp-2009,
author = "The ATLAS Collaboration",
title = "Configuration and Control of the ATLAS Trigger and Data Acquisition",
booktitle = "The 1st international conference on Technology and Instrumentation in Particle Physics",
year = "2009",
OPTvolume = "",
OPTnumber = "",
OPTpages = "",
pdf = "https://www.idiap.ch/~aanjos/papers/tipp-2009.pdf",
abstract = "ATLAS is the biggest of the experiments aimed at studying high-energy particle interactions at the Large Hadron Collider (LHC). This paper describes the evolution of the Controls and Configuration system of the ATLAS Trigger and Data Acquisition (TDAQ) from the Technical Design Report (TDR) in 2003 to the first events taken at CERN with circulating beams in autumn 2008. The present functionality and performance and the lessons learned during the development are outlined. At the end we will also highlight some of the challenges which still have to be met by 2010, when the full scale of the trigger farm will be deployed.",
}
@inproceedings{lhc-2009,
author = "The ATLAS Collaboration",
title = "Atlas trigger for first physics and beyond",
booktitle = "Physics at LHC 2008 29 September - October 4, 2008 Split, Croatia",
year = "2009",
OPTvolume = "",
OPTnumber = "",
OPTpages = "",
pdf = "https://www.idiap.ch/~aanjos/papers/lhc-2009.pdf",
abstract = "ATLAS is a multi-purpose spectrometer built to perform precision measurements of Standard Model parameters and is aiming at discovery of Higgs particle, Super Symmetry and possible other physics channels beyond Standard Model. Operating at 14 TeV center of mass energy ATLAS will see 40 million events per second at nominal luminosity with about 25 overlapping interactions. Most of the events are inelastic proton-proton interactions with only few W, Z bosons or ttbar pairs produced each second, and expectations for Higgs or SUSY production cross-section are much smaller than that. ATLAS trigger has a difficult task to select one out of 10 5 events online and to ensure that most physics channels of interests are preserved for analysis. In this talk we will review the design of ATLAS trigger system, the trigger menu prepared for initial LHC run as well as for high luminosity run. The expected trigger performance of the base-line ATLAS physics programs will be reviewed and first results from the commissioning period will be given. The methods to measure trigger efficiencies and biases directly from data will be discussed.",
}
@inproceedings{acat-2009,
author = "The ATLAS Collaboration",
title = "ATLAS Trigger Status and Results From Commissioning Operations",
booktitle = "Advanced Computing on High-Energy Physics 2008, Erice, Sicily, Italy",
year = "2009",
OPTvolume = "",
OPTnumber = "",
OPTpages = "",
pdf = "https://www.idiap.ch/~aanjos/papers/acat-2009.pdf",
abstract = "The ATLAS trigger system is designed to select rare physics processes of interest from an extremely high rate of proton-proton collisions, reducing the LHC incoming rate of about 107. The short LHC bunch crossing period of 25 ns and the large background of soft-scattering events overlapped in each bunch crossing pose serious challenges, both on hardware and software, that the ATLAS trigger must overcome in order to efficiently select interesting events. The ATLAS trigger consists of hardware based Level-1, and a two-level software based High-Level Trigger (HLT). Data bandwidth and processing times in the higher level triggers are reduced by region of interest guidance in the HLT reconstruction steps. High flexibility is critical in order to adapt to the changing luminosity, backgrounds and physics goals. It is achieved by the use of inclusive trigger menus and modular software design. Selection algorithms have been developed which provide the required elasticity to detect different physics signatures and to control the trigger rates. In this paper an overview of the ATLAS trigger design, status and expected performance, as well as the results from the on-going commissioning with cosmic rays and first LHC beams, is presented.",
}
%%%%%%%%%%%%
%% 2008 %%
%%%%%%%%%%%%
@inproceedings{talk-2008,
author = "André Anjos",
title = "Trigger Systems",
booktitle = "Experimental High-Energy Physics and Associated Technologies Workshop",
year = "2008",
OPTvolume = "",
OPTnumber = "",
OPTpages = "",
pdf = "https://www.idiap.ch/~aanjos/papers/talk-2008.pptx",
abstract = "This is an invited talk. The contents were based on the fundamentals of Triggering System in High-Energy Physics experiments.",
}
@inproceedings{acat-2008,
author = "André Anjos on behalf of the ATLAS Collaboration",
title = "The DAQ/HLT system of the ATLAS experiment",
booktitle = "International Workshop on Advanced Computing and Analysis Techniques in Physics Research",
year = "2008",
OPTvolume = "",
OPTnumber = "",
OPTpages = "",
pdf = "https://www.idiap.ch/~aanjos/papers/acat-2008.pdf",
abstract = "The DAQ/HLT system of the ATLAS experiment at CERN, Switzerland, is being commissioned for first collisions in 2009. Presently, the system is composed of an already very large farm of computers that accounts for about one-third of its event processing capacity. Event selection is conducted in two steps after the hardware-based Level-1 Trigger: a Level-2 Trigger processes detector data based on regions of interest (RoI) and an Event Filter operates on the full event data assembled by the Event Building system. The detector readout is fully commissioned and can be operated at its full design capacity. This places on the High-Level Triggers system the responsibility to maximize the quality of data that will finally reach the offline reconstruction farms. This paper brings an overview of the current ATLAS DAQ/HLT implementation and performance based on studies originated from its operation with simulated, cosmic particles and first-beam data. Its built-in event processing parallelism is discussed for both HLT levels as well as an outlook of options to improve it.",
}
@inproceedings{iprd-2008,
author = "The ATLAS Collaboration",
title = "Readiness of the ATLAS Trigger and Data Acquisition system for the first LHC beams",
booktitle = "11th Topical Seminar On Innovative Particle And Radiation Detectors, Siena, Italy",
year = "2008",
OPTvolume = "",
OPTnumber = "",
OPTpages = "",
pdf = "https://www.idiap.ch/~aanjos/papers/iprd-2008.pdf",
abstract = "The ATLAS Trigger and Data Acquisition (TDAQ) system is based on O(2k) processing nodes, interconnected by a multi-layer Gigabit network, and consists of a combination of custom electronics and commercial products. In its final configuration, O(20k) applications will provide the needed capabilities in terms of event selection, data flow, local storage and data monitoring. In preparation for the first LHC beams, many TDAQ sub-systems already reached the final configuration and roughly one third of the final processing power has been deployed. Therefore, the current system allows for a sensible evaluation of the performance and scaling properties. In this paper we introduce the ATLAS TDAQ system requirements and architecture and we discuss the status of software and hardware component. We moreover present the results of performance measurements validating the system design and providing a figure for the ATLAS data acquisition capabilities in the initial data taking period.",
}
@techreport{cern-2008,
author = "The ATLAS Collaboration",
title = "Expected Performance of the ATLAS Experiment Detector, Trigger, Physics",
institution = "CERN Open Documentation",
year = "2008",
OPTvolume = "",
number = "2008--020",
pdf = "https://www.idiap.ch/~aanjos/papers/cern-2008.pdf",
abstract = "A detailed study is presented of the expected performance of the ATLAS detector. The reconstruction of tracks, leptons, photons, missing energy and jets is investigated, together with the performance of b-tagging and the trigger. The physics potential for a variety of interesting physics processes, within the Standard Model and beyond, is examined. The study comprises a series of notes based on simulations of the detector and physics processes, with particular emphasis given to the data expected from the first years of operation of the LHC at CERN.",
}
@article{jinst-2008,
author = "The ATLAS Collaboration",
title = "The ATLAS Experiment at the CERN Large Hadron Collider",
journal = "Journal of Instrumentation",
year = "2008",
month = 8,
OPTvolume = "",
number = "S08003",
OPTpages = "",
pdf = "https://www.idiap.ch/~aanjos/papers/jinst-2008.pdf",
abstract = "The ATLAS detector as installed in its experimental cavern at point 1 at CERN is described in this paper. A brief overview of the expected performance of the detector when the Large Hadron Collider begins operation is also presented.",
}
%%%%%%%%%%%%
%% 2007 %%
%%%%%%%%%%%%
@inproceedings{nss-2007,
author = "The ATLAS Collaboration",
title = "The ATLAS Event Builder",
booktitle = "IEEE Nuclear Science Symposium and Medical Imaging Conference",
year = "2007",
OPTvolume = "",
OPTnumber = "",
OPTpages = "",
pdf = "https://www.idiap.ch/~aanjos/papers/nss-2007.pdf",
abstract = "Event data from proton-proton collisions at the LHC will be selected by the ATLAS experiment in a three-level trigger system, which, at its first two trigger levels (LVL1+LVL2), reduces the initial bunch crossing rate of 40 MHz to ∼3 kHz. At this rate, the Event Builder collects the data from the readout system PCs (ROSs) and provides fully assembled events to the Event Filter (EF). The EF is the third trigger level and its aim is to achieve a further rate reduction to ∼200 Hz on the permanent storage. The Event Builder is based on a farm of O(100).
",
}
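%
% A quick arithmetic check of the trigger rates quoted in the abstracts above
% (40 MHz bunch crossings, ~3 kHz after LVL1+LVL2, ~200 Hz written to storage):
%
% bunch_crossing_hz = 40e6       # LHC bunch crossing rate
% after_lvl1_lvl2_hz = 3e3       # rate seen by the Event Builder
% after_event_filter_hz = 200.0  # rate written to permanent storage
%
% print("LVL1+LVL2 rejection factor:   ~%.0f" % (bunch_crossing_hz / after_lvl1_lvl2_hz))
% print("Event Filter rejection factor: ~%.0f" % (after_lvl1_lvl2_hz / after_event_filter_hz))
% print("overall reduction:             ~%.0e" % (bunch_crossing_hz / after_event_filter_hz))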
@inproceedings{eurochep-2007,
author = "The ATLAS Collaboration",
title = "The ATLAS trigger - high-level trigger commissioning and operation during early data taking",
booktitle = "International Europhysics Conference on High Energy Physics",
year = "2007",
OPTvolume = "",
OPTnumber = "",
OPTpages = "",
pdf = "https://www.idiap.ch/~aanjos/papers/eurochep-2007.pdf",
abstract = "The ATLAS experiment is one of the two general-purpose experiments due to start operation soon at the Large Hadron Collider (LHC). The LHC will collide protons at a centre of mass energy of 14~TeV, with a bunch-crossing rate of 40~MHz. The ATLAS three-level trigger will reduce this input rate to match the foreseen offline storage capability of 100-200~Hz. This paper gives an overview of the ATLAS High Level Trigger focusing on the system design and its innovative features. We then present the ATLAS trigger strategy for the initial phase of LHC exploitation. Finally, we report on the valuable experience acquired through in-situ commissioning of the system where simulated events were used to exercise the trigger chain. In particular we show critical quantities such as event processing times, measured in a large-scale HLT farm using a complex trigger menu.",
}
@inproceedings{chep-2007,
author = "The ATLAS Collaboration",
title = "Alignment data streams for the ATLAS Inner Detector",
booktitle = "Computing for High-Energy Physics",
year = "2007",
OPTvolume = "",
OPTnumber = "",
OPTpages = "",
pdf = "https://www.idiap.ch/~aanjos/papers/chep-2007.pdf",
abstract = "The ATLAS experiment uses a complex trigger strategy to be able to reduce the Event Filter rate output, down to a level that allows the storage and processing of these data. These concepts are described in the ATLAS Computing Model which embraces Grid paradigm. The output coming from the Event Filter consists of four main streams: physical stream, express stream, calibration stream, and diagnostic stream. The calibration stream will be transferred to the Tier-0 facilities that will provide the prompt reconstruction of this stream with a minimum latency of 8 hours, producing calibration constants of sufficient quality to allow a first-pass processing. The Inner Detector community is developing and testing an independent common calibration stream selected at the Event Filter after track reconstruction. It is composed of raw data, in byte-stream format, contained in Readout Buffers (ROBs) with hit information of the selected tracks, and it will be used to derive and update a set of calibration and alignment constants. This option was selected because it makes use of the Byte Stream Converter infrastructure and possibly gives better bandwidth usage and storage optimization. Processing is done using specialized algorithms running in the Athena framework in dedicated Tier-0 resources, and the alignment constants will be stored and distributed using the COOL conditions database infrastructure. This work is addressing in particular the alignment requirements, the needs for track and hit selection, and the performance issues.",
}