diff --git a/doc/mvp.css b/doc/mvp.css new file mode 100644 index 0000000..a5cc5b9 --- /dev/null +++ b/doc/mvp.css @@ -0,0 +1,568 @@ +/* MVP.css v1.7.3 - https://github.com/andybrewer/mvp */ + +:root { + --border-radius: 5px; + --box-shadow: 2px 2px 10px; + --color: darkblue; + --hover-color: blue; + --color-accent: #118bee15; + --color-bg: #fff; + --color-bg-secondary: #e9e9e9; + --color-secondary: darkblue; /*#920de9;*/ + --color-secondary-accent: #920de90b; + --color-shadow: #f4f4f4; + --color-text: #000; + --color-text-secondary: #999; + --font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Oxygen-Sans, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; + --hover-brightness: 1.2; + --justify-important: center; + --justify-normal: left; + --line-height: 1.5; + --width-card: 285px; + --width-card-medium: 460px; + --width-card-wide: 800px; + --width-content: 720px; +} + +/* +@media (prefers-color-scheme: dark) { + :root { + --color: #0097fc; + --color-accent: #0097fc4f; + --color-bg: #333; + --color-bg-secondary: #555; + --color-secondary: #e20de9; + --color-secondary-accent: #e20de94f; + --color-shadow: #bbbbbb20; + --color-text: #f7f7f7; + --color-text-secondary: #aaa; + } +} +*/ + +/* Layout */ +article aside { + background: var(--color-secondary-accent); + border-left: 4px solid var(--color-secondary); + padding: 0.01rem 0.8rem; +} + +body { + background: var(--color-bg); + color: var(--color-text); + font-family: var(--font-family); + line-height: var(--line-height); + margin: 0; + overflow-x: hidden; + padding: 1rem 0; +} + +footer, +header, +main { + margin: 0 auto; + max-width: var(--width-content); + width: calc(100% - 2rem); + padding: .5rem 1rem; +} + +header{ + position: fixed; + top: 0; + left: calc(50% - var(--width-content) / 2); + padding-left: 0; + padding-right: 0; + background-color: white; + border-bottom: solid 1px var(--color-bg-secondary); + z-index: 999; +} + +@media screen and (max-width: 740px) { + header{ + left: 0; + 
width: 100%; + } + +} + +hr { + background-color: var(--color-bg-secondary); + border: none; + height: 1px; + margin: 4rem 0; + width: 100%; +} + +section { + justify-content: var(--justify-important); +} + +section img, +article img { + max-width: 100%; +} + +section pre { + overflow: auto; +} + +section aside { + border: 1px solid var(--color-bg-secondary); + border-radius: var(--border-radius); + box-shadow: var(--box-shadow) var(--color-shadow); + margin: 1rem; + padding: 1.25rem; + width: var(--width-card); +} + +section aside:hover { + box-shadow: var(--box-shadow) var(--color-bg-secondary); +} + +[hidden] { + display: none; +} + +/* Headers */ +article header, +div header, +main header { + padding-top: 0; +} + +header { + /* text-align: var(--justify-important); */ +} + +header a b, +header a em, +header a i, +header a strong { + margin-left: 0.5rem; + margin-right: 0.5rem; +} + + +header nav > a{ + color: var(--color-text); + padding-left: .5rem +} +header nav img { + margin: 1rem 0; +} + +section header { + padding-top: 0; + width: 100%; +} + +/* Nav */ +nav { + align-items: center; + display: flex; + font-weight: bold; + justify-content: space-between; + /* margin-bottom: 7rem; */ +} + +nav ul { + list-style: none; + padding: 0; +} + +nav ul li { + display: inline-block; + margin: 0 0.5rem; + position: relative; + text-align: left; +} + +/* Nav Dropdown */ +nav ul li:hover ul { + display: block; +} + +nav ul li ul { + background: var(--color-bg); + border: 1px solid var(--color-bg-secondary); + border-radius: var(--border-radius); + box-shadow: var(--box-shadow) var(--color-shadow); + display: none; + height: auto; + right: -2px; + padding: .5rem 1rem; + position: absolute; + top: 1.7rem; + white-space: nowrap; + width: auto; + z-index: 1; +} + +nav ul li ul li.space{ + margin-top:1rem; +} + +nav ul li ul::before { + /* fill gap above to make mousing over them easier */ + content: ""; + position: absolute; + left: 0; + right: 0; + top: -0.5rem; + height: 
0.5rem; +} + +nav ul li ul li, +nav ul li ul li a { + display: block; +} + +/* Typography */ +code, +samp { + background-color: var(--color-accent); + border-radius: var(--border-radius); + color: var(--color-text); + display: inline-block; + margin: 0 0.1rem; + padding: 0 0.5rem; +} + +details { + margin: 1.3rem 0; +} + +details summary { + font-weight: bold; + cursor: pointer; +} + +h1, +h2, +h3, +h4, +h5, +h6 { + text-align: left; + line-height: var(--line-height); + padding-top:5rem +} + +h1{ + padding-top: 100px; + margin-bottom: 4rem; + font-size: 150%; +} +h2{ + font-size: 125%; +} +/* h1::before{ + content: "CHAPTER " counter(h1counter) ".\0000a0\0000a0"; + counter-increment: h1counter; + counter-reset: h2counter; +} +h2:before { + content: counter(h1counter) "." counter(h2counter) ".\0000a0\0000a0"; + counter-increment: h2counter; + counter-reset: h3counter; +} +h3:before { + content: counter(h1counter) "." counter(h2counter) "." counter(h3counter) ".\0000a0\0000a0"; + counter-increment: h3counter; +} */ + +/* thanks: http://philarcher.org/diary/2013/headingnumbers/ */ +body {counter-reset: h1} +h1 {counter-reset: h2} +h2 {counter-reset: h3} +h3 {counter-reset: h4} +h4 {counter-reset: h5} +h5 {counter-reset: h6} + +h1:before {counter-increment: h1; content: "CHAPTER "counter(h1) ". " } +h2:before {counter-increment: h2; content: counter(h1) "." counter(h2) ". "} +h3:before {counter-increment: h3; content: counter(h1) "." counter(h2) "." counter(h3) ". 
"} + +h1.nocount:before, h1.title:before, h1.Title:before, h2.nocount:before, h3.nocount:before, h4.nocount:before, h5.nocount:before, h6.nocount:before { content: ""; counter-increment: none } + + +mark { + padding: 0.1rem; +} + +ol li, +ul li { + padding: 0.2rem 0; +} + +p { + margin: 0.75rem 0; + padding: 0; +} + +pre { + margin: 1rem 0; + max-width: var(--width-card-wide); + padding: 1rem 0; +} + +pre code, +pre samp { + display: block; + max-width: var(--width-card-wide); + padding: 0.5rem 2rem; + white-space: pre-wrap; +} + +small { + color: var(--color-text-secondary); +} + +sup { + background-color: var(--color-secondary); + border-radius: var(--border-radius); + color: var(--color-bg); + font-size: xx-small; + font-weight: bold; + margin: 0.2rem; + padding: 0.2rem 0.3rem; + position: relative; + top: -2px; +} +/* sup::before{ + content: '['; +} +sup::after{ + content: ']'; +} */ + +/* Links */ +a { + color: var(--color); + display: inline-block; + font-weight: bold; + text-decoration: none; +} + +a:hover { + color: var(--hover-color); + /* text-decoration: underline; */ +} +a:hover sup{ + background-color: var(--hover-color); +} + +a b, +a em, +a i, +a strong, +button { + border-radius: var(--border-radius); + display: inline-block; + font-size: medium; + font-weight: bold; + line-height: var(--line-height); + margin: 0.5rem 0; + padding: 1rem 2rem; +} + +button { + font-family: var(--font-family); +} + +button:hover { + cursor: pointer; + color: var(--hover-color); +} + +a b, +a strong, +button { + background-color: var(--color); + border: 2px solid var(--color); + color: var(--color-bg); +} + +a em, +a i { + border: 2px solid var(--color); + border-radius: var(--border-radius); + color: var(--color); + display: inline-block; + padding: 1rem 2rem; +} + +article aside a { + color: var(--color-secondary); +} + +/* Images */ +figure { + margin: 0; + padding: 0; +} + +figure img { + max-width: 100%; +} + +figure figcaption { + color: 
var(--color-text-secondary); +} + +/* Forms */ + +button:disabled, +input:disabled { + background: var(--color-bg-secondary); + border-color: var(--color-bg-secondary); + color: var(--color-text-secondary); + cursor: not-allowed; +} + +button[disabled]:hover { + filter: none; +} + +form { + border: 1px solid var(--color-bg-secondary); + border-radius: var(--border-radius); + box-shadow: var(--box-shadow) var(--color-shadow); + display: block; + max-width: var(--width-card-wide); + min-width: var(--width-card); + padding: 1.5rem; + text-align: var(--justify-normal); +} + +form header { + margin: 1.5rem 0; + padding: 1.5rem 0; +} + +input, +label, +select, +textarea { + display: block; + font-size: inherit; + max-width: var(--width-card-wide); +} + +input[type="checkbox"], +input[type="radio"] { + display: inline-block; +} + +input[type="checkbox"]+label, +input[type="radio"]+label { + display: inline-block; + font-weight: normal; + position: relative; + top: 1px; +} + +input, +select, +textarea { + border: 1px solid var(--color-bg-secondary); + border-radius: var(--border-radius); + margin-bottom: 1rem; + padding: 0.4rem 0.8rem; +} + +input[readonly], +textarea[readonly] { + background-color: var(--color-bg-secondary); +} + +label { + font-weight: bold; + margin-bottom: 0.2rem; +} + +/* Tables */ +table { + border: 1px solid var(--color-bg-secondary); + border-radius: var(--border-radius); + border-spacing: 0; + display: inline-block; + max-width: 100%; + overflow-x: auto; + padding: 0; + /* white-space: nowrap; */ +} + +table td, +table th, +table tr { + padding: 0.4rem 0.8rem; + text-align: var(--justify-important); +} + +table thead { + background-color: var(--color); + border-collapse: collapse; + border-radius: var(--border-radius); + color: var(--color-bg); + margin: 0; + padding: 0; +} + +table thead th:first-child { + border-top-left-radius: var(--border-radius); +} + +table thead th:last-child { + border-top-right-radius: var(--border-radius); +} + +table 
thead th:first-child, +table tr td:first-child { + text-align: var(--justify-normal); +} + +table tr:nth-child(even) { + background-color: var(--color-accent); +} + +/* Quotes */ +blockquote { + display: block; + font-size: 100%; + line-height: var(--line-height); + margin: 1rem auto; + max-width: var(--width-card-medium); + padding: 1.5rem 1rem; + text-align: var(--justify-important); +} + +blockquote footer { + color: var(--color-text-secondary); + display: block; + font-size: small; + line-height: var(--line-height); + padding: 1.5rem 0; +} + + +h1.title{ + font-size: 50px; + padding: 20vh 0; +} +h1.Title{ + font-size: xx-large; + padding: 20vh 0; + text-align: center; +} + +.keypoints{ + background-color: black; + color: var(--color-bg-secondary); + padding: 1rem; + +} +.keypoints > p > strong{ + margin-left:2.5rem; + text-transform: uppercase; +} \ No newline at end of file diff --git a/doc/output.html b/doc/output.html index 4a4a6a1..dea9bb0 100644 --- a/doc/output.html +++ b/doc/output.html @@ -7,209 +7,74 @@ Biometric and Behavioural Mass Surveillance in EU Member States
-

Biometric and Behavioural Mass Surveillance in EU Member States

+ +

Biometric and Behavioural Mass Surveillance in EU Member States


-

Biometric and Behavioural Mass Surveillance in EU Member States

+

Report for the Greens/EFA in the European Parliament

-

Draft v.3

01/10/2021

-

Francesco Ragazzi

-

Elif Mendos Kuskonmaz

-

Ildikó Plájás

-

Ruben van de Ven

-

Ben Wagner

-

TABLE OF CONTENTS

-

TABLE OF FIGURES 4

-

AUTHORS 5

-

ACRONYMS 6

-

EXECUTIVE SUMMARY 9

-

CHAPTER 1. Introduction 16

-

1.1 Objectives of the report 17

-

1.2 The international context 17

-

1.3 The European context 19

-

1.4 Four positions in the policy debates 19

-

1.5 Lack of transparency and the stifling of public debate 21

-

1.6 Scope and working definitions 21

-

1.7 Methodology 22

-

PART I: OVERVIEW OF EUROPEAN PRACTICES 23

-

CHAPTER 2. Technical overview 24

-

2.1 Remote Biometric Identification and classification: defining key terms 24

-

2.2 Detection vs recognition 24

-

2.3 Facial Recognition: verification/authentication vs identification 25

-

2.4 Forensic (ex-post) vs Live Facial Recognition 25

-

2.5 Other systems: gait recognition, emotion recognition 25

-

2.6 How does image-based remote biometric identification work? 27

-

2.7 Technical limits, problems, and challenges of facial recognition 30

-

CHAPTER 3. Overview of deployments in Europe 33

-

3.1 Authentication 33

-

3.2 Surveillance 38

-

3.3 Remote Biometric Identification 40

-

3.4 Conclusion 41

-

CHAPTER 4. Legal bases 43

-

4.1 EU Fundamental Rights Framework for the Right to Privacy and the Right to Protection of Personal Data 43

-

4.2 EU Secondary Law: GDPR & LED 46

-

4.3 EU Soft law: Convention 108+ 48

-

CHAPTER 5. Main political issues and debates 49

-

5.1 The emergence of remote biometric identification as a policy issue 49

-

5.2 Four positions in the policy debates 50

-

5.3 EU Commission Proposal on the Regulation for the Artificial Intelligence Act 55

-

PART II: CASE STUDIES 58

-

CHAPTER 6. Facial Recognition cameras at Brussels International Airport (Belgium) 59

-

6.1 The Zaventem pilot in the context of Face Recognition Technology in Belgium 60

-

6.2 Legal bases and challenges 61

-

6.3 Mobilisations and contestations 62

-

6.4 Effects of the technologies 63

-

CHAPTER 7. The Burglary Free Neighbourhood in Rotterdam (Netherlands) 64

-

7.1 Detection and decision-making in the “Burglary free neighbourhood” Fieldlab 64

-

7.2 Legal bases and challenges 66

-

7.3 Mobilisations and contestations 67

-

7.4 Effects of the technologies 69

-

CHAPTER 8. The Safe City Projects in Nice (France) 71

-

8.1 The various facets of the “Safe city” project in Nice 71

-

8.2 Legal bases and challenges 72

-

8.3 Mobilisations and contestations 74

-

8.4 Effects of the technologies 75

-

CHAPTER 9. Facial Recognition in Hamburg, Mannheim & Berlin (Germany) 77

-

9.1 RBI Deployments in Germany 77

-

9.2 Legal bases and challenges 79

-

9.3 Mobilisations and contestations 81

-

9.4 Effects of the technologies: normalising surveillance 82

-

CHAPTER 10. The Dragonfly project (Hungary) 84

-

10.1 Remote Biometric Identification in Hungary 85

-

10.2 Legal bases and challenges 87

-

10.3 Mobilisations and contestations 89

-

10.4 Effects of the technologies 90

-

CHAPTER 11. Recommendations 92

-

REFERENCES 95

-

ANNEX: CASES 107

-

11.1 CJEU Decisions 107

-

11.2 ECtHR decisions 107

-

11.3 Decisions of National Courts 107

+

Francesco Ragazzi

+

Elif Mendos Kuskonmaz

+

Ildikó Plájás

+

Ruben van de Ven

+

Ben Wagner

+ + + +

TABLE OF FIGURES

Figure 1. EU Countries use of FRT for forensic applications 35

@@ -220,360 +85,358 @@

AUTHORS

-

Dr. Francesco Ragazzi (scientific coordinator) is an associate professor in International Relations at Leiden University (Netherlands), an associated scholar at the Centre d’Etude sur les Conflits, Liberté et Sécurité (France). He holds a PhD in Political Science from Sciences Po Paris (France) and Northwestern University (USA). His research interests include radicalisation, terrorism, and mass surveillance. His current research project, Security Vision, funded by a European Research Council Consolidator Grant analyses the politics of computer vision in the field of security. His work has been published in numerous peer-reviewed journals and edited volumes. He serves on the editorial board of the journals International Political Sociology, Citizenship Studies and Cultures & Conflits. He has been consulted as an expert on issues of security by the European Parliament, for whom he has co-authored several reports, the Council of Europe and the French Senate.

-

Dr. Elif Mendos Kuskonmaz is a lecturer at the School of Law at the University of Portsmouth. She holds a Master’s Degree in Public Law from Istanbul University, and an LLM in Public International Law and a PhD from Queen Mary University of London. She researches on surveillance measures and the nexus with the right to privacy and data protection. Elif is also a registered lawyer with the Istanbul Bar Association.

-

Ildikó Z Plájás is a post-doctoral researcher at the Institute of Political Science, Leiden University. She has studied anthropology and cultural studies in Romania and Hungary, later graduating in Visual Ethnography at Leiden University, the Netherlands. She is currently completing her PhD at the University of Amsterdam. Her research examines how visual technologies in governance enact certain groups of people as “racial others” in Europe.

-

Ruben van de Ven is a PhD candidate in Political Science at the Institute of Political Science, Leiden University. His PhD project studies the ethical and political implications of surveillance algorithms that order human gestures. Since graduating from the Master in Media Design programme at the Piet Zwart Institute, he has researched algorithmic politics through media art, computer programming and scholarly work. He has focused on how the human individual becomes both the subject of and input into machine learning processes. Earlier artistic work on the quantification of emotions examined the transformation of humanistic concepts as they are digitised. His work has been presented at both art exhibitions and academic conferences.

-

Dr Ben Wagner is an assistant professor at the Faculty of Technology, Policy and Management at TU Delft, where his research focuses on technology policy, human rights and accountable information systems. He is Associate Faculty at the Complexity Science Hub Vienna and a visiting researcher at the Human Centred Computing Group, University of Oxford. He previously worked at WU Vienna, TU-Berlin, the University of Pennsylvania and European University Viadrina. He holds a PhD in Political and Social Sciences from the European University Institute in Florence.

+

Dr. Francesco Ragazzi (scientific coordinator) is an associate professor in International Relations at Leiden University (Netherlands), an associated scholar at the Centre d’Etude sur les Conflits, Liberté et Sécurité (France). He holds a PhD in Political Science from Sciences Po Paris (France) and Northwestern University (USA). His research interests include radicalisation, terrorism, and mass surveillance. His current research project, Security Vision, funded by a European Research Council Consolidator Grant analyses the politics of computer vision in the field of security. His work has been published in numerous peer-reviewed journals and edited volumes. He serves on the editorial board of the journals International Political Sociology, Citizenship Studies and Cultures & Conflits. He has been consulted as an expert on issues of security by the European Parliament, for whom he has co-authored several reports, the Council of Europe and the French Senate.

+

Dr. Elif Mendos Kuskonmaz is a lecturer at the School of Law at the University of Portsmouth. She holds a Master’s Degree in Public Law from Istanbul University, and an LLM in Public International Law and a PhD from Queen Mary University of London. She researches surveillance measures and the nexus with the right to privacy and data protection. Elif is also a registered lawyer with the Istanbul Bar Association.

+

Ildikó Z Plájás is a post-doctoral researcher at the Institute of Political Science, Leiden University. She has studied anthropology and cultural studies in Romania and Hungary, later graduating in Visual Ethnography at Leiden University, the Netherlands. She is currently completing her PhD at the University of Amsterdam. Her research examines how visual technologies in governance enact certain groups of people as “racial others” in Europe.

+

Ruben van de Ven is a PhD candidate in Political Science at the Institute of Political Science, Leiden University. His PhD project studies the ethical and political implications of surveillance algorithms that order human gestures. Since graduating from the Master in Media Design programme at the Piet Zwart Institute, he has researched algorithmic politics through media art, computer programming and scholarly work. He has focused on how the human individual becomes both the subject of and input into machine learning processes. Earlier artistic work on the quantification of emotions examined the transformation of humanistic concepts as they are digitised. His work has been presented at both art exhibitions and academic conferences.

+

Dr. Ben Wagner is an assistant professor at the Faculty of Technology, Policy and Management at TU Delft, where his research focuses on technology policy, human rights and accountable information systems. He is Associate Faculty at the Complexity Science Hub Vienna and a visiting researcher at the Human Centred Computing Group, University of Oxford. He previously worked at WU Vienna, TU-Berlin, the University of Pennsylvania and European University Viadrina. He holds a PhD in Political and Social Sciences from the European University Institute in Florence.

ACRONYMS

- - - - - - + + + + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + - + @@ -601,9 +464,6 @@
  • A better understanding of the technical components and possible usage applications of image-based RBI technologies is needed in order to assess their potential political implications.

  • RBI technologies are subject to technical challenges and limitations which should be considered in any broader analysis of their ethical, legal, and political implications.

  • -
    -

     

    -

    CHAPTER 3: Overview of deployments in Europe

    • Current deployments of RBI technologies within Europe are primarily experimental and localised. However, the technology coexists with a broad range of algorithmic processing of security images being carried out on a scale which ranges from the individual level to what could be classed as biometric mass surveillance. Distinguishing the various characteristics of these deployments is not only important to inform the public debate, but also helps to focus the discussion on the most problematic uses of the technologies.

    • @@ -618,7 +478,7 @@
    • The normative legal framework for conducting biometric surveillance in public spaces can be found in the EU secondary legislation on data protection (GDPR and LED). The use of biometric data under this framework must be reviewed in light of the protection offered by fundamental rights.

• The European Commission’s April 2021 proposal on the Regulation for the Artificial Intelligence Act aims to harmonise regulatory rules for Member States on AI-based systems. The Proposed Regulation lays out rules focused on three categories of risks (unacceptable, high, and low/minimal risk) and anticipates covering the use of RBI systems. It also aims to complement the rules and obligations set out in the GDPR and LED.

    -

     

    +

    CHAPTER 5: Political developments and main issues of contention

    • Four main positions on RBI systems have emerged among political actors as a result of both technical developments in the field and early legislative activity of EU institutions: 1) active promotion 2) support with safeguards; 3) moratorium and 4) outright ban.

    • @@ -644,9 +504,6 @@
    • The infrastructure installed for the experiments can potentially be used for more invasive forms of monitoring. During the project, local police, for example, already voiced an interest in access to the cameras.

• In March 2021, the Fieldlab trial ended. The data collected over the course of the project was not sufficient to have the computer distinguish suspicious trajectories. The infrastructure of cameras and microphones is currently disabled, yet remains in place.

    -
    -

     

    -

    CHAPTER 8: The Safe City Projects in Nice (France)

    • Several French cities have launched “safe city” projects involving biometric technologies, however Nice is arguably the national leader. The city currently has the highest CCTV coverage of any city in France and has more than double the police agents per capita of the neighbouring city of Marseille.

    • @@ -688,12 +545,12 @@

    3. The EU should promote the reinforcement of robust accountability mechanisms for biometric surveillance systems.

      -
    • +
    • The current legislative framework remains unclear as to which institutions may review or authorise biometric surveillance systems. In light of the GDPR and the LED, the Data Protection Authorities (DPAs) in some member states enforce the relevant data protection legislation and oversee the processing of biometric data, while in others a separate authority is tasked with the responsibility to review the compatibility with the relevant legislation insofar as personal data processing by law enforcement authorities is concerned (such as Belgium, see case study).

      -
    • -
    • +
    • +
    • The EU should work toward developing a centralised authorisation process for biometric surveillance, within which all relevant authorities are included and are able to veto the authorisation.

      -
    • +
    • Although the proposed EU Artificial Intelligence Act limits a prior authorisation by a court or independent administrative authority to ‘real-time’ biometric surveillance, it is necessary to underline that ex-post biometric identification systems must be subject to supervision or authorisation taking into account the standards under the ECHR and the Charter.

    • @@ -727,56 +584,50 @@

      Introduction

      -
    ABISAutomated Biometric Identification Systems
    ABISAutomated Biometric Identification Systems
    ACLUACLU American Civil Liberties Union
    ADMADM Automated Decision-Making (System)
    AFISAFIS Automated Fingerprint Identification System
    AIAI Artificial Intelligence
    ANPRANPR Automated Number Plate Recognition
    APIAPI Application Programming Interface
    AWSAWS Amazon Web Services
    BDASBDAS Biometric Data Processing System
    BDSGBDSG Federal Data Protection Act (Germany)
    BKABKA Federal Criminal Police Office (Germany)
    BKKBKK Centre for Budapest Transport (Hungary)
    BPIBPI Public Investment Bank (France)
    BPOLBPOL German Federal Police
    CATCHCATCH Central Automatic TeChnology for Recognition of Persons (Netherlands)
    CBISCBIS Central Biometric Information System (Czechia)
    CCTVCCTV Closed Circuit Television
    CGTCGT General Labour Confederation (France)
    CJEUCJEU Court of Justice of the European Union (EU)
    CNILCNIL National Commission for Informatics and Freedoms (France)
    COCCOC Supervisory Body for Police Information (Belgium)
    CoECoE Council of Europe
    COCOCOCO Common Objects in Context (Dataset)
    COVIDCOVID Coronavirus Disease
    CSUCSU Centre for Urban Supervision (France)
    DEPDEP Digital European Program
    DITSSDITSS Dutch Institute for Technology, Safety & Security
    DPADPA Data Protection Authority
    ECEC European Commission (EU)
    ECtHRECtHR European Court of Human Rights
    EDEEDE Criminal identification database (Austria)
    EDPBEDPB European Data Protection Board (EU)
    EDPSEDPS European Data Protection Supervisor (EU)
    EDSEDS European Data Strategy
    EEAEEA European Economic Area
    EPPEPP European People’s Party
    EUEU European Union
    FRAFRA Fundamental Rights Agency (EU)
    FRTFRT Facial Recognition Technology
    FRVTFRVT Face Recognition Vendor Test
    GDPRGDPR General Data Protection Regulation (EU)
    HCLUHCLU Hungarian Civil Liberties Union (Hungary). See “
    HDHD High Definition
    HDRHDR Habitoscopic Data Register
    HKRHKR Home Quarantine App (Hungary)
    IARPAIARPA Intelligence Advanced Research Projects Agency (USA)
    IDID Identification
    IFRSIFRS Interpol Facial R
    IKSZRIKSZR Integrated Traffic Management and Control System (Hungary)
    INCLOINCLO International Network of Civil Liberties Organisations
    INPOLINPOL Criminal Case Management System (Germany)
    KAKKAK Governmental Data Centre (Hungary)
    KDNPKDNP Christian Democratic People's Party (Hungary)
    LEDLED Law Enforcement Directive (EU)
    LFPLFP Law on the Function of Police (Belgium)
    LGBTQLGBTQ Lesbian, Gay, Bisexual,Transgender, Queer
    LIDARLIDAR Light Detection and Ranging
    LPALPA Airport Police (Belgium)
    LQDNLQDN La Quadrature du Net (France)
    GMOGMO Genetically Modified Organism
    MITMIT Massachusetts Institute of Technology
    MRAPMRAP Movement against racism and for friendship between peoples (France)
    NAIHNAIH Hungarian National Authority for Data Protection and Freedom of Information
    NBISNBIS National Biometric Identification System (Romania)
    NGONGO Non-Governmental Organisation
    NISTNIST National Institute of Standards and Technology (USA)
    NISZNISZ National Infocommunication Services (Hungary)
    PARAFEPARAFE Rapid passage at the external borders (France)
    PPMPPM Pixels Per Meter
    RBIRBI Remote Biometric Identification
    RETURETU Registered persons identifying features database and Aliens database (Finland)
    RGBRGB Red, Green, Blue
    SISSIS Schengen Information System
    SSNSSSNS Secret Service for National Security (Hungary)
    TAJTAJ Criminal case history database (France)
    TASZTASZ Hungarian Civil Liberties Union
    TELEFITELEFI Towards the European Level Exchange of Facial Images (EU Project)
    UAVGUAVG GDPR Implementation Act (Germany)
    UKUK United Kingdom
    UNUN United Nations
    UNHRCUNHRC United Nations Human Rights Council
    US(A)US(A) United States of America
    VGGVGG Visual Geometry Group (Dataset)
    VMDVMD Video motion detection
    VOCVOC Visual Object Classes (Pascal VOC)
    YOLOYOLO You Only Look Once (Algorithm)
    - - - - - -
    +

    Key points

    -
    • The aim of this report is to establish a problematised overview of what is currently being done in Europe when it comes to remote biometric identification (RBI), and to assess in which cases we could potentially fall into forms of biometric mass surveillance.

    • Private and public actors are increasingly deploying “smart surveillance” solutions including RBI technologies which, if left unchecked, could become biometric mass surveillance.

    • Facial recognition technology has been the most discussed of the RBI technologies. However, there seems to be little understanding of the ways in which this technology might be applied and the potential impact of such a broad range of applications on the fundamental rights of European citizens.

    • The development of RBI systems by authoritarian regimes which may subsequently be exported to and used within Europe is of concern. Not only as it pertains to the deployments of such technologies but also the lack of adequate insight into the privacy practices of the companies supplying the systems.

    • Four main positions have emerged with regard to the deployments of RBI technologies and their potential impact on fundamental rights: 1) active promotion 2) support with safeguards; 3) moratorium and 4) outright ban.

    • -
    + +

Since the widespread use of neural network algorithms in 2012, artificial intelligence applied to the field of security has steadily grown into a political, economic, and social reality. As examples from Singapore, the UK, South Africa, or China demonstrate, the image of a digital society of control, in which citizens are monitored through algorithmically processed audio and video feeds, is becoming a tangible possible reality in the European Union.

    Through a set of “pilot projects”, private and public actors including supermarkets, casinos, city councils, border guards, local and national law enforcement agencies are increasingly deploying a wide array of “smart surveillance” solutions. Among them remote biometric identification, namely security mechanisms “that leverage unique biological characteristics” such as fingerprints, facial images, iris or vascular patterns to “identify multiple persons’ identities at a distance, in a public space and in a continuous or ongoing manner by checking them against data stored in a database.” (European Commission 2020b, 18) European institutions have reacted with a series of policy initiatives in the last years, but as we will show in this report, if left unchecked, remote biometric identification technologies can easily become biometric mass surveillance.

    -

     

    +

    Among technologies of remote biometric identification, facial recognition has been at the centre of the attention of most discussions in the public debate. The foregrounding of this specific use case of computer vision in the public debate has allowed concerned actors to raise awareness on the dangers of artificial intelligence algorithms applied to biometric datasets. But it has also generated confusion. The perception that facial recognition is a single type of technology (i.e., an algorithm “that recognises faces”) has obscured the broad range of applications of “smart technologies” within very different bureaucratic contexts: from the “smart cities” live facial recognition of video feeds deployed for the purpose of public space surveillance, to the much more specific, on-the-spot searches by law enforcement for the purpose of carrying out arrests or forensic investigations.

    -

     

    +

    The disentanglement and specification of each of these uses is important, if only because each distinct technological arrangement between sensing devices (cameras, microphones), datasets and algorithmic processing tools allows for radically different applications, and thus can have different types of impact on European citizens’ fundamental rights. As the recent communication of the European Commission (2021) stated, not all systems and not all applications are equally threatening for our democratic freedoms: some bear too much risk of infringing our fundamental rights – and therefore should never be allowed; some are “high risk” applications that can take place in certain circumstances with very clear safeguards; and some are more mundane uses of the technologies that require less attention. The ethical, political, and legal assessment of these levels of danger can therefore not be separated from a detailed understanding of how these technologies work. The limitation being of course that while we know what technologies are theoretically available to public actors, the detail of their characteristics is often hidden from view.

    Objectives of the report

    The aim of this report is thus to establish a problematised overview of what we know about what is currently being done in Europe when it comes to remote biometric identification, and to assess in which cases we could potentially fall into forms of biometric mass surveillance. The report will thus answer the following questions: What types of technologies are being used and how? In what context? By whom are these technologies used and to what aim? What types of actors are involved? What types of consequences does the use of those technologies entail? What legal basis and framework are applied to the use of those technologies? What are the forms of mobilisation and contestation against these uses?

    -

     

    +

    In the rest of this introduction, we locate the political context for this study, including the voices that have called for a moratorium or a ban of all technologies that are associated with “biometric mass surveillance”. We then specify the objectives, scope, methodology, some working definitions and outline the remaining chapters.

    The international context

    The concern for uncontrolled deployment of remote biometric identification systems emerges in a context characterised by the development of technologies in authoritarian regimes; the development of controversial “pilot” projects as part of “smart cities projects” in Europe; revelations about controversial privacy practices of companies such as Clearview AI; and finally, by the structuration of a US and EU debate around some of the key biases and problems they entail.

    -

     

    +

    In 2013, the Chinese authorities officially revealed the existence of Skynet, a large system of mass surveillance involving more than 20 million cameras, which had been in operation since 2005. While the cameras were aimed at the general public, more targeted systems were deployed in provinces such as Tibet and Xinjiang where political groups contest the authority of Beijing. In 2018, the surveillance system became coupled with a system of social credit, and Skynet became increasingly connected to facial recognition technology (Ma 2018; Jiaquan 2018). By 2019, it was estimated that Skynet had reached 200 million face-recognition enabled CCTV cameras (Mozur 2018).

    -

     

    +

    The intrusiveness of the system, and its impact on fundamental rights is best exemplified by its deployment in the Xinjiang province. The provincial capital, Urumqi, is chequered with checkpoints and identification stations. Citizens need to submit to facial recognition ID checks in supermarkets, hotels, train stations, highway stations and several other public spaces (Chin and Bürge 2017). The information collected through the cameras is centralised and matched against other biometric data such as DNA samples and voice samples. This allows the government to attribute trust-worthiness scores (trustworthy, average, untrustworthy) and thus generate a list of individuals that can become candidates for detention (Wang 2018).

    -

     

    +

    European countries’ deployments are far from the Chinese experience. But the companies involved in China’s pervasive digital surveillance network (such as Tencent, Dahua Technology, Hikvision, SenseTime, ByteDance and Huawei) are exporting their know-how to Europe, under the form of “safe city” packages. Huawei is one of the most active in this regard. On the European continent, the city of Belgrade has for example deployed an extensive communication network of more than 1.000 cameras which collect up to 10 body and facial attributes (Stojkovski 2019). The cameras, deployed on poles, major traffic crossings and a large number of public spaces allow the Belgrade police to monitor large parts of the city centre, collect biometric information and communicate it directly to police officers deployed in the field. Belgrade has the most advanced deployment of Huawei’s surveillance technologies on the European continent, but similar projects are being implemented by other corporations – including the European companies Thales, Engie Ineo or Idemia – in other European cities and many “Safe City” deployments are planned soon in EU countries such as France, Italy, Spain, Malta, and Germany (Hillman and McCalpin 2019). Furthermore, contrary to the idea that China would be the sole exporter of Remote Biometric Identification technologies, EU companies have substantially developed their exports in this domain over the last years (Wagner 2021).

    The turning point of public debates on facial recognition in Europe was probably the Clearview AI controversy in 2019-2020. Clearview AI, a company founded by Hoan Ton-That and Richard Schwartz in the United States, maintained a relatively secret profile until a New York Times article revealed in late 2019 that it was selling facial recognition technology to law enforcement.  In February 2020, it was reported that the client list of Clearview AI had been stolen, and a few days later the details of the list were leaked (Mac, Haskins, and McDonald 2020). To the surprise of many in Europe, in addition to US government agencies and corporations, it appeared that the Metropolitan Police Service (London, UK), as well as law enforcement from Belgium, Denmark, Finland, France, Ireland, Italy, Latvia, Lithuania, Malta, the Netherlands, Norway, Portugal, Serbia, Slovenia, Spain, Sweden, and Switzerland were on the client list. The controversy grew larger as it emerged that Clearview AI had (semi-illegally) harvested a large number of images from social media platforms such as Facebook, YouTube and Twitter in order to constitute the datasets against which clients were invited to carry out searches (Mac, Haskins, and McDonald 2020).

    -

     

    +

    The news of the hacking strengthened a strong push-back movement against the development of facial recognition technology by companies such as Clearview AI, as well as their use by government agencies. In 2018, Massachusetts Institute of Technology (MIT) scholar and Algorithmic Justice League founder Joy Buolamwini together with Timnit Gebru had published the report Gender Shades (Buolamwini and Gebru 2018), in which they assessed the racial bias in the face recognition datasets and algorithms used by companies such as IBM and Microsoft. Buolamwini and Gebru found that algorithms performed generally worse on darker-skinned faces, and in particular darker-skinned females, with error rates up to 34% higher than lighter-skinned males (Najibi 2020). IBM and Microsoft responded by amending their systems, and a re-audit showed less bias. Not all companies responded equally. Amazon’s Rekognition system, which was included in the second study, continued to show a 31% lower accuracy rate for darker-skinned females. The same year ACLU conducted another key study on Amazon’s Rekognition, using the pictures of members of congress against a dataset of mugshots from law enforcement. 28 members of Congress, largely people of colour, were incorrectly matched (Snow 2018). Activists engaged lawmakers. In 2019, the proposed Algorithmic Accountability Act sought to allow the Federal Trade Commission to regulate private companies’ uses of facial recognition. In 2020, several companies, including IBM, Microsoft, and Amazon, announced a moratorium on the development of their facial recognition technologies. Several US cities, including Boston, Cambridge (Massachusetts), San Francisco, Berkeley, Portland (Oregon), have also banned their police forces from using the technology.

    -

     

    +

    The European context

    In Europe, a similar set of developments took place around Artificial Intelligence in activist circles, both at the member states level and at the EU level. (Andraško et al. 2021, 3). The first intervention dates from 2017 with the European Parliament Resolution of 16 February to the Commission on Civil Law Rules on Robotics (European Parliament 2017). It was followed by two statements and advisory documents: The Age of Artificial Intelligence, published by the European Political Strategy Centre; and a Statement on Artificial Intelligence, Robotics and Autonomous Systems (March 2018), published by the European Group on Ethics in Science and New Technologies (Andraško et al. 2021, 3). At the beginning of 2018, the European Economic and Social Committee issued three opinions on the deployment of AI in practice (European Economic and Social Committee 2018a, 2018b, 2018c). All these documents addressed the need for the EU to understand AI uses, and embedded them in the various ethical and political frameworks created by EU institutions. The same year, the Council of Europe began its activities on the matter. In 2017, the Parliamentary Assembly of the Council of Europe adopted a Recommendation on Technological Convergence, Artificial Intelligence and Human Rights pointing towards the need to establish common guidelines for the use of artificial intelligence in court (Parliamentary Assembly of the Council of Europe 2017; Gonzalez Fuster 2020, 45).

    -

     

    +

    Legislative activity accelerated in 2018. The European Commission (2018a) published a communication Artificial Intelligence for Europe, in which it called for a joint legal framework for the regulation of AI-related services. Later in the year, the Commission (2018b) adopted a Coordinated Plan on Artificial Intelligence with similar objectives. It compelled EU member states to adopt a national strategy on artificial intelligence which should meet the EU requirements. It also allocated 20 billion euros each year for investment in AI development. (Andraško et al. 2021, 4).

    -

     

    +

    In 2019, the Council of Europe Commissioner for Human Rights published a Recommendation entitled Unboxing Artificial Intelligence: 10 steps to Protect Human Rights which describes several steps for national authorities to maximise the potential of AI while preventing or mitigating the risk of its misuse. (Gonzalez Fuster 2020, 46). The same year the European Union’s High Level Expert Group on Artificial Intelligence (AI HLEG) adopted the Ethics Guidelines for Trustworthy Artificial Intelligence, a key document for the EU strategy in bringing AI within ethical standards (Nesterova 2020, 3).

    -

     

    +

    In February 2020, the new European Commission went one step further in regulating matters related to AI, adopting the digital agenda package – a set of documents outlining the strategy of the EU in the digital age. Among the documents the White Paper on Artificial Intelligence: a European approach to excellence and trust captured most of the commission’s intentions and plans.  

    @@ -821,12 +672,8 @@

    Technical overview

    - - - - - - -
    +

    Key points

    -
    • The current market of RBI systems is overwhelmingly dominated by image-based products, at the centre of which is facial recognition technology (FRT). Other products such as face detection and person detection technologies are also in use.

    • FRT is typically being deployed to perform two types of searches: cooperative searches for verification and/or authentication purposes, and non-cooperative searches to identify a data subject. The former involves voluntary consent from the data subject to capture their image, while the latter may not.

    • @@ -834,10 +681,8 @@
    • Other RBI technologies are being deployed though their use at present is marginal compared to FRT, these include gait, audio, and emotion recognition technologies, amongst others.

    • A better understanding of the technical components and possible usage applications of image-based RBI technologies is needed in order to assess their potential political implications.

    • RBI technologies are subject to technical challenges and limitations which should be considered in any broader analysis of their ethical, legal, and political implications.

    • -
    + +

    In order to grasp the various facets of remote biometric identification that could potentially lead to biometric mass surveillance, this section provides an overview of the currently available technologies, how they work and what their limitations are as well as where and by whom they are deployed in the European Union.

    Remote Biometric Identification and classification: defining key terms

    @@ -936,21 +781,15 @@

    Overview of deployments in Europe

    - - - - - - -
    +

    Key points

    -
    • Current deployments of RBI technologies within Europe are primarily experimental and localised. However, the technology coexists with a broad range of algorithmic processing of security images being carried out on a scale which ranges from the individual level to what could be classed as biometric mass surveillance. Distinguishing the various characteristics of these deployments is not only important to inform the public debate, but also helps to focus the discussion on the most problematic uses of the technologies.

    • Image and sound-based security applications being used for authentication purposes do not currently pose a risk for biometric mass surveillance. However, it should be noted that an alteration to the legal framework could increase the risk of them being deployed for biometric mass surveillance especially as many of the databases being used contain millions of data subjects.

    • In addition to authentication, image and sound-based security applications are being deployed for surveillance. Surveillance applications include the deployment of RBI in public spaces.

    • Progress on two fronts makes the development of biometric mass surveillance more than a remote possibility. Firstly, the current creation and/or upgrading of biometric databases being used in civil and criminal registries. Secondly, the repeated piloting of live-feed systems connected to remote facial and biometric information search and recognition algorithms.  

    • -
    + +

    When looking at the map of actual deployments of image and sound-based security technologies in Europe, Remote Biometric Identification is, as this report is being written, so far mostly an experimental and localised application. It coexists alongside a broad range of algorithmic processing of security images in a spectrum that goes from individual, localised authentication systems to generalised law enforcement uses of authentication, to what can properly be defined as Biometric Mass Surveillance. Distinguishing the various characteristics of these deployments is not only important to inform the public debate, but it also helps focus the discussion on the most problematic uses of the technologies. It also highlights the risks of function creep: systems deployed for one use which is respectful of EU fundamental rights can in some cases very easily be upgraded to function as biometric mass surveillance.

    The European map of image and sound-based security technologies can be divided into two broad categories: authentication applications and surveillance applications. Remote Biometric Identification is a sub-category of the latter.

    @@ -1064,34 +903,28 @@
    @@ -1265,21 +1092,15 @@

    Facial Recognition cameras at Brussels International Airport (Belgium)

    - - - - - - -
    +

    Key points

    -
    • Belgium is one of two European countries that has not yet authorised the use of FRT, however, law enforcement is strongly advocating for its use and the current legal obstacles to its implementation might not hold for very long given the political pressure.

    • In 2017, unbeknownst to the Belgian Supervisory Body for Police Information (COC), Brussels International Airport acquired 4 cameras connected to a facial recognition software for use by the airport police. Though the COC subsequently ruled that this use fell outside of the conditions for a lawful deployment, the legality of the airport experiment fell into a legal grey area because of the ways in which the technology was deployed.

    • One justification for the legality of the airport experiment from the General Commissioner of Federal Police was to compare the technological deployment to that of the legal use of other intelligent technologies such as Automated Number Plate Recognition (ANPR). Although this argument was rejected at the time, such a system could be re-instated if the grounds for interruption are no longer present in the law.

    • Some civil society actors in Belgium contest the legitimacy of remote biometric identification. However, current legislative activity seems to point in the direction of more acceptance for remote biometric surveillance.

    • -
    + +

    Belgium is, with Spain, one of the few countries in Europe that has not authorised the use of facial recognition technology, neither for criminal investigations nor for mass surveillance (Vazquez 2020). This does not mean that it is unlikely to change its position in the very near future. Law enforcement is indeed strongly advocating its use, and the current legal obstacles are not likely to hold for very long (Bensalem 2018). The pilot experiment that took place in Zaventem / Brussels International Airport, although aborted, occurred within a national context in which biometric systems are increasingly used and deployed.

    Belgium will, for example, soon roll out at the national level the new biometric identity card “eID”, the Minister of Interior Annelies Verlinden has recently announced. The identification document, which will rely on the constitution of a broad biometric database and is part of a broader European Union initiative, is being developed in partnership with the security multinational Thales and was already trialled with 53.000 citizens (Prins 2021; Thales Group 2020).30

    Municipalities in different parts of the country are experimenting with Automated Number Plate Recognition (ANPR) technology. A smaller number have started deploying “smart CCTV” cameras, which fall just short of using facial recognition technology. The city of Kortrijk has for example deployed “body recognition” technology, which uses walking style or clothing of individuals to track them across the city’s CCTV network. Facial recognition is possible with these systems, but has not been activated as of yet pending legal authorisation to do so. In the city of Roeselare, “smart cameras” have been installed in one of the shopping streets. Deployed by telecom operator Citymesh, they could provide facial recognition services, but are currently used to count and estimate crowds, data which is shared with the police (van Brakel 2020). All the emerging initiatives of remote biometric identification are however pending a reversal of the decision to halt the experiment at Zaventem Brussels International Airport.

    @@ -1307,15 +1128,15 @@

    Mobilisations and contestations

    Based on this legislative framework, the General Commissioner, in his letter to the COC dated 18 July 2019, justified a deployment without consultation of the COC nor the Belgian DPA on the grounds that

    -

    “although the creation of a technical database for facial recognition is not possible under the current legislation, the use of real-time intelligent technologies other than Automatic Number Plate Recognition (ANPR) is possible under Article 25/3 of the LFP. The legislator has indeed provided that a camera used by the police, regardless of its type, can be equipped with intelligent technology. The introduction of real-time facial recognition is therefore, in our opinion, in accordance with the law.” (Organe de Controle de l'Information Policière 2019, 4)

    +
    “although the creation of a technical database for facial recognition is not possible under the current legislation, the use of real-time intelligent technologies other than Automatic Number Plate Recognition (ANPR) is possible under Article 25/3 of the LFP. The legislator has indeed provided that a camera used by the police, regardless of its type, can be equipped with intelligent technology. The introduction of real-time facial recognition is therefore, in our opinion, in accordance with the law.”
    (Organe de Controle de l'Information Policière 2019, 4)

    The COC was not convinced by the arguments of the General Commissioner and concluded that the LFP did not apply. It justified its decision as follows:

    -

    “As the case stands, the Regulator is not entirely convinced that the LFP is applicable. It is true that the definition of a "smart camera" is taken in a very broad sense. According to Article 25/2, §1, 3° of the LFP, this term refers to "a camera which also includes components and software which, whether or not coupled with registers or files, can process the images collected autonomously or not". In the explanatory memorandum, ANPR cameras and cameras for facial recognition are mentioned as examples” (Organe de Controle de l'Information Policière 2019, 4)

    +
    “As the case stands, the Regulator is not entirely convinced that the LFP is applicable. It is true that the definition of a "smart camera" is taken in a very broad sense. According to Article 25/2, §1, 3° of the LFP, this term refers to "a camera which also includes components and software which, whether or not coupled with registers or files, can process the images collected autonomously or not". In the explanatory memorandum, ANPR cameras and cameras for facial recognition are mentioned as examples”
    (Organe de Controle de l'Information Policière 2019, 4)

    It further added that

    @@ -1328,22 +1149,16 @@

    The Burglary Free Neighbourhood in Rotterdam (Netherlands)

    - - - - - - -
    +

    Key points

    -
    • The Fieldlab Burglary Free Neighbourhood is a public-private collaboration with two aims: to detect suspicious behaviour and to influence the behaviour of the suspect. While the system of smart streetlamps does collect some image and sound-based data, it does not record any characteristics specific to the individual.

    • From a legal perspective, there is a question as to whether or not the data processed by the Burglary Free Neighbourhood programme qualifies as personal data and thus would fall within the scope of data protection legislation.

    • It is contested whether forms of digital monitoring and signalling are actually the most efficient methods for preventing break ins. Despite the aims of the programme, to date, the streetlights have only been used to capture data for the purposes of machine learning.

    • The infrastructure installed for the experiments can potentially be used for more invasive forms of monitoring. During the project, local police, for example, already voiced an interest in access to the cameras.

    • In March 2021, the Fieldlab trial ended. The data collected over the course of the project was not sufficient to enable the computer to distinguish suspicious trajectories. The infrastructure of cameras and microphones is currently disabled, yet remains in place.

    • -
    + +

    In October 2019, the Carlo Collodihof, a courtyard in the Rotterdam neighbourhood Lombardijen, was equipped with a new kind of streetlamp. The twelve new luminaires did not just illuminate the streets; they were fitted with cameras, microphones, speakers, and a computer which was connected to the internet. They are part of the so called Fieldlab Burglary Free Neighbourhood: an experiment in the public space with technologies for computer sensing and data processing, aimed at the prevention of break-ins, robberies, and aggression; at increasing the chances of catching offenders; and at increasing a sense of safety for the inhabitants of the neighbourhood (Redactie Inbraakvrije Wijk 2019; Kokkeler et al. 2020b). The practical nature of a Fieldlab provides a way to examine concretely how the various technologies come together, and how they fit in with existing infrastructures and regulations.

    Detection and decision-making in the “Burglary free neighbourhood” Fieldlab

    @@ -1364,7 +1179,7 @@

    The Constitution for the Kingdom of the Netherlands provides for a general right to protection for privacy in Article 10, according to which restrictions to that right must be laid down by law. The GDPR Implementation Act (Uitvoeringswet Algemene Verordening Gegevensbescherming) (UAVG), as well as the Police Data Act (Wet Politiegegevens) or the Judicial Data and Criminal Records Act (Wet Justitiele en Strafvorderlijke Gegevens) which implement the GDPR and the LED, provides the legal framework regarding privacy and data protection.

    The definition of personal data as enshrined in the GDPR and the LED is directly applicable under the Dutch law. To qualify data as such, “any information” must relate to an identified or identifiable natural person. Based on the data that can be captured by the Fieldlab programme, two elements of this definition need further attention.

    -“Information “relating to” a natural person”. The former Article 29 Working Party (2007) substantiated this element by noting that information can relate to an individual based on its content (i.e., information is about the individual), its purpose (i.e., information is used or likely to be used to evaluate, treat in a way, or influence the status or behaviour of an individual), or its result (i.e., information is likely to have an impact on a certain person’s rights and interests, taking into account all the circumstances surrounding the precise case). These three alternative notions to determine whether the information relates to an individual were endorsed by the CJEU in its Nowak decision (C-434/16), where it dealt with the purpose (i.e., it evaluates the candidate’s competence) and the result (i.e., it is used to determine whether the candidate passes or fails, which can have an impact on the candidate’s rights) of the information in question in determining whether the written answers to an exam would qualify as personal data. In brief, in determining whether the data captured by the Fieldlab programme qualify as personal data, the context for which the data is used or captured is important. Information about the level of crowding or sound could “relate” to an individual if it is used to evaluate or influence the behaviour of a person (based on its purpose), or to affect a person’s rights (based on its result) (Galič and Gellert 2021).

    -

     

    +

    -“Identifiable Person”. The notion of identifiability covers circumstances where it is possible to distinguish the individual from a group of people by combining additional information (See 4.2.1). In situations where the person cannot be identified, determining the extent to which that person can be identifiable depends on the possibilities of combining additional identifying information (Galič and Gellert 2021). However, where the system mainly operates on non-personal data because its aim is to influence the behaviour of a group of people, instead of an identified or identifiable person, the chances of having sufficient data to render the person identifiable would be lower (ibid). 

    The uncertainties around these two elements of personal data mean that a project that monitors and tracks the behaviour of individuals in public spaces may fall outside the scope of data protection legislation if there are uncertainties around whether the data it processes actually qualify as personal data. Notably, the Whitepaper on the sensors in the role of municipalities (van Barneveld, Crover, and Yeh 2018), produced in collaboration with the Ministry of Interior, contains a reference to the definition of personal data and the possibility of combining, for example, sound-data with camera recordings to trigger the application of the data protection legislation, without giving further details. Unlike in the relevant sections of the other case studies, this section will not explore further data processing conditions under the UAVG and the other relevant laws, because the first issue from a data protection perspective with the Fieldlab programme or any similar initiative is whether they process personal data at all.

    @@ -1372,7 +1187,7 @@

    Mobilisations and contestations

    Despite visits from the mayor of Rotterdam and Secretary of State Sander Dekker, the Fieldlab of the Burglary Free Neighbourhood has not been discussed much in Dutch media. The most prominent discussion on the project has been in a TV broadcast and online video by Nieuwsuur, in which criminologist Marc Schuilenburg is sceptical about the technology deployed in the Fieldlab (Nieuwsuur 2020a, 5:38m):

    -

    So far, there has not been any study that assesses the effectiveness of the streetlamps. We know what works best against burglary: looking out for each other and fitting your door with a double lock. Social cohesion is known to work best. […] What is happening now is that social cohesion is degrading, because neighbours can trust in the intelligent streetlight. Any responsibility is delegated to a streetlight.

    +
    So far, there has not been any study that assesses the effectiveness of the streetlamps. We know what works best against burglary: looking out for each other and fitting your door with a double lock. Social cohesion is known to work best. […] What is happening now is that social cohesion is degrading, because neighbours can trust in the intelligent streetlight. Any responsibility is delegated to a streetlight.

    Schuilenburg frames the interest of cities in technologies such as those used in the Burglary Free Neighbourhood as being part of the well-marketed narrative of the “smart city” that is sold by technology companies: “no city wants to be dumb” (“Nieuwsuur” 2020b, 36m). To some extent, Guido Delver positions the project’s privacy-by-design methodology in contrast to many of these commercial products for surveillance. In his conversations with various municipalities he recognises, and shares, the interest for “smart” surveillance technologies. However, Delver attempts to minimise the data gathering in the Burglary Free Neighbourhood. This proves to be a constant negotiation, for example the police have voiced an interest in access to the camera feeds in case suspicious behaviour was detected. However, access to the camera feeds has been deliberately kept outside of the scope of the project (Delver 2021).

    While the project currently only stores the position of passers-by, there are also technical considerations for the capture of more information. For example, the video cameras cannot cover the entire area, therefore, as no characteristics of individuals are stored, tracking people from one camera to the next is problematic. It raises the question of whether biometric measurements such as a person’s estimated volume, length, or colour of clothing should be recorded, this would allow the computer to link the trace of one camera to another. Posing ethical and legal questions for the project: what are the legal ramifications of deducing and (temporarily) storing these characteristics, and for how long should they be stored (Delver 2021)? Even for projects that decide to consider privacy by design, it can be tempting to store and process biometric information. However, as mentioned above (see section 7.2.), the challenges in determining whether the Fieldlab or any other similar initiatives process personal data as defined in the GDPR raises questions on the extent to which these programmes fall within the scope of the data protection legislation, irrespective of the fact that they may be designed to affect the personal autonomy of individuals (as opposed to an identified or identifiable individual) by influencing and nudging their behaviours.

    Finally, commentators have pointed out the discrepancy between what is expected of the technology, and what it is actually doing. For example, the Algemeen Dagblad (Krol 2019) writes that the “smart streetlights” are actually able to “recognise behaviour” and to “sound the alarm” if necessary. Whereas up until now, the streetlights have only been used to capture data for machine learning.

    @@ -1393,21 +1208,15 @@

    The Safe City Projects in Nice (France)

    - - - - - - -
    +

    Key points

    -
    • Several French cities have launched “safe city” projects involving biometric technologies, however Nice is arguably the national leader. The city currently has the highest CCTV coverage of any city in France and has more than double the police agents per capita of the neighbouring city of Marseille.

    • Through a series of public-private partnerships the city began a number of initiatives using RBI technologies (including emotion and facial recognition). These technologies were deployed for both authentication and surveillance purposes with some falling into the category of biometric mass surveillance.

    • One project which used FRT at a high school in Nice and one in Marseille was eventually declared unlawful. The court determined that the required consent could not be obtained due to the power imbalance between the targeted public (students) and the public authority (public educational establishment). This case highlights important issues about the deployment of biometric technologies in public spaces.

    • The use of biometric mass surveillance by the mayor of Nice Christian Estrosi has put him on a collision course with the French Data Protection Authority (CNIL) as well as human rights/ digital rights organisations (Ligue des Droits de l’Homme, La Quadrature du Net). His activities have raised both concern and criticism over the usage of the technologies and their potential impact on the privacy of personal data.

    • -
    + +

    Although several French cities such as Paris, Valenciennes or Marseille have launched pilot projects for “safe city” projects involving biometric technologies (facial, voice, sound recognition), the city of Nice is perhaps the national leader in the experimentation with such technologies at a local level (Nice Premium 2017). The mayor of Nice, Christian Estrosi (Les Républicains Party, right), a prominent political figure on the national political scene, has made clear his intention was to make Nice a “laboratory” of crime prevention (Barelli 2018). Since 2010, more than 1.962 surveillance cameras have been deployed throughout the city, making it the city with the highest CCTV coverage in France (27 cameras per square kilometre). Nice also possesses the most local police in France per inhabitant: 414 agents, for a population of 340.000 (in comparison, the neighbouring city of Marseille has 450 agents for 861.000 inhabitants).

    The various facets of the “Safe city” project in Nice

    @@ -1421,18 +1230,18 @@

    Legal bases and challenges

    The use of facial recognition systems in high schools in Nice and Marseille, which was declared unlawful by the Administrative Court of Marseille, raised important issues on the legality of deploying biometric technologies in public places.

    There is no specific provision devoted to the right to privacy or data protection in the French Constitution of 1958, but constitutional safeguards for the interests protected under said rights exists. The French Constitutional Council (Conseil Constitutionnel) has recognised that the respect for privacy is protected by Article 2 of the 1789 Declaration of the Rights of Man and of the Citizen, which is incorporated in the French constitutionality bloc as binding constitutional rule (bloc de constitutionnalité) (French Constitutional Council, Decision N° 2004-492 DC of 2 March 2004). Accordingly, the collection, retention, use and sharing of personal data attracts protection under the right to privacy (French Constitutional Council, Decision n° 2012-652 DC of 22 March 2012). The limitations to that right must thus be justified on grounds of general interest and implemented in an adequate manner, proportionate to this objective (ibid).

    -

     

    +

    France has updated the Act N°78-17 of 6 January 1978 on information technology, data files and civil liberties in various stages to incorporate the provisions of the GDPR, address the possible exemptions contained in the GDPR, and implement the LED.

    -

     

    +

    The Act sets out the reserved framework for sensitive data including biometric data in its Article 6, which states that sensitive data can be processed for purposes listed in Article 9(2) of the GDPR as well as those listed in its Article 44. The latter includes the re-use of information contained in court rulings and decisions, provided that neither the purpose nor the outcome of such processing is the re-identification of the data subjects; and the processing of biometric data by employers or administrative bodies if it is strictly necessary to control access to workplaces, equipment, and applications used by employees, agents, trainees, or service providers in their assignments. 

    Pursuant to Article 6 of the Act N°78-17, processing of sensitive data can be justified for public interest if it is duly authorised in accordance with Articles 31 and 32 of the Act. Accordingly, an authorisation by decree of the Conseil d'État (State Council) is required after reasoned opinion of CNIL, for processing of biometric data on behalf of the State for the authentication of control of the identity of the individuals (Article 32, Act N°78-17). 

    -

     

    +

    In February 2020, the Administrative Court of Marseille considered the extent to which the data subject’s explicit consent may provide an appropriate legal basis in the deployment of facial recognition systems to control access to high schools in Nice and Marseille (Administrative Court of Marseille, Decision N°1901249 of 27 February 2020). After recognising that data collected by facial recognition constitute biometric data (para 10), the Court held that the required consent could not be obtained simply by the students or their legal representatives in the case of minors signing a form due to the power imbalance between the targeted public and the public educational establishment as the public authority (para. 12). More importantly, the Court determined that the biometric data processing could not be justified based on a substantial public interest (i.e., controlling access to premises) envisioned in Article 9(2)(g) of the GDPR in the absence of considerations that the relevant aim could not be achieved by badge checks combined with – where appropriate – video surveillance (ibid). 

    -

     

    +

    Article 88 of the Act N°78-17 provides the specific limitations of the processing of sensitive data for law enforcement purposes, according to which their processing is prohibited unless it is strictly necessary, subject to appropriate safeguards for the data subject’s rights and freedoms and based on any of the same three grounds listed in Article 10 of the LED, including where it is authorised by law.

    -

     

    +

    The Act N°78-17 provides the data subject rights against the processing of their personal data with restrictions to the exercise of those rights subject to certain conditions (e.g., the restriction for protecting public security to the right to access the data processed for law enforcement purposes pursuant to Art 107 of Act N°78-17). An important data subject’s right in the context of biometric surveillance is the data subject’s right not to be subjected to solely automated decision-making, including profiling, except if it is carried out in light of circumstances laid out in Article 22 of the GDPR and for individual administrative decisions taken in compliance with French legislation (Article 47 of Act N°78-17). That said, for the latter circumstance, the automated data processing must not involve sensitive data (Article 47(2), Act N°78-17). Regarding the data processing operations relating to State security and defence (Article 120, Act N°78-17) and to the prevention, investigation, and prosecution of criminal offences (Article 95, Act N°78-17), the Act lays out an absolute prohibition against solely automated decision-making, according to which no decision producing legal effects or similarly significant effects can be based on said decision-making intended to predict or assess certain personal aspects of the person concerned. Particularly, with respect to data processing operations for law enforcement purposes, Article 95 of the Act prohibits any type of profiling that discriminates against natural persons based on sensitive data as laid out in Article 6.

    -

     

    +

    In addition to the data protection legislation, the other legislation applicable to biometric surveillance is the Code of Criminal Procedure. Its Article R40-26 allows the national police and gendarmerie to retain in a criminal records database (Traitement des Antécédents Judiciaires or TAJ) photographs of people suspected of having participated in criminal offences as well as victims and persons being investigated for causes of death, serious injury or disappearance to make it possible to use a facial recognition device. According to a 2018 report by Parliament, TAJ contains between 7 and 8 million facial images (Assemblée Nationale N°1335, 2018, 64, f.n. 2). La Quadrature du Net lodged legal complaints against the retention of facial images before the Conseil d'État, arguing that this practice does not comply with the strict necessity test required under Article 10 of LED and Article 88 of Act N°78-17 (La Quadrature du Net, 2020).

    @@ -1457,22 +1266,16 @@

    Facial Recognition in Hamburg, Mannheim & Berlin (Germany)

    - - - - - - -
    +

    Key points

    -
    • The German federal police, in cooperation with the German railway company, conducted a project called “Sicherheitsbahnhof” at the Berlin railway station Südkreuz in 2017/18, which included 77 video cameras and a video management system.

    • The police in Hamburg used facial recognition software Videmo 360 during the protests against the G20 summit in 2017. The database includes 100.000 individuals who were in Hamburg during the G20 summit and whose profiles are saved in the police database. The technology allows for the determination of behaviour, participation in gatherings, preferences, and religious or political engagement.

    • Sixty-eight cameras were installed by local police on central squares and places in the German city Mannheim to record the patterns of movement of people. In this project, which started in 2018, the software is used to detect conspicuous behaviour.

    • Half of these deployments (Mannheim & Berlin Südkreuz) took place as measures to test the effectiveness of facial recognition and behavioural analysis software. This “justification as a test” approach is often used in Germany to argue for a deviation from existing rules and societal expectations and was similarly applied during deviations to commonly agreed measures in the Coronavirus/COVID-19 pandemic.

    • Resistance to video surveillance is also in no small part a result of constant campaigning and protest by German civil society. The Chaos Computer Club and Digital Courage have consistently campaigned against video surveillance and any form of biometric or behavioural surveillance. The long term effect of these “pilots” is to normalise surveillance.

    • -
    + +

    RBI Deployments in Germany

    All the deployments of RBI we are aware of in Germany were conducted by law enforcement. The deployments range from using facial recognition software to analyse the German central criminal information system, to specific deployments in more targeted locations such as Berlin Südkreuz train station or Mannheim city centre, or to deployments around specific events such as the G20 in Hamburg in 2019.

    @@ -1497,25 +1300,25 @@

    Legal bases and challenges

    The question on the legal permissibility of examples of biometric video surveillance explained above requires a brief description of the constitutional and legislative framework for the protection of privacy and personal data, and the police powers granted under the German law in relation to the use and processing of personal data.

    The general right of personality based on Articles 2(1) and 1(1) of the German Constitution protects individuals against the collection, storage, and use of their personal data by public authorities (Eichenhofer and Gusy, 2017).  The basic right to informational self-determination guarantees the authority to decide on the disclosure and also on the type of use of one's personal data (BVerfG, judgment of 15 December 1983 - 1 BvR 209/83, para. 149).

    -

     

    +

    Germany adapted a new Federal Data Protection Act (BDSG), to use the discretionary powers and the application of national laws contained in the GDPR. The BDSG also contains data protection provisions on the processing of personal data by activities of public federal bodies which do not fall within the scope of Union law (e.g., intelligence services, Federal Armed Forces) (Part 4, BDSG) and implements the LED (Part 3, BDSG).

    -

     

    +

    Paragraph 22 of the BDSG sets out lawful purposes additional to those listed in Article 9 of the GDPR for which sensitive data may be processed. For the purpose of this report, the lawful purposes that are relevant for public bodies processing operations are the following: (i) processing is urgently necessary for reasons of substantial public interest; (ii) processing is necessary to prevent substantial threats to public security; (iii) processing is urgently necessary to prevent substantial harm to the common good or to safeguard substantial concerns of the common good; (iv) processing is necessary for urgent reasons of defence or to fulfil supra- or intergovernmental obligations of a public body. In each case, the interests sought with any of these purposes must outweigh the data subject’s interest. Paragraph 22 of the BDSG further imposes obligations such as access restriction and encryption in relation to implementing appropriate safeguards to protect the data subjects’ interest when the processing is carried out based on the above purposes. Furthermore §27 of the BDSG envisages the processing of sensitive data for scientific or historical research purposes or statistical purposes subject to certain conditions

    -

     

    +

    In regard to the processing of sensitive data for law enforcement purposes, §48 of the BDSG permits the processing only where it is strictly necessary for the performance of the competent authority’s task, and subject to the existence of certain safeguards such as those in relation to data security and encryption

    -

     

    +

    In terms of the further use of the data, §23 of the BDSG designates purposes for which personal data may be processed other than the initial intended purpose such as where it is necessary to prevent substantial harm to common good, threat to public security, defence, or national security or where it is necessary to prevent serious harms to others’ rights. §49 of the BDSG lays out the rules for the processing of personal data for law-enforcement purposes other than the initial intended law enforcement purpose.

    -

     

    +

    Moreover, the BDSG devotes a specific section to the processing of personal data while conducting video surveillance. Pursuant to §4 of the BDSG, video surveillance of public spaces is permitted only as far as it is necessary (i) for public bodies to perform their tasks; (ii) to exercise the right to determine who shall be allowed or denied access, or (iii) to safeguard legitimate interests for specifically defined purposes. There should be nothing to indicate that the data subject’s legitimate interest overrides the interest protected by any of the respective purposes and protecting lives, health and freedom of people should be considered as a very important interest (§4, the BDSG). More importantly, the data collected through the use of video surveillance can be further processed if it is necessary to prevent threats to state and public security and to prosecute crimes (§4(4), the BDSG). The same section further provides conditions for notification at the earliest possible moment about the surveillance, informing the data subject whose personal data may be collected as a result of the surveillance and the deletion of the data if it is no longer necessary.

    -

     

    +

    The BDSG restricts the application of certain data subject rights as enshrined in the GDPR such as the right to be informed (§33) and the right to request access (§34). §37 of the Act provides a sectorial exception in relation to providing services pursuant to an insurance contract for the prohibition against the sole automated decision-making. In relation to the processing of personal data for law enforcement purposes, the BDSG permits the sole automated decision-making if it is authorised by law (§55). Nevertheless, the decision cannot be based on sensitive data unless there are suitable safeguards to the data subject (§55(2)). In any case, it provides an explicit prohibition against conducting profiling that may discriminate against people based on their sensitive data (§55(3)).  

    -

     

    +

    The collection of personal data in general and facial images in particular in criminal investigation proceedings are authorised under German Law by the Federal Police Act (Gesetz über die Bundespolizei) (BPoIG), by the Federal Criminal Police Office and the Cooperation of the Federal and State Governments in Criminal Police Matters (Bundeskriminalamtgesetz) (BKAG), the Code of Criminal Procedure (Strafprozessordnung) (StPO), and the police acts of Länder.

    -

     

    +

    §24 of the BPoIG grants the Federal Police the authority to take photographs including image recordings of a person subject to specific conditions. Moreover, §26 of the BPoIG, entrusts the Federal Police the power to collect personal data by making picture and sound recordings of participants in public events or gatherings if facts justify that there are significant risks to border security or to categories of people or objects. §27 of the BPoIG further authorises the use of automatic image recording, albeit in relation to security risks at the border or to categories of people or objects. Each section provides the obligations for the deletion of the data after a specific timeframe.

    -

     

    +

    The BKAG provides the rules for information collection by the Federal Criminal Police Office in its information system, established pursuant to §13 of the BKAG. §12 of the Act allows the processing of personal data by the Office for purposes other than those for which they were collected in order to prevent, investigate, and prosecute serious crimes. Additionally, the personal data of people who are convicted of, accused of, and suspected of committing a crime, and for whom there are factual indications that they may commit crimes of considerable importance in the near future, may be processed to identify that person (§12, para. 5, the BKAG). The same Article states that personal data obtained by taking photos or image recordings of a person by means of covert use of technical means in or out of homes may not be further processed for law enforcement purposes.

    -

     

    +

    §81b of the StPO grants the police the authority to obtain the photographs and fingerprints of a suspect and any of his measurements in order to conduct criminal proceedings. §100h of the StPO covers the police power to conduct covert surveillance measures, which includes the recording of the photographs and other images of the person concerned outside of private premises where other means of establishing the facts or determining an accused’s whereabouts would offer less prospect of success or would be more difficult. In terms of the investigative powers of police to use personal data in general, §98c of the StPO grants the authority to automatic matching of personal data from criminal proceedings with other data stored for the purposes of criminal prosecution or the enforcement of a sentence, or in order to avert a danger. This is, however, subject to the specific rules under federal law or Länder law. §483 of the StPO authorises a number of authorities to process personal data where necessary for the purposes of criminal proceedings including for criminal proceedings other than the one for which the data were collected. §484 of the StPO allows for the processing of personal data for future criminal proceedings.

    @@ -1536,12 +1339,8 @@

    The Dragonfly project (Hungary)

    - - - - - - -
    +

    Key points

    -
    • The Hungarian Government led by Prime Minister Viktor Orbán has long been on a collision course with EU Institutions over the rule of law and the undermining of the country’s judicial independence and democratic institutions.

    • Hungary is a frontrunner in Europe when it comes to authorising law enforcement’s use of Facial Recognition Technology, developing a nationwide and centralised database (The Dragonfly Project), and using the Home Quarantine App as part of the Government’s Coronavirus measures.

    • @@ -1549,53 +1348,49 @@
    • This is due to (1) the overlap between the private and public sectors, specifically government institutions, and (2) the complex entanglements biometric systems have with other information systems (such as car registries, traffic management, public transport monitoring and surveillance, etc.).

    • Although the latter are not concerned with the traces of the human body they can nonetheless be used for and facilitate biometric mass surveillance. These entanglements create grey zones of biometric mass surveillance where the development and deployment of such technologies is hidden from visibility and critical scrutiny.

    • The Dragonfly Project has elicited numerous warnings regarding data protection and the rights to privacy from both public and private organisations. However the lack of contestation and social debate around the issues of privacy and human rights in relation to such projects as the Hungarian Government’s Dragonfly is striking.

    • -
    + +

    Under the Government of Prime Minister Viktor Orbán, Hungary has been on a collision course with EU Institutions. It has centralised and consolidated its power by marginalising civil society and curtailing the autonomy of Hungarian media, cultural and higher education institutions (Csaky 2020; Gehrke 2020; Verseck 2020). Orbán’s continued erosion of the country’s democratic institutions was further advanced with the 2020 adoption of an emergency law which allows the government to rule by decree (Schlagwein 2020; Stolton 2020). In this context, the latest developments in using Biometric Identification Technologies in Hungary flag serious concerns regarding the rule of law, human rights and civil liberties.

    Hungary is a frontrunner in Europe when it comes to authorising law enforcement’s use of Facial Recognition Technology, developing a nationwide and centralised database, and using the Home Quarantine App as part of the Government’s Coronavirus measures. The infrastructure in place that potentially allows for a centralised deployment of biometric mass surveillance technologies in Hungary has reached an unprecedented scale while the legal and ethical scrutiny of these technologies lags dangerously behind. This is due to (1) the overlap between the private and public sectors, specifically government institutions, and (2) the complex entanglements biometric systems have with other information systems (such as car registries, traffic management, public transport monitoring and surveillance, etc.). Although the latter are not concerned with the traces of the human body, they can nonetheless be used for and facilitate biometric mass surveillance. These entanglements create grey zones of biometric mass surveillance where the development and deployment of such technologies is hidden from visibility and critical scrutiny.

    -
      -
    1. Remote Biometric Identification in Hungary

      -
        -
      1. The Hungarian Police’s use of Facial Recognition

      2. -
    2. -
    +

    Remote Biometric Identification in Hungary

    + +

    The Hungarian Police’s use of Facial Recognition

    +

    On 10 December 2019 the Hungarian Parliament passed a package of amendments of acts for the work of law enforcement in Hungary. Entitled “the simplification and digitisation of some procedures”, this adjustment legalised the use of forensic – but also live – FRT by the Hungarian Police (Hungarian Parliament 2019). In cases when a person identified by the police cannot present an ID document, the police agents can take a photograph of the individual on location, take fingerprints, and record the biometric data based on “perception and measurement” of external characteristics. The photo taken on location can be instantly verified against the database of the national registry of citizens. The automatic search is performed by a face recognition algorithm and the five closest matches are returned to the police agent who, based on these photos, proceeds with identifying the person (1994. Évi XXXIV. Törvény, para 29/4(a)). This application of FRT does not fall under the category of mass surveillance; however, it is only possible due to a central system which collects and centralises the national and other biometric databases but also provides the technical support for accessing it in a quick and effective way by various operational units. In this instance, by the patrolling police.

    The Dragonfly (Szitakötő) Project

    In 2018 the Ministry of Interior presented a bill in the Hungarian Government that proposed a centralised CCTV system with data stored in one centralised database called the Governmental Data Centre (Kormányzati Adatközpont, abbreviated as KAK). All governmental operations aiming at developing this centralised database run under the name Szitakötő (Dragonfly). This central storage facility collects surveillance data of public spaces (streets, squares, parks, parking facilities, etc.); the Centre for Budapest Transport (BKK); bank security and the Hungarian Public Road PLC. The project, with an estimated budget of 50 billion forints (160 million euros), proposes to centralise about 35.000 CCTV cameras and 25.000 terabytes of monitoring data from across the country (NAIH 2018). While the project, and notably the response of Dr. Attila Péterfalvi, head of the Hungarian Data Protection Authority – the Hungarian National Authority for Data Protection and Freedom of Information (NAIH) – who warned of the lack of data protection considerations in the bill, have been largely mediatised, this has done little to halt the Project, which has already been rolled out. In 2015, the Hungarian company GVSX Ltd had already been contracted (NISZ-GVSX 2019) to implement an Integrated Traffic Management and Control System called IKSZR (Integrált Közlekedésszervezési és Szabályozási Rendszer) that centralises data from various systems such as ANPR cameras, car parks, traffic monitoring, meteorological data, etc. The Dragonfly Project has been designed as an expansion of this system by centralising the data flowing from both the IKSZR system, the databases of the National Infocommunication Services (NISZ) and also CCTV data from other public and private surveillance systems such as those operated by local governments, public transport companies and banks.

    -

     

    +

    The technical description of the Dragonfly Project does not make any explicit reference to (live) facial recognition technology; however, the system collects, stores and searches, in real time, video surveillance footage from 35.000 CCTV cameras. However, from the reports of the Hungarian Civil Liberties Union (HCLU or TASZ in Hungarian) and the DPA, it is known (NAIH 2019, 139) that to some extent FRT has been used by the Secret Service for National Security (SSNS), one of the national security services of Hungary. According to the DPA’s investigation, all the cases in which FRT has been used happened in relation to concrete (criminal) cases looking for a missing person or someone under warrant. These cases were also limited to specific geographic locations (NAIH 2019). According to the DPA’s investigation, in 2019 the FRT system operated by the SSNS found 6.000 matches, which resulted in around 250 instances of stop-and-search and 4 arrests (NAIH 2019). The numbers for 2020 are inconsistent with those given for 2019 (3 matches, 28 instances of stop-and-search, unknown number of arrests), however, this is probably due to the fact that the system has since been moved primarily to the jurisdiction of the Hungarian Police.

    -

     

    +

    While the legal framework for police checks does refer to the use of facial recognition technologies, the national security act does not mention it. This is even more striking as the SSNS is known to be using FRT to provide classified information to the national security services, the police, or other authorised institutions (e.g., prosecutor’s office, tax office, etc.).

    -

     

    +

    Two interrelated companies are responsible for the development, maintenance, and administration of this single central system: the NISZ and IdomSoft Ltd., both owned by the state. The NISZ or National Infocommunication Services is a 100% state owned company that only in 2020 signed 6 contracts to purchase the necessary hardware, storage, and other IT equipment for implementing the Dragonfly Project. While Public Procurement documents (Közbeszerzési Hatóság, 2020) bear witness to the ongoing investments and development of the Dragonfly Project by the Hungarian Government, a comprehensive overview of the project, the stages of its implementation or its budget, is nowhere to be found.

    -

     

    +

    The other company responsible for the administration of the Dragonfly Project is the IdomSoft company, a member of the so called NISZ group. Idomsoft is a 100% indirect state-owned company (indirect ownership means that the government owns shares, but not through authorised state institutions or through other organisations) that, according to its website, “plays a leading role in the development, integration, installation and operation of IT systems of national importance”. Apart from administering the National Dragonfly Database, Idomsoft also assures the interoperability of the various national databases such as the citizen’s registry, passport and visa databases, car registries, and police alerts, and it connects the Hungarian databases into the Schengen Information System (SIS II).

    -

     

    +

    Since the implementation of the Dragonfly Project the Hungarian government has been collecting video surveillance data that is centralised in the Governmental Data Centre (Kormányzati Adatközpont) in the same location and by the same institutions that administer the national registry of citizens, visa-entries, police databases, and also other e-governmental databases such as those related to social security, the tax office or health records.

    -

     

    +

    While the COVID-19 pandemic brought a temporary halt of movement in public spaces, it also facilitated the introduction of new tracking technologies. Hungary is one of two countries in Europe (Poland being the other) to introduce a Home Quarantine App which uses automated face recognition technology to verify that people stay in quarantine for the required time.

     The normalisation of biometric surveillance at home: The Hungarian Home Quarantine App

    In May 2020 Hungarian Authorities rolled out two digital applications, the contact-tracing app called VirusRadar (Kaszás 2020) and the Home Quarantine App (Házi Karantén Rendszer, abbreviated HKR). Both of these apps are centralised tracing apps meaning that they send contact logs with pseudonymised personal data to a central (government) back-end server (Council of Europe 2020, 28). While the VirusRadar only uses Bluetooth data and proximity of other devices, the HKR processes biometric data when comparing facial images of its users.

    Those who, according to the COVID-19 regulations in Hungary, are confined to home quarantine are offered the option to use the app instead of being checked by the police. For those who return from abroad, the use of the app is compulsory. But even those who can choose are encouraged by the authorities to make use of the HKR app; otherwise, they will be subjected to frequent visits by police agents. Once a person downloads the app, its use becomes compulsory and failure to do so or attempts to evade its tracking is considered an administrative offense. From a data protection law point of view, this is a clear case where the data subject’s consent (and in the case of biometric data, their explicit consent) cannot provide the lawful ground for the processing of data through the app (see section 4.2.2). Even if the processing can be based on another lawful ground such as public interest, the punitive nature of non-compliance may raise issues in terms of adhering to the necessity test, which requires a balancing act between the objective pursued and the data subject’s interests.

    -

     

    +

    The HKR app is developed by Asura Technologies and implemented by IdomSoft Ltd., the same company that provides the software and technical implementation for the nation-wide Dragonfly Project. The HKR application works with face recognition technology combined with location verification. The application sends notifications at random times prompting the user to upload a facial image while retrieving the location data of the mobile device. The user must respond within 15 minutes and the location data must match the address registered for quarantine. In order for the Home Quarantine App to work, the user first needs to upload a facial image which is compared by a police officer with the photo of the same individual stored in the central database. After this facial verification, the app creates a biometric template on the mobile phone of the user and the photo is deleted. The consecutive photos are only compared to this biometric template, so neither the photos nor the template leave the personal device. If there is suspicion about the identity or whereabouts of the user, a police officer visits the address to make sure that the person is adhering to the quarantine rules.

    -

     

    +

    Interestingly, the HKR app — just like the contact-tracing app VirusRadar, which was developed by Nextsense — has been “donated” to the Hungarian Government by Asura Technologies “free of charge”.

    -

     

    +

    Graphical user interface, application, chat or text message Description automatically generatedA picture containing text, screenshot, monitor Description automatically generated

    Figure 5. Snapshots from the video Home Quarantine System Short Presentation by Asura Technologies38

    @@ -1647,12 +1442,12 @@

    3. The EU should promote the reinforcement of robust accountability mechanisms for biometric surveillance systems.

      -
    • +
    • The current legislative framework remains unclear as to which institutions may review or authorise biometric surveillance systems. In light of the GDPR and the LED, the Data Protection Authorities (DPAs) in some member states enforce the relevant data protection legislation and oversee the processing of biometric data, while in others a separate authority is tasked with the responsibility to review the compatibility with the relevant legislation insofar as personal data processing by law enforcement authorities is concerned (such as Belgium, see case study).

      -
    • -
    • +
    • +
    • The EU should work toward developing a centralised authorisation process for biometric surveillance, within which all relevant authorities are included and are able to veto the authorisation.

      -
    • +
    • Although the proposed EU Artificial Intelligence Act limits a prior authorisation by a court or independent administrative authority to ‘real-time’ biometric surveillance, it is necessary to underline that ex-post biometric identification systems must be subject to supervision or authorisation taking into account the standards under the ECHR and the Charter.

    • @@ -1685,7 +1480,7 @@
    -

    REFERENCES

    +

    REFERENCES

    1994. Évi XXXIV. Törvény - Nemzeti Jogszabálytár. 1994. https://njt.hu/jogszabaly/1994-34-00-00.

    2015. Évi CLXXXVIII. Törvény - Nemzeti Jogszabálytár. 2015. https://njt.hu/jogszabaly/2015-188-00-00.

    7sur7. 2019. “Des caméras avec reconnaissance faciale à Brussels Airport.” https://www.7sur7.be/belgique/des-cameras-avec-reconnaissance-faciale-a-brussels-airport~a46f7a4c/.

    @@ -1865,7 +1660,7 @@

    Xie, Ning, Gabrielle Ras, Marcel van Gerven, and Derek Doran. 2020. ‘Explainable Deep Learning: A Field Guide for the Uninitiated’. arXiv:2004.14545 [cs, stat]. http://arxiv.org/abs/2004.14545

    -

    ANNEX: CASES

    +

    ANNEX: CASES

    CJEU Decisions