diagram_web/canvas.html

1517 lines
87 KiB
HTML
Raw Permalink Blame History

This file contains invisible Unicode characters

This file contains invisible Unicode characters that are indistinguishable to humans but may be processed differently by a computer. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml" lang="" xml:lang="">
<head>
<meta charset="utf-8" />
<meta name="generator" content="pandoc" />
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
<meta name="author" content="Ruben van de Ven, Ildikó Zonga Plájás, Cyan Bae, Francesco Ragazzi" />
<title>Algorithmic Security Vision: Diagrams of Computer Vision Politics</title>
<style>
/* div[data-custom-style='Body Text']{
background: rgba(255,255,255,.5)
} */
/* --- Generic pandoc output styling --- */
code {
white-space: pre-wrap;
}
span.smallcaps {
font-variant: small-caps;
}
span.underline {
text-decoration: underline;
}
div.column {
display: inline-block;
vertical-align: top;
width: 50%;
}
div.hanging-indent {
margin-left: 1.5em;
text-indent: -1.5em;
}
ul.task-list {
list-style: none;
}
.display.math {
display: block;
text-align: center;
margin: 0.5rem auto;
}
/* In-text placeholders that toggle playback of the diagram players.
   NOTE(review): .anchor is declared again further down (position:relative);
   the two rules could be merged. */
.anchor {
cursor: pointer;
/* TODO investigate scroll-snap-align: center; having scroll-snap-type: y proximity; on html*/
}
/*Filenames with code blocks: https://stackoverflow.com/a/58199362*/
div.sourceCode::before {
content: attr(data-filename);
display: block;
background-color: #cfeadd;
font-family: monospace;
font-weight: bold;
}
/* Full-viewport, fixed background layer holding the pannable collage. */
#collage {
position: fixed;
z-index: -1;
background-color: white;
left: 0;
top: 0;
right: 0;
bottom: 0;
overflow: hidden;
}
/* The pannable/zoomable canvas; left/top/transform are driven by JS on scroll. */
#collage_window {
position: absolute;
top: -1000px;
left: -1000px;
}
#collage_window svg {
position: absolute;
left: 0;
top: 0;
}
/* Slightly translucent white behind body text so it stays readable over the collage. */
div[data-custom-style='Body Text'] p {
padding: 1em 0;
margin: 0;
background-color: rgba(255, 255, 255, 0.8);
}
.anchor {
position: relative;
}
/* Centered play badge shown on an active-but-paused anchor. */
.anchor.active:not(.playing)::before {
content: '⏵';
position: absolute;
width: 40px;
height: 40px;
background: gray;
left: calc(50% - 20px);
top: calc(50% - 20px);
vertical-align: middle;
line-height: 35px;
border-radius: 5px;
color: white;
}
.anchor.active:not(.playing):hover::before {
background: black
}
/* Pause badge shown when hovering an anchor whose player is running. */
.anchor.playing:hover::before {
content: '⏸︎';
position: absolute;
width: 40px;
height: 40px;
background: black;
left: calc(50% - 20px);
top: calc(50% - 20px);
vertical-align: middle;
line-height: 35px;
border-radius: 5px;
color: white;
}
</style>
<link rel="stylesheet" href="paper.css" />
<script src="assets/wNumb-1.2.0.min.js"></script>
<script src="assets/annotate.js"></script>
<script>
// Canvas-pixel coordinates to centre the collage on for each diagram,
// indexed by the anchors' data-i attribute.
const centerPoints = [
[2759, 6452],
[14335, 5364],
[5757, 10084],
[7137, 3869], // left in practice is -5746px;, top: -2988px;:
]
// test with FPR
// Midpoint of the whole SVG canvas (width/height from the <svg> element,
// halved); used as the pan target for the zoomed-out intro/outro views.
const canvasCenter = [20077 / 2, 10331 / 2]
// Current zoom factor of the collage; overwritten on every scroll update.
let scale = .5;
// Constructable stylesheet adopted into each <annotation-player>'s shadow
// DOM: recolors the player via the --override-color custom property and
// hides its built-in controls.
// NOTE(review): `new CSSStyleSheet` without parentheses is valid JS, though
// `new CSSStyleSheet()` would be more conventional.
const sheet = new CSSStyleSheet
sheet.replaceSync(
`
:host{
--override-color: gray;
}
:host(.active){
--override-color: blue;
}
:host(.ending){
--override-color: blue;
}
div.controls{display:none !important;}`
);
/**
 * Sinusoidal ease-in-out: maps progress x in [0, 1] to an eased value in
 * [0, 1] — slow at both ends, fastest at x = 0.5.
 */
function easeInOutSine(x) {
  return (1 - Math.cos(Math.PI * x)) / 2;
}
/**
 * Quartic ease-in-out: accelerates through the first half of [0, 1] and
 * decelerates symmetrically through the second half.
 */
function easeInOutQuart(x) {
  if (x < 0.5) {
    return 8 * x * x * x * x;
  }
  const u = -2 * x + 2;
  return 1 - (u * u * u * u) / 2;
}
/**
 * "Back" ease-in-out: like a quadratic ease but overshoots slightly below 0
 * at the start and above 1 at the end (overshoot factor c1 * 1.525, the
 * conventional easing constant).
 */
function easeInOutBack(x) {
  const c1 = 1.70158;
  const overshoot = c1 * 1.525;
  if (x < 0.5) {
    const t = 2 * x;
    return (t * t * ((overshoot + 1) * t - overshoot)) / 2;
  }
  const t = 2 * x - 2;
  return (t * t * ((overshoot + 1) * t + overshoot) + 2) / 2;
}
// Cached [document_y, anchor_index] pairs, one per .anchor element, sorted
// top-to-bottom; (re)computed on load and on window resize.
let scroll_offsets = []
/**
 * For every .anchor element in the text, compute the absolute document
 * y-coordinate of its vertical midpoint, paired with its data-i index.
 *
 * @returns {Array<[number, string]>} pairs sorted by y-coordinate ascending.
 */
function calculateScrollOffsets() {
  const anchorEls = document.getElementsByClassName('anchor');
  // Fix: was an undeclared assignment, leaking `offsets` onto window and
  // breaking under strict mode. Also dropped the unused `align_pos` lookup.
  const offsets = [];
  for (const anchorEl of anchorEls) {
    const bbox = anchorEl.getBoundingClientRect();
    // Midpoint of the anchor, converted from viewport to document coords.
    const scroll_y = bbox.top + (bbox.height / 2) + window.scrollY;
    offsets.push([scroll_y, anchorEl.dataset.i]);
  }
  return offsets.sort((a, b) => a[0] - b[0]);
}
// Main wiring, run once the DOM is parsed: sets up the scroll-driven
// pan/zoom of the collage canvas, click-to-play on the in-text anchors, and
// an IntersectionObserver that (de)activates the annotation players as
// their anchors scroll through the viewport centre.
window.addEventListener('DOMContentLoaded', () => {
scroll_offsets = calculateScrollOffsets()
const windowEl = document.getElementById('collage_window')
const anchorEls = document.getElementsByClassName('anchor')
const playerEls = document.getElementsByTagName('annotation-player')
// The three dashed SVG connector paths drawn between diagrams on the canvas.
const paths = [document.getElementById('path1'), document.getElementById('path2'), document.getElementById('path3')]
// Make each path's dash as long as the path itself so the stroke can be
// revealed progressively by animating strokeDashoffset from length to 0.
paths.forEach((el) => el.style.strokeDasharray = Math.ceil(el.getTotalLength()) + 'px');
const lastAnchorEl = anchorEls[anchorEls.length - 1]; // NOTE(review): appears unused — confirm before removing
// Clicking an anchor toggles the matching player (matched via data-i);
// the player's play/pause events keep the anchor's .playing class in sync.
for (const anchorEl of anchorEls) {
anchorEl.addEventListener('click', ev => playerEls[anchorEl.dataset.i].annotator.playPause());
playerEls[anchorEl.dataset.i].annotator.addEventListener('play', ev => anchorEl.classList.add('playing'));
playerEls[anchorEl.dataset.i].annotator.addEventListener('pause', ev => anchorEl.classList.remove('playing'));
}
// Adopt the override stylesheet (defined above) into every player's shadow
// DOM to recolor it and hide its built-in controls.
for (const player of playerEls) {
player.shadowRoot.adoptedStyleSheets = [sheet];
}
// Recompute the collage transform for the current scroll position: tween
// position, scale and path colour between the previous and the next
// anchor's centre point, with a "sticky" pause around each anchor.
function updateScroll() {
// calculate the zooming & positioning of the plot
// NOTE(review): center_y, prev, next, step_idx (and several variables
// further down) are implicit globals — consider declaring with let/const.
center_y = window.scrollY + window.innerHeight / 2
prev = null;
next = null;
step_idx = null;
// scroll_offsets is sorted top-to-bottom: find the first anchor below the
// viewport centre (next) and remember the last one above it (prev).
for (let idx in scroll_offsets) {
const offset = scroll_offsets[idx]
if (offset[0] > center_y) {
next = offset
step_idx = idx;
break;
}
prev = offset
}
// Scroll distance (px) over which the view "sticks" to an anchor.
const sticky_dy = 200;
const intro_outro_dy = window.innerHeight + sticky_dy;
let source_pos, target_pos, source_scale, target_scale, source_color, target_color, source_x_offset, target_x_offset;
// Width of the space left of the text column, used to fit the whole map.
const x_column_width = window.innerWidth - document.body.getBoundingClientRect().width + 200; // for some reason the 200 is necessary
const x_center_map = x_column_width / 2;
const x_center_column = document.body.getBoundingClientRect().left + document.body.getBoundingClientRect().width / 2;
// Scale at which the full canvas roughly fits the free space.
const fit_scale = x_column_width / (canvasCenter[0] * 1.7)
let sticky_start = true;
let sticky_end = true;
if (prev === null) {
// Before the first anchor: tween from the zoomed-out overview into it.
prev = [next[0] - intro_outro_dy, null]
source_scale = fit_scale
target_scale = .45
source_pos = canvasCenter
target_pos = centerPoints[next[1]]
source_color = 100;
target_color = 220;
source_x_offset = x_center_map;
target_x_offset = x_center_column;
sticky_start = false; // no sticky before first item
} else if (next === null) {
// After the last anchor: tween back out to the overview.
next = [prev[0] + intro_outro_dy, null]
source_scale = .45
target_scale = fit_scale
source_pos = centerPoints[prev[1]]
target_pos = canvasCenter
source_color = 220;
target_color = 50;
source_x_offset = x_center_column;
target_x_offset = x_center_map;
sticky_end = false; // no sticky after last item
} else {
// Between two anchors: pan at constant zoom from one diagram to the next.
source_pos = centerPoints[prev[1]]
target_pos = centerPoints[next[1]]
target_scale = .45
source_scale = .45
source_color = target_color = 220;
source_x_offset = target_x_offset = x_center_column;
}
const t_old = Math.min(1, Math.max(0, (center_y - prev[0]) / (next[0] - prev[0])))
// Progress between the anchors, with the sticky zones subtracted; s can be
// outside [0, 1] while inside a sticky zone, t is the clamped version.
const s = (center_y - prev[0] - (sticky_dy * sticky_start)) / (next[0] - prev[0] - (sticky_start + sticky_end) * sticky_dy);
const t = Math.min(1, Math.max(0, s))
// console.log(t_old, t)
t_ease = easeInOutSine(t)
// t_ease = easeInOutQuart(t) // use this if not snapping
let sticky_offset = 0;
if (s > 0 && s < 1) {
// scrolling from one item to the next
sticky_offset = t_ease * (sticky_start + sticky_end) * sticky_dy - (sticky_dy * sticky_start);
} else {
// sticky item
if (sticky_end && center_y > next[0] - sticky_dy) {
sticky_offset = sticky_dy - (center_y - next[0] + sticky_dy)
} else if (sticky_start && s <= 0) {
sticky_offset = ((center_y - prev[0]) / sticky_dy) * (- sticky_dy);
}
}
// Interpolate position, scale, horizontal alignment and path colour.
const dx = target_pos[0] - source_pos[0];
const dy = target_pos[1] - source_pos[1];
const ds = target_scale - source_scale
// console.log('twean from', source_pos, 'to', target_pos, 't', t_ease)
// console.log('twean scale', source_scale, 'to', target_scale, 't', t_ease)
scale = source_scale + t_ease * ds;
x_offset = (target_x_offset - source_x_offset) * t_ease + source_x_offset
x = -1 * (source_pos[0] + dx * t_ease) * scale + x_offset;
y = -1 * (source_pos[1] + dy * t_ease) * scale + window.innerHeight / 2 + sticky_offset;
const color = (target_color - source_color) * t_ease + source_color
// sheet.rules[0].style.setProperty('--override-color', `rgba(${color},${color},${color},0.7)`);
// NOTE(review): `sheet.rules` is a legacy alias of `sheet.cssRules`;
// prefer cssRules for standards compliance.
sheet.rules[0].style.setProperty('--disactive-path', `rgba(${color},${color},${color},0.7)`);
// draw the line
if (step_idx === null) {
// full paths
paths.forEach(el => el.style.strokeDashoffset = 0)
}
else {
// Paths before the current step are fully drawn, the one leading into the
// current step animates with the tween, later ones stay hidden.
paths.forEach((el, idx) => {
if (idx >= step_idx) {
el.style.strokeDashoffset = Math.ceil(el.getTotalLength()) + 'px';
} else if (idx == step_idx - 1) {
// console.log('anim', el)
el.style.strokeDashoffset = Math.ceil(el.getTotalLength() - el.getTotalLength() * t_ease) + 'px';
} else {
el.style.strokeDashoffset = 0;
}
});
// paths.forEach((el) => stroke)
}
// console.log('x', x, 'y', y, 'scale', scale, 'color', color)
// console.log(x, y);
windowEl.style.transform = `scale(${scale})`
windowEl.style.left = `${x}px`
windowEl.style.top = `${y}px`
// calculate whether we're nearing the conclusion, and color accordingly
const last = Math.max(...Array.from(anchorEls).map((e) => e.getBoundingClientRect().bottom))
if (last < 0) {
for (const playerEl of playerEls) {
playerEl.classList.add('ending')
}
} else {
for (const playerEl of playerEls) {
playerEl.classList.remove('ending')
}
}
}
windowEl.style.transform = `scale(${scale})`
window.addEventListener('resize', (ev) => {
scroll_offsets = calculateScrollOffsets()
updateScroll()
})
window.addEventListener('scroll', updateScroll)
updateScroll()
// Observe anchors against a horizontal band around the viewport centre:
// the negative rootMargin shrinks the root to ~10px of vertical extent.
let options = {
// root: document.querySelector("#scrollArea"), // viewport by default
rootMargin: `${-Math.floor((window.innerHeight - 10) / 2)}px 0px`, //"0px",
threshold: 0,
};
let observer = new IntersectionObserver((entries, observer) => {
entries.forEach((entry) => {
// NOTE(review): index and playerEl are implicit globals here.
index = entry.target.dataset.i;
console.log(entry)
if (index >= playerEls.length) {
return;
}
// NOTE(review): assumes the first children of #collage_window are the
// <annotation-player> elements in data-i order — confirm against markup.
playerEl = windowEl.children[index];
if (entry.isIntersecting) {
entry.target.classList.add('active');
playerEl.classList.add('active')
} else {
entry.target.classList.remove('active');
playerEl.classList.remove('active')
// Pause a still-running player when its anchor leaves the centre band.
if (typeof playerEl.annotator.paused !== 'undefined' && !playerEl.annotator.paused) {
console.log('pause', playerEl.annotator, playerEl.annotator.paused)
playerEl.annotator.pause()
}
}
})
}, options);
// const anchorEls = document.getElementsByClassName('anchor');
for (const anchorEl of anchorEls) {
observer.observe(anchorEl)
}
// console.log(anchorEls)
// .forEach(el => observer.observe());
// console.log(anchorEl.dataset.title);
// const toSelect = typeof anchorEl.dataset.title == 'undefined' || anchorEl.dataset.title == 'none' ? null : frameEl.contentWindow.getIdForTitle(anchorEl.dataset.title);
// // navItemEl.hash url-encodes
// // let targetEl = document.getElementById(navItemEl.attributes.href.value.substr(1));
// // let wrapperEl = targetEl.parentNode;
// let intersectionObserver = new IntersectionObserver(function (entries) {
// console.log(entries);
// // If intersectionRatio is 0, the target is out of view
// // and we do not need to do anything.
// // if (entries[0].intersectionRatio <= 0) {
// // // navItemEl.classList.remove('active');
// // } else {
// // if (toSelect === null) {
// // frameEl.contentWindow.mapGraph.triggerReset();
// // // frameEl.contentWindow.mapGraph.deselectNode();
// // // frameEl.contentWindow.mapGraph.resetZoom();
// // } else {
// // frameEl.contentWindow.mapGraph.triggerSelect(toSelect);
// // // frameEl.contentWindow.mapGraph.selectNode(node);
// // }
// // // navItemEl.classList.add('active');
// // }
// });
// // start observing
// intersectionObserver.observe(anchorEl);
// }
// const linkEls = document.getElementsByClassName('maplink');
// for (let linkEl of linkEls) {
// linkEl.addEventListener('click', (ev) => {
// const toSelect = typeof linkEl.dataset.title == 'undefined' || linkEl.dataset.title == 'none' ? null : frameEl.contentWindow.getIdForTitle(linkEl.dataset.title);
// if (toSelect === null) {
// frameEl.contentWindow.mapGraph.deselectNode();
// frameEl.contentWindow.mapGraph.resetZoom();
// } else {
// const node = frameEl.contentWindow.mapGraph.graph.nodes.filter(n => n.id == toSelect)[0]
// frameEl.contentWindow.mapGraph.selectNode(node);
// }
// })
// linkEl.addEventListener('mouseover', (ev) => {
// const toSelect = typeof linkEl.dataset.title == 'undefined' || linkEl.dataset.title == 'none' ? null : frameEl.contentWindow.getIdForTitle(linkEl.dataset.title);
// if (toSelect) {
// const node = frameEl.contentWindow.mapGraph.graph.nodes.filter(n => n.id == toSelect)[0]
// frameEl.contentWindow.mapGraph.hoverNode(false, node);
// }
// })
// linkEl.addEventListener('mouseout', (ev) => {
// const toSelect = typeof linkEl.dataset.title == 'undefined' || linkEl.dataset.title == 'none' ? null : frameEl.contentWindow.getIdForTitle(linkEl.dataset.title);
// if (toSelect) {
// const node = frameEl.contentWindow.mapGraph.graph.nodes.filter(n => n.id == toSelect)[0]
// frameEl.contentWindow.mapGraph.endHoverNode(node);
// }
// })
// }
});
</script>
</head>
<body>
<header id="title-block-header">
<h1 class="title">Algorithmic Security Vision: Diagrams of Computer
Vision Politics</h1>
<p class="author"><em>Ruben van de Ven, Ildikó Zonga Plájás, Cyan Bae,
Francesco Ragazzi</em></p>
<p class="date">December 2023</p>
</header>
<div id='collage'>
<div id="collage_window">
<!--data-poster-url="annotation-BIXG4VTL.svg"-->
<annotation-player style="position:absolute;width:3243px; height:2635px;left:514px;top:6329px;"
data-url-prefix="assets" stroke="blue" data-crop='whole'
data-annotation-url="annotation-F19O9PGE.json"></annotation-player>
<annotation-player style="position:absolute;width:11867px;height:2753px;left:3905px;top:4333px;"
data-url-prefix="assets" stroke="blue" data-crop='whole'
data-annotation-url="annotation-XYGE65SB.json"></annotation-player>
<annotation-player style="position:absolute;width:4450px;height:3250px;left:5056px;top:7081px;"
data-url-prefix="assets" stroke="blue" data-crop='whole'
data-annotation-url="annotation-NKOUGNWE.json"></annotation-player>
<annotation-player style="position:absolute;width:2547px; height:2710px; left:5690px;top:1584px;"
data-url-prefix="assets" stroke="blue" data-crop='whole'
data-annotation-url="annotation-J4AOBZCJ.json"></annotation-player>
<annotation-player style="position:absolute;width:2952px; height:5043px;left:0;top:0;"
data-url-prefix="assets" stroke="blue" data-crop='whole'
data-annotation-url="annotation-BIXG4VTL.json"></annotation-player>
<svg height="10331.54" version="1.1" viewBox="61.650009 -1201.8501 20077.92 10331.539" width="20077.92"
id="svg1410" xml:space="preserve" xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg">
<defs id="defs4">
<marker style="overflow:visible" id="marker39470" refX="0" refY="0" orient="auto-start-reverse"
markerWidth="5.3244081" markerHeight="6.155385" viewBox="0 0 5.3244081 6.1553851"
preserveAspectRatio="xMidYMid">
<path transform="scale(0.5)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 5.77,0 -2.88,5 V -5 Z" id="path39468-6" />
</marker>
</defs>
<g id="layer1">
<path
style="fill:none;stroke:#00f;stroke-width:5;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:20, 40;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#marker39470)"
d="M 3153.037,4991.1869 C 3627.9651,3806.502 10134,2080.0386 13783.189,4095.2221" id="path1" />
<path
style="fill:none;stroke:#00f;stroke-width:5;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:20, 40;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#marker39470)"
d="M 14473.211,4826.0987 C 14487.638,6297.2524 9146.0135,8823.4797 5997.8383,8868.5177"
id="path2" />
<path
style="fill:none;stroke:#00f;stroke-width:5;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:20, 40;stroke-dashoffset:0;stroke-opacity:1;marker-end:url(#marker39470)"
d="M 5562.416,8572.101 C 5561.5237,7310.7009 7556.3295,6911.6966 7557.7371,3111.5328"
id="path3" />
</g>
</svg>
<!--data-poster-url="annotation-F19O9PGE.svg"-->
<!--data-poster-url="annotation-XYGE65SB.svg"-->
<!--data-poster-url="annotation-NKOUGNWE.svg"-->
<!--data-poster-url="annotation-J4AOBZCJ.svg"-->
</div>
</div>
<section id="part1">
<p> .... this is a demo to showcase how the chronodiagramming looks like in its interactive form. Please note
that this demo of the interface is not compatible with mobile devices ...</p>
<section id="managing-error-from-the-sublime-to-the-risky-algorithm" class="level2">
<h2>3. Managing error: from the sublime to the risky algorithm</h2>
<div data-custom-style="Body Text">
<p>Our third emerging figuration concerns the place of the error. A
large body of literature examines actual and speculative cases of
algorithmic prediction based on self-learning systems (Azar et al.,
2021). Central to these analyses is the boundary-drawing performed by
such algorithmic devices, enacting (in)security by rendering their
subjects as more- or less-risky others (Amicelle et al., 2015: 300;
Amoore and De Goede, 2005; Aradau et al., 2008; Aradau and Blanke, 2018)
based on a spectrum of individual and environmental features (Calhoun,
2023). In other words, these predictive devices conceptualize risk as
something produced by, and thus external to, security technologies.</p>
</div>
<div data-custom-style="Body Text">
<p>In this critical literature on algorithmic practices, practitioners
working with algorithmic technologies are often critiqued for
understanding software as “sublime” (e.g. Wilcox, 2017: 3). However, in
our diagrams, algorithmic vision appears as a practice of managing
error. The practitioners we interviewed are aware of the error-prone
nature of their systems but know these will never be perfect, and see
error as a key metric that needs to be acted upon.</p>
</div>
<div data-custom-style="Body Text">
<p>The most prominent way in which error figures in the diagrams is in
its quantified form of the true positive and false positive rates, TPR
and FPR. The significance and definition of these metrics is stressed by
CTO Gerwin van der Lugt (Diagram 6). In camera surveillance, the false
positive rate could be described as the number of false positive
classifications relative to the number of video frames being analyzed.
Upon writing down these definitions, van der Lugt corrected his initial
definitions, as these definitions determine the work of his development
team, the ways in which his clients — security operators — engage with
the technology, and whether they perceive the output of the system as
trustworthy.</p>
</div>
<div data-custom-style="Figure">
<div class="anchor" data-i="0" style="height:2.3in"></div>
</div>
<div data-custom-style="Caption">
<p>Diagram 6. Gerwin van der Lugt corrects his initial definitions of
the true positive and false positive rates, and stresses the importance
of their precise definition.</p>
</div>
<div data-custom-style="Body Text">
<p>The figuration of algorithmic security vision as inherently imprecise
affects the operationalization of security practices. Van der Lugt's
example concerns whether the violence detection algorithm developed by
Oddity.ai should be trained to categorize friendly fighting
(<em>stoeien</em>) between friends as “violence” or not. In this
context, van der Lugt finds it important to differentiate what counts as
false positive in the algorithm's evaluation metric from an error in the
algorithm's operationalization of a security question.</p>
</div>
<div data-custom-style="Body Text">
<p>He gives two reasons to do so. First, he anticipates that the
exclusion of <em>stoeien</em> from the category of violence would
negatively impact TPR. In the iterative development of self-learning
systems, the TPR and FPR, together with the true and false
<em>negative</em> rates must perform a balancing act. Van der Lugt
outlines that with their technology they aim for fewer than 100 false
positives per 100 million frames per week. The FPR becomes indicative of
the algorithm's quality, as too many faulty predictions will desensitize
the human operator to system alerts.
</p>
</div>
<div data-custom-style="Body Text">
<p>This leads to van der Lugt's second point: he fears that the
exclusion of <em>stoeien</em> from the violence category might cause
unexpected biases in the system. For example, instead of distinguishing
violence from <em>stoeien</em> based on people's body movements, the
algorithm might make the distinction based on their age. For van der
Lugt, this would be an undesirable and hard to notice form of
discrimination. In developing algorithmic (in)security, error is figured
not merely as a mathematical concept but (as shown in Diagram 6) as a
notion that invites pre-emption — a mitigation of probable failure — for
which the developer is responsible. The algorithmic condition of
security vision is figured as the pre-emption of error.</p>
</div>
<div data-custom-style="Figure">
<div class="anchor" data-i="1" style="height:6in"></div>
</div>
<div data-custom-style="Caption">
<p>Diagram 7. By drawing errors on a timeline, van Rest calls attention
to the pre-emptive nature of error in the development process of
computer vision technologies.</p>
</div>
<div data-custom-style="Body Text">
<p>According to critical AI scholar Matteo Pasquinelli, “machine
learning is technically based on formulas for error correction” (2019:
2). Therefore, any critical engagement with such algorithmic processes
needs to go beyond citing errors, “for it is precisely through these
variations that the algorithm learns what to do” (Amoore, 2019: 164),
pushing us to reconsider any argument based on the inaccuracy of the
systems.</p>
</div>
<div data-custom-style="Body Text">
<p>The example of <em>stoeien</em> suggests that it is not so much a
question if, or how much, these algorithms err, but how these errors are
anticipated and negotiated. Thus, taking error as a hallmark of machine
learning we can see how practices of (in)security become shaped by the
notion of mathematical error well beyond their development stages. Error
figures centrally in the development, acquisition and deployment of such
devices. As one respondent indicated, predictive devices are inherently
erroneous, but the quantification of their error makes them amenable to
"risk management.”</p>
</div>
<div data-custom-style="Body Text">
<p>While much has been written about security technologies as a device
<em>for</em> risk management, little is known about how security
technologies are conceptualized as objects <em>of</em> risk management.
What happens then in this double relation of risk? The figure of the
error enters the diagrams as a mathematical concept, throughout the
conversations we see its figure permeate the discourse around
algorithmic security vision. By figuring algorithmic security vision
through the notion of error, risk is placed at the heart of the security
apparatus.
</p>
</div>
</section>
</section>
<section id="con-figurations-of-algorithmic-security-vision-fragmenting-accountability-and-expertise"
class="level1">
<h1>Con-figurations of algorithmic security vision: fragmenting
accountability and expertise</h1>
<div data-custom-style="Body Text">
<p>In the previous section we explored the changing <em>figurations</em>
of key dimensions of algorithmic security vision, in this section we
examine how these figurations <em>configure</em>. For Suchman, working
with configurations highlights “the histories and encounters through
which things are figured <em>into meaningful existence</em>, fixing them
through reiteration but also always engaged in the perpetuity of coming
to be that characterizes the biographies of objects as well as
subjects” (Suchman, 2012: 50, emphasis ours). In other words, we are
interested in the practices and tensions that emerge as figurations
become embedded in material practices. We focus on two con-figurations
that emerged in the interviews: the delegation of accountability to
externally managed benchmarks, and the displacement of responsibility
through the reconfiguration of the human-in-the-loop.</p>
</div>
<section id="delegating-accountability-to-benchmarks" class="level2">
<h2>Delegating accountability to benchmarks</h2>
<div data-custom-style="Body Text">
<p>The first configuration is related to the evaluation of the error
rate in the training of algorithmic vision systems: it involves
datasets, benchmark institutions, and the idea of fairness as equal
representation among different social groups. Literature on the ethical
and political effects of algorithmic vision has notoriously focused on
the distribution of errors, raising questions of ethnic and racial bias
(e.g. Buolamwini and Gebru, 2018). Our interviews reflect the concerns
of much of this literature as the pre-emption of error figured
repeatedly in relation to the uneven distribution of error across
minorities or groups. In Diagram 8, Ádám Remport draws how different
visual traits have often led to different error rates. While the general
error metric of an algorithmic system might seem "acceptable," it
actually privileges particular groups, which is invisible when only the
whole is considered. Jeroen van Rest distinguishes such errors from the
inherent algorithmic imprecision in deep machine learning models, as
systemic biases (Diagram 7), as they perpetuate inequalities in the
society in which the product is being developed.</p>
</div>
<div data-custom-style="Figure">
<div class="anchor" data-i="2" style="height:4in"></div>
</div>
<div data-custom-style="Caption">
<p>Diagram 8. Ádám Remport describes that facial recognition
technologies are often most accurate with white male adult faces,
reflecting the datasets they are trained with. The FPR is higher with
people with darker skin, children, or women, which may result in false
flagging and false arrests.</p>
</div>
<div data-custom-style="Body Text">
<p>To mitigate these concerns and manage their risk, many of our
interviewees who develop and implement these technologies, externalize
the reference against which the error is measured. They turn to a
benchmark run by the American National Institute of Standards and
Technology (NIST), which ranks facial recognition technologies by
different companies by their error metric across groups. John Riemen,
who is responsible for the use of forensic facial recognition technology
at the Center for Biometrics of the Dutch police, describes how their
choice for software is driven by a public tender that demands a "top-10"
score on the NIST benchmark. The mitigation of bias is thus outsourced
to an external, and in this case foreign, institution.</p>
</div>
<div data-custom-style="Body Text">
<p>We see in this outsourcing of error metrics a form of delegation that
brings about a specific regime of (in)visibility. While a particular
kind of algorithmic bias is rendered central to the NIST benchmark, the
mobilization of this reference obfuscates questions on how that metric
was achieved. That is to say, questions about training data are
invisibilized, even though that data is a known site of contestation.
For example, the NIST benchmark datasets are known to include faces of
wounded people (Keyes, 2019). The Clearview company is known to use
images scraped illegally from social media, and IBM uses a dataset that
is likely in violation of European GDPR legislation (Bommasani et al.,
2022: 154). Pasquinelli (2019) argued that machine learning models
ultimately act as data compressors: enfolding and operationalizing
imagery of which the terms of acquisition are invisibilized.</p>
</div>
<div data-custom-style="Body Text">
<p>Attention to this invisibilization reveals a discrepancy between the
developers and the implementers of these technologies. On the one hand,
the developers we interviewed expressed concerns about how their
training data is constituted to gain a maximum false positive rate/true
positive rate (FPR/TPR) ratio, while showing concern for the legality of
the data they use to train their algorithms. On the other hand,
questions about the constitution of the dataset have been virtually
non-existent in our conversations with those who implement software that
relies on models trained with such data. Occasionally this knowledge was
considered part of the developers' intellectual property that had to be
kept a trade secret. A high score on the benchmark is enough to pass
questions of fairness, legitimizing the use of the algorithmic model.
Thus, while indirectly relying on the source data, it is no longer
deemed relevant in the consideration of an algorithm. This illustrates
well how the invisibilization of the “compressed” dataset, in
Pasquinelli's terms, into a model, with the formalization of guiding
metrics into a benchmark, permits a bracketing of accountability. One
does not need to know how outcomes are produced, as long as the
benchmarks are in order.</p>
</div>
<div data-custom-style="Body Text">
<p>The configuration of algorithmic vision's bias across a complex
network of fragmented locations and actors, from the dataset, to the
algorithm, to the benchmark institution reveals the selective processes
of (in)visibilization. This opens up fruitful alleys for new empirical
research: What are the politics of the benchmark as a mechanism of
legitimization? How does the outsourcing of assessing the error
distribution impact attention to bias? How has the critique of bias been
institutionalized by the security industry, resulting in the
externalization of accountability, through dis-location and
fragmentation?</p>
</div>
</section>
<section id="reconfiguring-the-human-in-the-loop" class="level2">
<h2>Reconfiguring the human-in-the-loop</h2>
<div data-custom-style="Body Text">
<p>A second central question linked to the delegation of accountability
is the configuration in which the security operator is located. The
effects of delegation and fragmentation in which the mitigation of
algorithmic errors is outsourced to an external party, becomes visible
in the ways in which the role of the security operator is configured in
relation to the institution they work for, the software's assessment,
and the affected publics.</p>
</div>
<div data-custom-style="Body Text">
<p>The public critique of algorithms has often construed the
<em>human-in-the loop</em> as one of the last lines of defense in the
resistance to automated systems, able to filter and correct erroneous
outcomes (Markoff, 2020). The literature in critical security studies
has however problematized the representation of the security operator in
algorithmic assemblages by discussing how the algorithmic predictions
appear on their screen (Aradau and Blanke, 2018), and how the embodied
decision making of the operator is entangled with the algorithmic
assemblage (Wilcox, 2017). Moreover, the operator is often left guessing
at the working of the device that provides them with information to make
their decision (Møhl, 2021).
</p>
</div>
<div data-custom-style="Body Text">
<p>What our participants' diagrams emphasized is how a whole spectrum of
system designs emerges in response to similar questions, for example the
issue of algorithmic bias. A primary difference can be found in the
degree of understanding of the systems that is expected of security
operators, as well as their perceived autonomy. Sometimes, the human
operator is central to the system's operation, forming the interface
between the algorithmic systems and surveillance practices. Gerwin van
der Lugt, developer of software at Oddity.ai that detects criminal
behavior argues that “the responsibility for how to deal with the
violent incidents is always [on a] human, not the algorithm. The
algorithm just detects violence—that's it—but the human needs to deal
with it.”</p>
</div>
<div data-custom-style="Body Text">
<p>Dirk Herzbach, chief of police at the Police Headquarters Mannheim,
adds that when alerted to an incident by the system, the operator
decides whether to deploy a police car. Both Herzbach and Van der Lugt
figure the human-in-the-loop as having full agency and responsibility in
operating the (in)security assemblage (cf. Hoijtink and Leese,
2019).</p>
</div>
<div data-custom-style="Body Text">
<p>Some interviewees drew a diagram in which the operator is supposed to
be aware of the ways in which the technology errs, so they can address
them. Several other interviewees considered the technical expertise of
the human-in-the-loop to be unimportant, even a hindrance.</p>
</div>
<div data-custom-style="Body Text">
<p>Chief of police Herzbach prefers an operator to have patrol
experience to assess which situations require intervention. He is
concerned that knowledge about algorithmic biases might interfere with
such decisions. In the case of the Moscow metro, in which a facial
recognition system has been deployed to purchase tickets and open access
gates, the human-in-the-loop is reconfigured as an end user who needs to
be shielded from the algorithm’s operation (cf. Lorusso, 2021). On
these occasions, expertise on the technological creation of the suspect
becomes fragmented.</p>
</div>
<div data-custom-style="Body Text">
<p>These different figurations of the security operator are held
together by the idea that the human operator is the expert of the
subject of security, and is expected to make decisions independent from
the information that the algorithmic system provides.</p>
</div>
<div data-custom-style="Figure">
<div class="anchor" data-i="3" style="height:6in"></div>
</div>
<div data-custom-style="Caption">
<p>Diagram 9. Riemen explains the process of information filtering that
is involved in querying the facial recognition database of the Dutch
police.</p>
</div>
<div data-custom-style="Body Text">
<p>Other drivers exist, however, to shield the operator from the
algorithm’s functioning, challenging individual expertise and
acknowledging the fallibility of human decision making. In Diagram 9,
John Riemen outlines the use of facial recognition by the Dutch police.
He describes how data from the police case and on the algorithmic
assessment is filtered out as much as possible from the information
provided to the operator. This, Riemen suggests, might reduce bias in
the final decision. He adds that there should be no fewer than three
humans-in-the-loop who operate independently to increase the accuracy of
the algorithmic security vision.</p>
</div>
<div data-custom-style="Body Text">
<p>Instead of increasing their number, there is another configuration of
the human-in-the-loop that responds to the fallibility of the operator.
For the Burglary-Free Neighborhood project in Rotterdam, project manager
Guido Delver draws surveillance as operated by neighborhood residents,
through a system that they own themselves. By involving different
stakeholders, Delver hopes to counter government hegemony over the
surveillance apparatus. However, residents are untrained in assessing
algorithmic predictions, raising new challenges. Delver illustrates a
scenario in which the algorithmic signaling of a potential burglary may
have dangerous consequences: “Does it invoke the wrong behavior from the
citizen? [They could] go out with a bat and look for the guy who has
done nothing [because] it was a false positive.” In this case, the worry
is that the erroneous predictions will not be questioned. Therefore, in
Delver’s project the goal was to actualize an autonomous system, “with
as little interference as possible.” Human participation or
“interference” in the operation is potentially harmful. Thus, figuring
the operator — whether police officer or neighborhood resident — as
risky, can lead to the relegation of direct human intervention.</p>
</div>
<div data-custom-style="Body Text">
<p>By looking at the figurations of the operator that appear in the
diagrams we see multiple and heterogeneous configurations of
regulations, security companies, and professionals. In each
configuration, the human-in-the-loop appears in different forms. The
operator often holds the final responsibility in the ethical functioning
of the system. At times they are configured as an expert in
sophisticated but error-prone systems; at others they are figured as end
users who are activated by the alerts generated by the system, and who
need not understand how the software works and errs, or who can be left
out.</p>
</div>
<div data-custom-style="Body Text">
<p>These configurations remind us that there cannot be any theorization
of “algorithmic security vision,” both of its empirical workings and its
ethical and political consequences, without close attention to the
empirical contexts in which the configurations are arranged. Each
organization of datasets, algorithms, benchmarks, hardware and operators
has specific problems. And each contains specific politics of
visibilization, invisibilization, responsibility and accountability.</p>
</div>
</section>
</section>
<section id="a-diagram-of-research" class="level1">
<h1>A diagram of research</h1>
<div data-custom-style="Body Text">
<p>In this conclusion, we reflect upon a final dimension of the method
of diagramming in the context of figurations and configurations: its
potential as an alternative to the conventional research program.</p>
</div>
<div data-custom-style="Body Text">
<p>While writing this text, indeed, the search for a coherent structure
through which we could map the problems that emerged from analyzing the
diagrams in a straightforward narrative proved elusive. We considered
various organizational frameworks, but consistently encountered
resistance from one or two sections. It became evident that our
interviews yielded a rhizome of interrelated problems, creating a
multitude of possible inquiries and overlapping trajectories. Some
dimensions of these problems are related, but not to every problem.</p>
</div>
<div data-custom-style="Body Text">
<p>If we take for example the understanding of algorithmic security
vision as practices of error management as a starting point, we see how
the actors we interviewed have incorporated the societal critique of
algorithmic bias. This serves as a catalyst for novel strategies aimed
at mitigating the repercussions of imperfect systems. The societal
critique has driven the development of synthetic datasets, which promise
equitable representation across diverse demographic groups. It has also
been the reason for the reliance on institutionalized benchmarks to
assess the impartiality of algorithms. Moreover, different
configurations of the human-in-the-loop emerge, all promised to rectify
algorithmic fallibility. We see a causal chain there.</p>
</div>
<div data-custom-style="Body Text">
<p>But how does the question of algorithmic error relate to the shift
from photographic to cinematic vision that algorithmic security vision
brings about? Certainly, there are reverberations. The relegation of
stable identity that we outlined could be seen as a way to mitigate the
impact of those errors. But it would be a leap to identify these
questions of error as the central driver for the increased incorporation
of moving images in algorithmic security vision.</p>
</div>
<div data-custom-style="Body Text">
<p>However, if we take as our starting point the formidable strides in
computing power and the advancements in camera technologies, we face
similar problems. These developments make the analysis of movement
possible while helping to elucidate the advances in the real-time
analysis that are required to remove the human-in-the-loop, as trialed
in the Burglary-Free Neighborhood. These developments account for the
feasibility of the synthetic data generation, a computing-intense
process which opens a vast horizon of possibilities for developers to
detect objects or actions. Such an account, however, does not address
the need for such a synthetic dataset. A focus on the computation of
movement, however, would highlight how a lack of training data
necessitates many of the practices described. Synthetic data is
necessitated by the glaring absence of pre-existing security datasets
that contain moving bodies. While facial recognition algorithms could be
trained and operated on quickly repurposed photographic datasets of
national identity cards or driver’s license registries, no dataset for
moving bodies has been available to be repurposed by states or
corporations. This absence of training data requires programmers to
stage scenes for the camera. Thus, while one issue contains echoes of
the other, the network of interrelated problematizations cannot be
flattened into a single narrative.</p>
</div>
<div data-custom-style="Body Text">
<p>The constraints imposed by the linear structure of an academic
article certainly necessitate a specific ordering of sections. Yet the
different research directions we highlight form something else. The
multiple figurations analyzed here generate fresh tensions when put in
relation with security and political practices. What appears from the
diagrams is a network of figurations in various configurations. Instead
of a research <em>program</em>, our interviews point toward a larger
research <em>diagram</em> of interrelated questions, which invites us to
think in terms of pathways through this dynamic and evolving network of
relations.</p>
</div>
</section>
<section id="references" class="level1">
<h1>References</h1>
<div data-custom-style="Bibliography">
<p>Ajana B (2013) <em>Governing Through Biometrics</em>. London:
Palgrave Macmillan UK. DOI: <a href="https://doi.org/10.1057/9781137290755"><span
data-custom-style="Hyperlink">10.1057/9781137290755</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Amicelle A, Aradau C and Jeandesboz J (2015) Questioning security
devices: Performativity, resistance, politics. <em>Security
Dialogue</em> 46(4): 293–306. DOI: <a href="https://doi.org/10.1177/0967010615586964"><span
data-custom-style="Hyperlink">10.1177/0967010615586964</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Amoore L (2014) Security and the incalculable. <em>Security
Dialogue</em> 45(5). SAGE Publications Ltd: 423–439. DOI: <a
href="https://doi.org/10.1177/0967010614539719"><span
data-custom-style="Hyperlink">10.1177/0967010614539719</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Amoore L (2019) Doubt and the algorithm: On the partial accounts of
machine learning. <em>Theory, Culture &amp; Society</em> 36(6). SAGE
Publications Ltd: 147–169. DOI: <a href="https://doi.org/10.1177/0263276419851846"><span
data-custom-style="Hyperlink">10.1177/0263276419851846</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Amoore L (2021) The deep border. <em>Political Geography</em>.
Elsevier: 102547.</p>
</div>
<div data-custom-style="Bibliography">
<p>Amoore L and De Goede M (2005) Governance, risk and dataveillance in
the war on terror. <em>Crime, Law and Social Change</em> 43(2): 149–173.
DOI: <a href="https://doi.org/10.1007/s10611-005-1717-8"><span
data-custom-style="Hyperlink">10.1007/s10611-005-1717-8</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Andersen RS (2015) <em>Remediating Security</em>. 1. oplag.
Ph.d.-serien / Københavns Universitet, Institut for Statskundskab. Kbh.:
Københavns Universitet, Institut for Statskundskab.</p>
</div>
<div data-custom-style="Bibliography">
<p>Andersen RS (2018) The art of questioning lethal vision: Mosse’s
Infra and militarized machine vision. In: <em>Proceedings of EVA
Copenhagen 2018</em>, 2018.</p>
</div>
<div data-custom-style="Bibliography">
<p>Andrejevic M and Burdon M (2015) Defining the sensor society.
<em>Television &amp; New Media</em> 16(1): 19–36. DOI: <a
href="https://doi.org/10.1177/1527476414541552"><span
data-custom-style="Hyperlink">10.1177/1527476414541552</span></a>.
</p>
</div>
<div data-custom-style="Bibliography">
<p>Aradau C and Blanke T (2015) The (big) data-security assemblage:
Knowledge and critique. <em>Big Data &amp; Society</em> 2(2):
2053951715609066. DOI: <a href="https://doi.org/10.1177/2053951715609066"><span
data-custom-style="Hyperlink">10.1177/2053951715609066</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Aradau C and Blanke T (2018) Governing others: Anomaly and the
algorithmic subject of security. <em>European Journal of International
Security</em> 3(1). Cambridge University Press: 1–21. DOI: <a
href="https://doi.org/10.1017/eis.2017.14"><span
data-custom-style="Hyperlink">10.1017/eis.2017.14</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Aradau C, Lobo-Guerrero L and Van Munster R (2008) Security,
technologies of risk, and the political: Guest editors’ introduction.
<em>Security Dialogue</em> 39(2-3): 147–154. DOI: <a
href="https://doi.org/10.1177/0967010608089159"><span
data-custom-style="Hyperlink">10.1177/0967010608089159</span></a>.
</p>
</div>
<div data-custom-style="Bibliography">
<p>Azar M, Cox G and Impett L (2021) Introduction: Ways of machine
seeing. <em>AI &amp; SOCIETY</em>. DOI: <a href="https://doi.org/10.1007/s00146-020-01124-6"><span
data-custom-style="Hyperlink">10.1007/s00146-020-01124-6</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Bae G, de La Gorce M, Baltrušaitis T, et al. (2023) DigiFace-1M: 1
million digital face images for face recognition. In: <em>2023 IEEE
Winter Conference on Applications of Computer Vision (WACV)</em>, 2023.
IEEE.</p>
</div>
<div data-custom-style="Bibliography">
<p>Barad KM (2007) <em>Meeting the Universe Halfway: Quantum Physics and
the Entanglement of Matter and Meaning</em>. Durham: Duke University
Press.</p>
</div>
<div data-custom-style="Bibliography">
<p>Bellanova R, Irion K, Lindskov Jacobsen K, et al. (2021) Toward a
critique of algorithmic violence. <em>International Political
Sociology</em> 15(1): 121–150. DOI: <a href="https://doi.org/10.1093/ips/olab003"><span
data-custom-style="Hyperlink">10.1093/ips/olab003</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Bigo D (2002) Security and immigration: Toward a critique of the
governmentality of unease. <em>Alternatives</em> 27. SAGE Publications
Inc: 63–92. DOI: <a href="https://doi.org/10.1177/03043754020270S105"><span
data-custom-style="Hyperlink">10.1177/03043754020270S105</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Bigo D and Guild E (2005) Policing at a distance: Schengen visa
policies. In: <em>Controlling Frontiers. Free Movement into and Within
Europe</em>. Routledge, pp. 233–263.</p>
</div>
<div data-custom-style="Bibliography">
<p>Bommasani R, Hudson DA, Adeli E, et al. (2022) On the opportunities
and risks of foundation models. Available at: <a href="http://arxiv.org/abs/2108.07258"><span
data-custom-style="Hyperlink">http://arxiv.org/abs/2108.07258</span></a>
(accessed 2 June 2023).</p>
</div>
<div data-custom-style="Bibliography">
<p>Bousquet AJ (2018) <em>The Eye of War</em>. Minneapolis: University
of Minnesota Press.</p>
</div>
<div data-custom-style="Bibliography">
<p>Bucher T (2018) <em>If...Then: Algorithmic Power and Politics</em>.
New York: Oxford University Press.</p>
</div>
<div data-custom-style="Bibliography">
<p>Buolamwini J and Gebru T (2018) Gender shades: Intersectional
accuracy disparities in commercial gender classification.
<em>Proceedings of Machine Learning Research</em> 81.
</p>
</div>
<div data-custom-style="Bibliography">
<p>Calhoun L (2023) Latency, uncertainty, contagion: Epistemologies of
risk-as-reform in crime forecasting software. <em>Environment and
Planning D: Society and Space</em>. SAGE Publications Ltd STM:
02637758231197012. DOI: <a href="https://doi.org/10.1177/02637758231197012"><span
data-custom-style="Hyperlink">10.1177/02637758231197012</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Carraro V (2021) Grounding the digital: A comparison of Waze’s avoid
dangerous areas feature in Jerusalem, Rio de Janeiro and the US.
<em>GeoJournal</em> 86(3): 1121–1139. DOI: <a href="https://doi.org/10.1007/s10708-019-10117-y"><span
data-custom-style="Hyperlink">10.1007/s10708-019-10117-y</span></a>.
</p>
</div>
<div data-custom-style="Bibliography">
<p>Dawson-Howe K (2014) <em>A Practical Introduction to Computer Vision
with OpenCV</em>. 1st edition. Chichester, West Sussex, United Kingdom;
Hoboken, NJ: Wiley.</p>
</div>
<div data-custom-style="Bibliography">
<p>Dijstelbloem H, van Reekum R and Schinkel W (2017) Surveillance at
sea: The transactional politics of border control in the Aegean.
<em>Security Dialogue</em> 48(3): 224–240. DOI: <a href="https://doi.org/10.1177/0967010617695714"><span
data-custom-style="Hyperlink">10.1177/0967010617695714</span></a>.
</p>
</div>
<div data-custom-style="Bibliography">
<p>Farocki H (2004) Phantom images. <em>Public</em>. Available at: <a
href="https://public.journals.yorku.ca/index.php/public/article/view/30354"><span
data-custom-style="Hyperlink">https://public.journals.yorku.ca/index.php/public/article/view/30354</span></a>
(accessed 6 March 2023).</p>
</div>
<div data-custom-style="Bibliography">
<p>Fisher DXO (2018) Situating border control: Unpacking Spain’s SIVE
border surveillance assemblage. <em>Political Geography</em> 65: 67–76.
DOI: <a href="https://doi.org/10.1016/j.polgeo.2018.04.005"><span
data-custom-style="Hyperlink">10.1016/j.polgeo.2018.04.005</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Fourcade M and Gordon J (2020) Learning like a state: Statecraft in
the digital age: 32.</p>
</div>
<div data-custom-style="Bibliography">
<p>Fourcade M and Johns F (2020) Loops, ladders and links: The
recursivity of social and machine learning. <em>Theory and Society</em>:
1–30. DOI: <a href="https://doi.org/10.1007/s11186-020-09409-x"><span
data-custom-style="Hyperlink">10.1007/s11186-020-09409-x</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Fraser A (2019) Curating digital geographies in an era of data
colonialism. <em>Geoforum</em> 104. Elsevier: 193–200.</p>
</div>
<div data-custom-style="Bibliography">
<p>Galton F (1879) Composite portraits, made by combining those of many
different persons into a single resultant figure. <em>The Journal of the
Anthropological Institute of Great Britain and Ireland</em> 8. [Royal
Anthropological Institute of Great Britain; Ireland, Wiley]: 132–144.
DOI: <a href="https://doi.org/10.2307/2841021"><span
data-custom-style="Hyperlink">10.2307/2841021</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Gandy OH (2021) <em>The Panoptic Sort: A Political Economy of
Personal Information</em>. Oxford University Press. Available at: <a
href="https://books.google.com?id=JOEsEAAAQBAJ"><span
data-custom-style="Hyperlink">https://books.google.com?id=JOEsEAAAQBAJ</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Gillespie T (2018) <em>Custodians of the Internet: Platforms, Content
Moderation, and the Hidden Decisions That Shape Social Media</em>.
Illustrated edition. Yale University Press.</p>
</div>
<div data-custom-style="Bibliography">
<p>Goodwin C (1994) Professional vision. <em>American
Anthropologist</em> 96(3).</p>
</div>
<div data-custom-style="Bibliography">
<p>Graham S (1998) Spaces of surveillant simulation: New technologies,
digital representations, and material geographies. <em>Environment and
Planning D: Society and Space</em> 16(4). SAGE Publications Sage UK:
London, England: 483–504.</p>
</div>
<div data-custom-style="Bibliography">
<p>Graham SD (2005) Software-sorted geographies. <em>Progress in human
geography</em> 29(5). Sage Publications Sage CA: Thousand Oaks, CA:
562–580.</p>
</div>
<div data-custom-style="Bibliography">
<p>Grasseni C (2004) Skilled vision. An apprenticeship in breeding
aesthetics. <em>Social Anthropology</em> 12(1): 41–55. DOI: <a
href="https://doi.org/10.1017/S0964028204000035"><span
data-custom-style="Hyperlink">10.1017/S0964028204000035</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Grasseni C (2018) Skilled vision. In: Callan H (ed.) <em>The
International Encyclopedia of Anthropology</em>. 1st ed. Wiley, pp. 1–7.
DOI: <a href="https://doi.org/10.1002/9781118924396.wbiea1657"><span
data-custom-style="Hyperlink">10.1002/9781118924396.wbiea1657</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Haraway D (1988) Situated knowledges: The science question in
feminism and the privilege of partial perspective. <em>Feminist
Studies</em> 14(3). Feminist Studies, Inc.: 575–599. DOI: <a
href="https://doi.org/10.2307/3178066"><span
data-custom-style="Hyperlink">10.2307/3178066</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Hoijtink M and Leese M (2019) How (not) to talk about technology
international relations and the question of agency. In: Hoijtink M and
Leese M (eds) <em>Technology and Agency in International Relations</em>.
Emerging technologies, ethics and international affairs. London ; New
York: Routledge, pp. 1–24.</p>
</div>
<div data-custom-style="Bibliography">
<p>Hopman R and Mcharek A (2020) Facing the unknown suspect: Forensic
DNA phenotyping and the oscillation between the individual and the
collective. <em>BioSocieties</em> 15(3): 438–462. DOI: <a
href="https://doi.org/10.1057/s41292-020-00190-9"><span
data-custom-style="Hyperlink">10.1057/s41292-020-00190-9</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Hunger F (2023) <em>Unhype artificial intelligence! A proposal to
replace the deceiving terminology of AI.</em> 12 April. Zenodo. DOI: <a
href="https://doi.org/10.5281/zenodo.7524493"><span
data-custom-style="Hyperlink">10.5281/zenodo.7524493</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Huysmans J (2022) Motioning the politics of security: The primacy of
movement and the subject of security. <em>Security Dialogue</em> 53(3):
238–255. DOI: <a href="https://doi.org/10.1177/09670106211044015"><span
data-custom-style="Hyperlink">10.1177/09670106211044015</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Isin E and Ruppert E (2020) The birth of sensory power: How a
pandemic made it visible? <em>Big Data &amp; Society</em> 7(2). SAGE
Publications Ltd: 2053951720969208. DOI: <a href="https://doi.org/10.1177/2053951720969208"><span
data-custom-style="Hyperlink">10.1177/2053951720969208</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Jasanoff S (2004) <em>States of Knowledge: The Co-Production of
Science and Social Order</em>. Routledge Taylor &amp; Francis Group.</p>
</div>
<div data-custom-style="Bibliography">
<p>Ji Z, Lee N, Frieske R, et al. (2023) Survey of hallucination in
natural language generation. <em>ACM Computing Surveys</em> 55(12):
1–38. DOI: <a href="https://doi.org/10.1145/3571730"><span
data-custom-style="Hyperlink">10.1145/3571730</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Keyes O (2019) The gardener’s vision of data: Data science reduces
people to subjects that can be mined for truth. <em>Real Life Mag</em>.
Available at: <a href="https://reallifemag.com/the-gardeners-vision-of-data/"><span
data-custom-style="Hyperlink">https://reallifemag.com/the-gardeners-vision-of-data/</span></a>.
</p>
</div>
<div data-custom-style="Bibliography">
<p>Latour B (2005) <em>Reassembling the Social: An Introduction to
Actor-Network-Theory</em>. Clarendon Lectures in Management Studies.
Oxford; New York: Oxford University Press.</p>
</div>
<div data-custom-style="Bibliography">
<p>Leese M (2015) ‘We were taken by surprise’: Body scanners, technology
adjustment, and the eradication of failure. <em>Critical Studies on
Security</em> 3(3). Routledge: 269–282. DOI: <a
href="https://doi.org/10.1080/21624887.2015.1124743"><span
data-custom-style="Hyperlink">10.1080/21624887.2015.1124743</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Leese M (2019) Configuring warfare: Automation, control, agency. In:
Hoijtink M and Leese M (eds) Technology and Agency in International
Relations. Emerging technologies, ethics and international affairs.
London; New York: Routledge, pp. 42–65.</p>
</div>
<div data-custom-style="Bibliography">
<p>Lorusso S (2021) The user condition. Available at: <a href="https://theusercondition.computer/"><span
data-custom-style="Hyperlink">https://theusercondition.computer/</span></a>
(accessed 18 February 2021).</p>
</div>
<div data-custom-style="Bibliography">
<p>Lyon D (2003) <em>Surveillance as Social Sorting: Privacy, Risk, and
Digital Discrimination</em>. Psychology Press. Available at: <a
href="https://books.google.com?id=yCLFBfZwl08C"><span
data-custom-style="Hyperlink">https://books.google.com?id=yCLFBfZwl08C</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Mackenzie A (2017) <em>Machine Learners: Archaeology of a Data
Practice</em>. The MIT Press. DOI: <a href="https://doi.org/10.7551/mitpress/10302.001.0001"><span
data-custom-style="Hyperlink">10.7551/mitpress/10302.001.0001</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Maguire M, Frois C and Zurawski N (eds) (2014) <em>The Anthropology
of Security: Perspectives from the Frontline of Policing,
Counter-Terrorism and Border Control</em>. Anthropology, culture and
society. London: Pluto Press.</p>
</div>
<div data-custom-style="Bibliography">
<p>Mahony M (2021) Geographies of science and technology 1: Boundaries
and crossings. <em>Progress in Human Geography</em> 45(3): 586595. DOI:
<a href="https://doi.org/10.1177/0309132520969824"><span
data-custom-style="Hyperlink">10.1177/0309132520969824</span></a>.
</p>
</div>
<div data-custom-style="Bibliography">
<p>Markoff J (2020) Robots will need humans in future. <em>The New York
Times: Section B</em>, 22 May. New York. Available at: <a
href="https://www.nytimes.com/2020/05/21/technology/ben-shneiderman-automation-humans.html"><span
data-custom-style="Hyperlink">https://www.nytimes.com/2020/05/21/technology/ben-shneiderman-automation-humans.html</span></a>
(accessed 31 October 2023).</p>
</div>
<div data-custom-style="Bibliography">
<p>McCosker A and Wilken R (2020) <em>Automating Vision: The Social
Impact of the New Camera Consciousness</em>. 1st edition. Routledge.</p>
</div>
<div data-custom-style="Bibliography">
<p>Møhl P (2021) Seeing threats, sensing flesh: Humanmachine ensembles
at work. <em>AI &amp; SOCIETY</em> 36(4): 1243–1252. DOI: <a
href="https://doi.org/10.1007/s00146-020-01064-1"><span
data-custom-style="Hyperlink">10.1007/s00146-020-01064-1</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Muller B (2010) <em>Security, Risk and the Biometric State</em>.
Routledge. DOI: <a href="https://doi.org/10.4324/9780203858042"><span
data-custom-style="Hyperlink">10.4324/9780203858042</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>O’Sullivan S (2016) On the diagram (and a practice of diagrammatics).
In: Schneider K, Yasar B, and Lévy D (eds) <em>Situational Diagram</em>.
New York: Dominique Lévy, pp. 13–25.</p>
</div>
<div data-custom-style="Bibliography">
<p>Olwig KF, Grünenberg K, Møhl P, et al. (2019) <em>The Biometric
Border World: Technologies, Bodies and Identities on the Move</em>. 1st
ed. Routledge. DOI: <a href="https://doi.org/10.4324/9780367808464"><span
data-custom-style="Hyperlink">10.4324/9780367808464</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Pasquinelli M (2015) Anomaly detection: The mathematization of the
abnormal in the metadata society. Panel presentation.</p>
</div>
<div data-custom-style="Bibliography">
<p>Pasquinelli M (2019) How a machine learns and fails a grammar of
error for artificial intelligence. Available at: <a
href="https://spheres-journal.org/contribution/how-a-machine-learns-and-fails-a-grammar-of-error-for-artificial-intelligence/"><span
data-custom-style="Hyperlink">https://spheres-journal.org/contribution/how-a-machine-learns-and-fails-a-grammar-of-error-for-artificial-intelligence/</span></a>
(accessed 13 January 2021).</p>
</div>
<div data-custom-style="Bibliography">
<p>Pugliese J (2010) <em>Biometrics: Bodies, Technologies,
Biopolitics</em>. New York: Routledge. DOI: <a href="https://doi.org/10.4324/9780203849415"><span
data-custom-style="Hyperlink">10.4324/9780203849415</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Schurr C, Marquardt N and Militz E (2023) Intimate technologies:
Towards a feminist perspective on geographies of technoscience.
<em>Progress in Human Geography</em>. SAGE Publications Ltd:
03091325231151673. DOI: <a href="https://doi.org/10.1177/03091325231151673"><span
data-custom-style="Hyperlink">10.1177/03091325231151673</span></a>.
</p>
</div>
<div data-custom-style="Bibliography">
<p>Soon W and Cox G (2021) <em>Aesthetic Programming: A Handbook of
Software Studies</em>. London: Open Humanities Press. Available at: <a
href="http://www.openhumanitiespress.org/books/titles/aesthetic-programming/"><span
data-custom-style="Hyperlink">http://www.openhumanitiespress.org/books/titles/aesthetic-programming/</span></a>
(accessed 9 March 2021).</p>
</div>
<div data-custom-style="Bibliography">
<p>Srnicek N and De Sutter L (2017) <em>Platform Capitalism</em>. Theory
redux. Cambridge, UK ; Malden, MA: Polity.</p>
</div>
<div data-custom-style="Bibliography">
<p>Stevens N and Keyes O (2021) Seeing infrastructure: Race, facial
recognition and the politics of data. <em>Cultural Studies</em> 35(4-5):
833–853. DOI: <a href="https://doi.org/10.1080/09502386.2021.1895252"><span
data-custom-style="Hyperlink">10.1080/09502386.2021.1895252</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Suchman L (2006) <em>Human-Machine Reconfigurations: Plans and
Situated Actions</em>. 2nd edition. Cambridge University Press.</p>
</div>
<div data-custom-style="Bibliography">
<p>Suchman L (2012) Configuration. In: <em>Inventive Methods</em>.
Routledge, pp. 4860.</p>
</div>
<div data-custom-style="Bibliography">
<p>Suchman L (2020) Algorithmic warfare and the reinvention of accuracy.
<em>Critical Studies on Security</em> 8(2). Routledge: 175–187. DOI: <a
href="https://doi.org/10.1080/21624887.2020.1760587"><span
data-custom-style="Hyperlink">10.1080/21624887.2020.1760587</span></a>.
</p>
</div>
<div data-custom-style="Bibliography">
<p>Sudmann A (2021) Artificial neural networks, postdigital
infrastructures and the politics of temporality. In: Volmar A and Stine
K (eds) <em>Media Infrastructures and the Politics of Digital Time</em>.
Amsterdam University Press, pp. 279–294. DOI: <a href="https://doi.org/10.1515/9789048550753-017"><span
data-custom-style="Hyperlink">10.1515/9789048550753-017</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Tazzioli M (2018) Spy, track and archive: The temporality of
visibility in Eurosur and Jora. <em>Security Dialogue</em> 49(4):
272–288. DOI: <a href="https://doi.org/10.1177/0967010618769812"><span
data-custom-style="Hyperlink">10.1177/0967010618769812</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Thatcher J, O’Sullivan D and Mahmoudi D (2016) Data colonialism
through accumulation by dispossession: New metaphors for daily data.
<em>Environment and Planning D: Society and Space</em> 34(6). SAGE
Publications Ltd STM: 990–1006. DOI: <a href="https://doi.org/10.1177/0263775816633195"><span
data-custom-style="Hyperlink">10.1177/0263775816633195</span></a>.
</p>
</div>
<div data-custom-style="Bibliography">
<p>Uliasz R (2020) Seeing like an algorithm: Operative images and
emergent subjects. <em>AI &amp; SOCIETY</em>. DOI: <a
href="https://doi.org/10.1007/s00146-020-01067-y"><span
data-custom-style="Hyperlink">10.1007/s00146-020-01067-y</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>van de Ven R and Plájás IZ (2022) Inconsistent projections:
Con-figuring security vision through diagramming. <em>A Peer-Reviewed
Journal About</em> 11(1): 50–65. DOI: <a href="https://doi.org/10.7146/aprja.v11i1.134306"><span
data-custom-style="Hyperlink">10.7146/aprja.v11i1.134306</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Wilcox L (2017) Embodying algorithmic war: Gender, race, and the
posthuman in drone warfare. <em>Security Dialogue</em> 48(1): 11–28.
DOI: <a href="https://doi.org/10.1177/0967010616657947"><span
data-custom-style="Hyperlink">10.1177/0967010616657947</span></a>.</p>
</div>
<div data-custom-style="Bibliography">
<p>Zuboff S (2019) <em>The Age of Surveillance Capitalism: The Fight for
a Human Future at the New Frontier of Power</em>. First edition. New
York: Public Affairs.</p>
</div>
</section>
<section class="footnotes footnotes-end-of-document" role="doc-endnotes">
<hr />
<ol>
<li id="fn1" role="doc-endnote">
<div data-custom-style="Footnote Text">
<p><span data-custom-style="Footnote Characters"></span> The interface
software and code is available at <a
href="https://git.rubenvandeven.com/security_vision/svganim"><span
data-custom-style="Hyperlink">https://git.rubenvandeven.com/security_vision/svganim</span></a>
and <a href="https://gitlab.com/security-vision/chronodiagram"><span
data-custom-style="Hyperlink">https://gitlab.com/security-vision/chronodiagram</span></a>
</p>
</div>
<a href="#fnref1" class="footnote-back" role="doc-backlink">↩︎</a>
</li>
<li id="fn2" role="doc-endnote">
<div data-custom-style="Footnote Text">
<p><span data-custom-style="Footnote Characters"></span> The interviews
were conducted in several European countries: the majority in the
Netherlands, but also in Belgium, Hungary and Poland. Based on an
initial survey of algorithmic security vision practices in Europe we
identified various roles that are involved in such practices. Being a
rather small group of people, these interviewees do not serve as
“illustrative representatives” (Mol &amp; Law 2002, 16-17) of the field
they work in. However, as the interviewees have different cultural and
institutional affiliations, and hold different positions in working with
algorithms, vision and security, they cover a wide spectrum of
engagements with our research object.</p>
</div>
<a href="#fnref2" class="footnote-back" role="doc-backlink">↩︎</a>
</li>
<li id="fn3" role="doc-endnote">
<div data-custom-style="Footnote Text">
<p><span data-custom-style="Footnote Characters"></span> The interviews
were conducted by the first two authors, and at a later stage by Clemens
Baier. The conversations were largely unstructured, but began with two
basic questions. First, we asked the interviewees if they use diagrams
in their daily practice. We then asked: “when we speak of security
vision we speak of the use of computer vision in a security context.
Can you explain from your perspective what these concepts mean and how
they come together?” After the first few interviews, we identified some
recurrent themes, which we then specifically asked later interviewees to
discuss.</p>
</div>
<a href="#fnref3" class="footnote-back" role="doc-backlink">↩︎</a>
</li>
<li id="fn4" role="doc-endnote">
<div data-custom-style="Footnote Text">
<p><span data-custom-style="Footnote Characters"></span> Using
anthropomorphizing terms such as “neural networks,” “learning” and
“training” to denote algorithmic configurations and processes is
suggested to hype “artificial intelligence.” While we support the need
for an alternative terminology as proposed by Hunger (2023), here we
preserve the language of our interviewees.</p>
</div>
<a href="#fnref4" class="footnote-back" role="doc-backlink">↩︎</a>
</li>
</ol>
</section>
</body>
</html>