Big set of changes to load annotations incl. audio. Fixes player mode.

Ruben van de Ven 2022-05-30 15:02:28 +02:00
parent cc253295ee
commit 5eb9b934f4
10 changed files with 810 additions and 415 deletions

View file

@@ -7,3 +7,9 @@ poetry run python webserver.py
 ```
 `parse_offsets.py` can be used to pad the diagram in order to sync it with the audio. This is necessary e.g. after a network failure. It works by adding a line with the required offset to the `.json_appendable`-file.
+
+## record
+
+On Linux: for now, set the PulseAudio default input device to the monitor of the speakers. Then use wf-recorder:
+
+`wf-recorder -g"$(slurp)" -a -f recording.mp4`

View file

@@ -1,7 +1,10 @@
 from __future__ import annotations
+import asyncio
+import copy
 import json
 from os import X_OK, PathLike
 import os
+import subprocess
 from typing import Optional, Union
 import shelve
 from pydub import AudioSegment
@@ -12,9 +15,11 @@ import logging
 logger = logging.getLogger('svganim.strokes')
 
+Milliseconds = float
+Seconds = float
+
 
 class Annotation:
-    def __init__(self, tag: str, drawing: Drawing, t_in: float, t_out: float) -> None:
+    def __init__(self, tag: str, drawing: Drawing, t_in: Milliseconds, t_out: Milliseconds) -> None:
         self.tag = tag
         self.t_in = t_in
         self.t_out = t_out
@@ -30,9 +35,14 @@ class Annotation:
     def get_as_svg(self) -> str:
         return self.getAnimationSlice().get_as_svg()
 
+    def getJsonUrl(self) -> str:
+        return self.drawing.get_url() + f"?t_in={self.t_in}&t_out={self.t_out}"
+
 
 Filename = Union[str, bytes, PathLike[str], PathLike[bytes]]
+SliceId = tuple[str, float, float]  # (drawing id, t_in, t_out)
 
 
 class Drawing:
     def __init__(self, filename: Filename, metadata_dir: Filename, basedir: Filename) -> None:
@@ -70,24 +80,26 @@ class Drawing:
         if 'file' not in md['audio']:
             return None
-        return AudioSlice(filename=os.path.join(self.basedir, md['audio']['file'][1:]), offset=md['audio']['offset']*1000)
+        return AudioSlice(filename=os.path.join(self.basedir, md['audio']['file'][1:]), drawing=self, offset=md['audio']['offset']*1000)
 
     def get_animation(self) -> AnimationSlice:
         # with open(self.eventfile, "r") as fp:
         strokes = []
+        viewboxes = []
         with open(self.eventfile, "r") as fp:
             events = json.loads("[" + fp.read() + "]")
             for i, event in enumerate(events):
                 if i == 0:
-                    # metadata on first line
-                    pass
+                    # metadata on first line, add as initial viewbox to the slice
+                    viewboxes.append(TimedViewbox(-float('Infinity'), 0, 0, event[1], event[2]))
                 else:
                     if type(event) is list:
                         # ignore double metadatas, which appear when continuing an existing drawing
                         continue
                     if event["event"] == "viewbox":
-                        pass
+                        viewboxes.extend([TimedViewbox(
+                            b['t'], b['x'], b['y'], b['width'], b['height']) for b in event['viewboxes']])
                     if event["event"] == "stroke":
                         # points = []
                         # for i in range(int(len(stroke) / 4)):
@@ -100,7 +112,7 @@ class Drawing:
                             for p in event["points"]],
                     )
                 )
-        return AnimationSlice(strokes, audioslice=self.get_audio())
+        return AnimationSlice([self.id, None, None], strokes, viewboxes, audioslice=self.get_audio())
 
     def get_metadata(self):
         canvas = self.get_canvas_metadata()
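For reference: `get_animation()` parses the `.json_appendable` event log that the websocket handler in webserver.py appends to. A minimal sketch of the expected stream, with made-up values (the first line is a metadata array; stroke point tuples are `[x, y, last, t]`):

```python
# first line: metadata array [timestamp, canvas_width, canvas_height]
# later lines: one event object each
events = [
    [1653915748, 1920, 1080],
    {"event": "viewbox", "viewboxes": [
        {"t": 120, "x": 0, "y": 0, "width": 1920, "height": 1080}]},
    {"event": "stroke", "color": "#ff0000",
     "points": [[10.0, 20.0, 0, 0.5], [12.0, 24.0, 1, 0.6]]},
]
```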
@@ -127,6 +139,12 @@ class Viewbox:
         return f"{self.x} {self.y} {self.width} {self.height}"
 
 
+class TimedViewbox(Viewbox):
+    def __init__(self, time: Milliseconds, x: float, y: float, width: float, height: float):
+        super().__init__(x, y, width, height)
+        self.t = time
+
+
 FrameIndex = tuple[int, int]
@@ -134,36 +152,89 @@ class AnimationSlice:
     # either a whole drawing or the result of applying an annotation to a drawing (an excerpt)
     # TODO rename to AnimationSlice to include audio as well
     def __init__(
-        self, strokes: list[Stroke], t_in: float = 0, t_out: float = None, audioslice: AudioSlice = None
+        self, slice_id: SliceId, strokes: list[Stroke], viewboxes: list[TimedViewbox] = [], t_in: float = 0, t_out: float = None, audioslice: AudioSlice = None
     ) -> None:
+        self.id = slice_id
         self.strokes = strokes
+        self.viewboxes = viewboxes
         self.t_in = t_in
         self.t_out = t_out
         self.audio = audioslice
         # TODO: Audio
 
-    def get_bounding_box(self) -> Viewbox:
+    def asDict(self) -> dict:
+        """Can be used to json-ify the animation slice"""
+        # conversion necessary for when no t_in is given
+        boxes = [v.__dict__ for v in self.viewboxes]
+        for box in boxes:
+            if box['t'] == -float('Infinity'):
+                box['t'] = 0
+
+        drawing = {
+            "file": self.getUrl(),
+            "time": "-",  # creation date
+            # dimensions of the drawing canvas
+            "dimensions": [self.viewboxes[0].width, self.viewboxes[0].height],
+            "shape": [s.asDict() for s in self.strokes],
+            "viewboxes": boxes,
+            "bounding_box": self.get_bounding_box().__dict__,
+            "audio": self.getAudioDict() if self.audio else None
+        }
+        return drawing
+
+    def getAudioDict(self):
+        """Quick and dirty alternative to audio.asDict(); avoids passing around all sorts of data"""
+        return {
+            "file": '/files/' + self.getUrl('.mp3'),
+            "offset": 0
+            # "offset": self.audio.offset / 1000
+        }
+
+    def getUrl(self, extension='') -> str:
+        if not self.id[1] and not self.id[2]:
+            return self.id[0]
+        return self.id[0] + f"{extension}?t_in={self.t_in}&t_out={self.t_out}"
+
+    def get_bounding_box(self, stroke_thickness: float = 3.5) -> Viewbox:
+        """stroke_thickness 3.5 == 1mm. If it should not be considered, just set it to 0."""
+        if len(self.strokes) == 0:
+            # empty set
+            return Viewbox(0, 0, 0, 0)
+
         min_x, max_x = float("inf"), float("-inf")
         min_y, max_y = float("inf"), float("-inf")
         for s in self.strokes:
             for p in s.points:
-                if p.x < min_x:
-                    min_x = p.x
-                if p.x > max_x:
-                    max_x = p.x
-                if p.y < min_y:
-                    min_y = p.y
-                if p.y > max_y:
-                    max_y = p.y
+                x1 = p.x - stroke_thickness/2
+                x2 = p.x + stroke_thickness/2
+                y1 = p.y - stroke_thickness/2
+                y2 = p.y + stroke_thickness/2
+                if x1 < min_x:
+                    min_x = x1
+                if x2 > max_x:
+                    max_x = x2
+                if y1 < min_y:
+                    min_y = y1
+                if y2 > max_y:
+                    max_y = y2
         return Viewbox(min_x, min_y, max_x - min_x, max_y - min_y)
 
-    def getSlice(self, t_in: float, t_out: float) -> AnimationSlice:
+    def getSlice(self, t_in: Milliseconds, t_out: Milliseconds) -> AnimationSlice:
+        """Slice the slice. Times are in ms."""
         frame_in = self.getIndexForInPoint(t_in)
         frame_out = self.getIndexForOutPoint(t_out)
-        strokes = self.getStrokeSlices(frame_in, frame_out)
+        strokes = self.getStrokeSlices(frame_in, frame_out, t_in)
+        viewboxes = self.getViewboxesSlice(t_in, t_out)
         audio = self.audio.getSlice(t_in, t_out) if self.audio else None
-        return AnimationSlice(strokes, t_in, t_out, audio)
+        return AnimationSlice([self.id[0], t_in, t_out], strokes, viewboxes, t_in, t_out, audio)
 
     def get_as_svg_dwg(self) -> svgwrite.Drawing:
         box = self.get_bounding_box()
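How the new pieces fit together, as a sketch (the drawing name and directories are hypothetical). With the default `stroke_thickness` of 3.5 units (1mm), a single point at (10, 20) yields `Viewbox(8.25, 18.25, 3.5, 3.5)`:

```python
drawing = Drawing("mydrawing", metadata_dir="metadata", basedir="files")  # hypothetical
anim = drawing.get_animation()       # full AnimationSlice, incl. audio if present
excerpt = anim.getSlice(1000, 3000)  # 2s excerpt; t_in/t_out in milliseconds
box = excerpt.get_bounding_box()     # padded by stroke_thickness / 2 on every side
payload = excerpt.asDict()           # JSON-ready dict, as served by the /files/ handler
```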
@@ -171,7 +242,8 @@ class AnimationSlice:
         dwg = svgwrite.Drawing(fn, size=(box.width, box.height))
         dwg.viewbox(box.x, box.y, box.width, box.height)
         self.add_to_dwg(dwg)
-        dwg.defs.add(dwg.style("path{stroke-width:1mm;stroke-linecap: round;}"))
+        dwg.defs.add(
+            dwg.style("path{stroke-width:1mm;stroke-linecap: round;}"))
         return dwg
 
     def get_as_svg(self) -> str:
@@ -186,8 +258,32 @@ class AnimationSlice:
             stroke.add_to_dwg(group)
         dwg.add(group)
 
+    def getViewboxesSlice(self, t_in: Milliseconds, t_out: Milliseconds) -> list[TimedViewbox]:
+        """Extract the viewboxes between the in- and outpoints.
+        If there is one before the inpoint, move it to t_in, so that the animation
+        starts at the right position. The slice is offset by t_in ms.
+        """
+        viewboxes = []
+        for viewbox in self.viewboxes:
+            if viewbox.t > t_out:
+                break
+            if viewbox.t <= t_in:
+                # make sure the first box is the last box from _before_ the slice
+                firstbox = TimedViewbox(
+                    0, viewbox.x, viewbox.y, viewbox.width, viewbox.height)
+                if not len(viewboxes):
+                    viewboxes.append(firstbox)
+                else:
+                    viewboxes[0] = firstbox
+                continue
+            viewboxes.append(TimedViewbox(
+                viewbox.t - t_in, viewbox.x, viewbox.y, viewbox.width, viewbox.height))
+        return viewboxes
+
     def getStrokeSlices(
-        self, index_in: FrameIndex, index_out: FrameIndex
+        self, index_in: FrameIndex, index_out: FrameIndex, t_offset: Seconds = 0
     ) -> list[Stroke]:
         """Get list of Stroke/StrokeSlice based on in and out indexes
         Based on annotation.js getStrokesSliceForPathRange(in_point, out_point)
@@ -204,10 +300,10 @@ class AnimationSlice:
             out_i = index_out[1] if index_out[0] == i else len(
                 stroke.points) - 1
-            slices.append(StrokeSlice(stroke, in_i, out_i))
+            slices.append(StrokeSlice(stroke, in_i, out_i, t_offset))
         return slices
 
-    def getIndexForInPoint(self, ms) -> FrameIndex:
+    def getIndexForInPoint(self, ms: Milliseconds) -> FrameIndex:
         """Get the frame index (path, point) based on the given time
         The In point version (so the first index after ms)
         Equal to annotations.js findPositionForTime(ms)
@@ -235,14 +331,14 @@ class AnimationSlice:
                 break  # done :-)
         return (path_i, point_i)
 
-    def getIndexForOutPoint(self, ms) -> FrameIndex:
+    def getIndexForOutPoint(self, ms: Milliseconds) -> FrameIndex:
         """Get the frame index (path, point) based on the given time
         The Out point version (so the last index before ms)
         Equal to annotations.js findPositionForTime(ms)
         """
-        return self.getIndexForTime( ms)
+        return self.getIndexForTime(ms)
 
-    def getIndexForTime(self, ms) -> FrameIndex:
+    def getIndexForTime(self, ms: Milliseconds) -> FrameIndex:
         """Get the frame index (path, point) based on the given time
         Equal to annotations.js findPositionForTime(ms)
         """
@@ -269,49 +365,114 @@ class AnimationSlice:
             point_i = len(stroke.points) - 1
         return (path_i, point_i)
 
 
+audiocache = {}
+
+
 class AudioSlice:
-    def __init__(self, filename: Filename, t_in: float = None, t_out: float = None, offset: float = None):
+    def __init__(self, filename: Filename, drawing: Drawing, t_in: Milliseconds = None, t_out: Milliseconds = None, offset: Milliseconds = None):
         self.filename = filename
+        self.drawing = drawing
         self.t_in = t_in  # in ms
         self.t_out = t_out  # in ms
-        self.offset = offset  # in ms
+        self.offset = offset  # in ms  TODO: take from self.drawing metadata
 
     def getSlice(self, t_in: float, t_out: float) -> AnimationSlice:
-        return AudioSlice(self.filename, t_in, t_out, self.offset)
+        return AudioSlice(self.filename, self.drawing, t_in, t_out, self.offset)
 
-    def export(self, format="mp3"):
+    def asDict(self):
+        return {
+            "file": self.getUrl(),
+            # "offset": self.offset/1000
+        }
+
+    def getUrl(self):
+        fn = self.filename.replace("../files/audio", "/file/")
+        params = []
+        if self.t_in:
+            params.append(f"t_in={self.t_in}")
+        if self.t_out:
+            params.append(f"t_out={self.t_out}")
+        if len(params):
+            fn += "?" + "&".join(params)
+        return fn
+
+    async def export(self, format="mp3"):
         """Returns a file(-like) object with the encoded audio"""
         # Opening file and extracting segment
-        song = AudioSegment.from_file(self.filename)
-        start = self.t_in - self.offset
-        end = self.t_out - self.offset
-
-        if start < 0 and end < 0:
-            extract = AudioSegment.silent(
-                duration=end-start, frame_rate=song.frame_rate)
-        else:
-            if start < 0:
-                preroll = AudioSegment.silent(
-                    duration=start * -1, frame_rate=song.frame_rate)
-                start = 0
-            else:
-                preroll = None
-            if end > len(song):
-                postroll = AudioSegment.silent(
-                    duration=end - len(song), frame_rate=song.frame_rate)
-                end = len(song) - 1
-            else:
-                postroll = None
-            extract = song[start: end]
-            if preroll:
-                extract = preroll + extract
-            if postroll:
-                extract += postroll
-
-        # Saving
-        return extract.export(None, format=format)
+        start = int(self.t_in - self.offset)  # millisecond precision is enough
+        end = int(self.t_out - self.offset)  # millisecond precision is enough
+
+        # call ffmpeg directly, with the given in- and outpoint, so no unnecessary
+        # data is loaded and no double conversion (e.g. ogg -> wav -> ogg) is performed
+        out_f = io.BytesIO()
+
+        # build the converter command to export
+        conversion_command = [
+            "ffmpeg",
+            '-ss', f"{start}ms",
+            '-to', f"{end}ms",
+            "-i", self.filename,  # -ss before the input, so the whole file is not loaded
+        ]
+        conversion_command.extend([
+            "-f", format, '-',  # to stdout
+        ])
+
+        # read stdin / write stdout
+        logger.info("ffmpeg start")
+        proc = await asyncio.create_subprocess_exec(
+            *conversion_command,
+            stdout=asyncio.subprocess.PIPE,
+            stderr=asyncio.subprocess.DEVNULL)
+        p_out, p_err = await proc.communicate()
+        logger.info("ffmpeg finished")
+
+        if proc.returncode != 0:
+            raise Exception(
+                "Encoding failed. ffmpeg/avlib returned error code: {0}\n\nCommand:{1}".format(
+                    proc.returncode, conversion_command))
+
+        out_f.write(p_out)
+        out_f.seek(0)
+        return out_f
+
+        # old way, using AudioSegment: easy but slow (reads the whole ogg to wav,
+        # then exports the segment to ogg again)
+        # logger.info("loading audio")
+        # if self.filename in audiocache:
+        #     song = audiocache[self.filename]
+        # else:
+        #     song = AudioSegment.from_file(self.filename)
+        #     audiocache[self.filename] = song
+        # logger.info("loaded audio")
+        # if start < 0 and end < 0:
+        #     extract = AudioSegment.silent(
+        #         duration=end-start, frame_rate=song.frame_rate)
+        # else:
+        #     if start < 0:
+        #         preroll = AudioSegment.silent(
+        #             duration=start * -1, frame_rate=song.frame_rate)
+        #         start = 0
+        #     else:
+        #         preroll = None
+        #     if end > len(song):
+        #         postroll = AudioSegment.silent(
+        #             duration=end - len(song), frame_rate=song.frame_rate)
+        #         end = len(song) - 1
+        #     else:
+        #         postroll = None
+        #     extract = song[start: end]
+        #     if preroll:
+        #         extract = preroll + extract
+        #     if postroll:
+        #         extract += postroll
+        # # Saving
+        # return extract.export(None, format=format)
 
 
 class AnnotationIndex:
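`export()` is now a coroutine that shells out to ffmpeg, with `-ss`/`-to` placed before `-i` so only the requested span is read. A sketch of driving it outside Tornado (paths hypothetical; `get_audio()` may return None for drawings without audio):

```python
import asyncio

async def save_clip():
    drawing = Drawing("mydrawing", "metadata", "files")  # hypothetical args
    clip = drawing.get_audio().getSlice(1000, 3000)      # AudioSlice, times in ms
    buf = await clip.export(format="mp3")                # io.BytesIO with encoded audio
    with open("clip.mp3", "wb") as f:
        f.write(buf.read())

asyncio.run(save_clip())
```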
@@ -395,7 +556,7 @@ class AnnotationIndex:
 
 class Point:
-    def __init__(self, x: float, y: float, last: bool, t: float):
+    def __init__(self, x: float, y: float, last: bool, t: Seconds):
         self.x = float(x)
         self.y = float(y)  # if y == 0 it can still be an integer... odd python
         self.last = last
@@ -409,6 +570,9 @@ class Point:
         # TODO: change so that it actually scales to FIT dimensions
         return Point(self.x, self.y, self.last, self.t)
 
+    def asList(self) -> list:
+        return [self.x, self.y, 1 if self.last else 0, self.t]
+
 
 Points = list[Point]
 SvgDrawing = Union[svgwrite.container.SVG, svgwrite.container.Group]
@@ -419,25 +583,28 @@ class Stroke:
         self.color = color
         self.points = points
 
+    def asDict(self) -> dict:
+        return {"color": self.color, "points": [p.asList() for p in self.points]}
+
     def add_to_dwg(self, dwg: SvgDrawing):
         path = svgwrite.path.Path(d=self.get_as_d()).stroke(
             self.color, 1).fill("none")
         dwg.add(path)
 
-    def get_bounding_box(self) -> Viewbox:
-        min_x, max_x = float("inf"), float("-inf")
-        min_y, max_y = float("inf"), float("-inf")
-        for p in self.points:
-            if p.x < min_x:
-                min_x = p.x
-            if p.x > max_x:
-                max_x = p.x
-            if p.y < min_y:
-                min_y = p.y
-            if p.y > max_y:
-                max_y = p.y
-        return Viewbox(min_x, min_y, max_x - min_x, max_y - min_y)
+    # def get_bounding_box(self) -> Viewbox:
+    #     min_x, max_x = float("inf"), float("-inf")
+    #     min_y, max_y = float("inf"), float("-inf")
+    #     for p in self.points:
+    #         if p.x < min_x:
+    #             min_x = p.x
+    #         if p.x > max_x:
+    #             max_x = p.x
+    #         if p.y < min_y:
+    #             min_y = p.y
+    #         if p.y > max_y:
+    #             max_y = p.y
+    #     return Viewbox(min_x, min_y, max_x - min_x, max_y - min_y)
 
     def get_as_d(self):
         d = ""
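`Point.asList()`/`Stroke.asDict()` define the wire format that annotate.js consumes: `last` becomes 1 on a stroke's final point, `t` stays in seconds. Illustrative values:

```python
points = [Point(10.0, 20.0, False, 0.50), Point(12.0, 24.0, True, 0.62)]
assert [p.asList() for p in points] == [[10.0, 20.0, 0, 0.50], [12.0, 24.0, 1, 0.62]]
```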
@ -466,17 +633,22 @@ class Stroke:
class StrokeSlice(Stroke): class StrokeSlice(Stroke):
def __init__(self, stroke: Stroke, i_in: int = None, i_out: int = None) -> None: def __init__(self, stroke: Stroke, i_in: int = None, i_out: int = None, t_offset: Seconds = 0) -> None:
self.stroke = stroke self.stroke = stroke
self.i_in = 0 if i_in is None else i_in self.i_in = 0 if i_in is None else i_in
self.i_out = len(self.stroke.points) - 1 if i_out is None else i_out self.i_out = len(self.stroke.points) - 1 if i_out is None else i_out
# deepcopy points, because slices can be offset in time
self.points = copy.deepcopy(self.stroke.points[self.i_in: self.i_out + 1])
for p in self.points:
p.t -= t_offset
def slice_id(self): def slice_id(self):
return f"{self.i_in}-{self.i_out}" return f"{self.i_in}-{self.i_out}"
@property # @property
def points(self) -> Points: # def points(self) -> Points:
return self.stroke.points[self.i_in: self.i_out + 1] # return self.stroke.points[self.i_in: self.i_out + 1]
@property @property
def color(self) -> str: def color(self) -> str:

app/svganim/uimethods.py (new file, 8 lines)
View file

@@ -0,0 +1,8 @@
from hashlib import md5
def annotation_hash(handler, input):
return md5(input.encode()).hexdigest()
# def nmbr(handler, lst) -> int:
# leno
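`annotation_hash` follows Tornado's ui-method convention: a module-level function taking the request handler as its first argument. Once the module is registered (see the `Server` change below), every template can call it, e.g. `{{ annotation_hash(annotation.id) }}`. A minimal wiring sketch:

```python
import tornado.web
import svganim.uimethods

# all functions in the module become callable from templates
application = tornado.web.Application([], ui_methods=svganim.uimethods)
```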

View file

@@ -2,9 +2,12 @@
 <head>
     <title>Annotations</title>
+    <link rel="stylesheet" href="svganim.css">
     <style>
         body {
-            background: darkgray;
+            background: rgb(39, 40, 41);
+            font-family: sans-serif;
+            color: white
         }
 
         ul {
@@ -17,21 +20,95 @@
             ;
         }
 
+        summary h2 {
+            display: inline-block;
+            cursor: pointer;
+        }
+
+        details[open] summary {
+            color: rgb(224, 196, 196);
+        }
+
+        /* details ul{
+            display: none;
+        }
+        details[open] ul{
+            display: block;;
+        } */
+
         img {
             /* width: 400px; */
             background: white;
+            width: 300px;
+            height: 200px;
+            cursor: pointer;
+            padding: 20px;
+        }
+
+        .svganim_player {
+            display: inline-block;
+            position: relative;
+            width: 300px;
+            height: 200px;
+            overflow: hidden;
+            padding: 20px;
+            background: white;
+        }
+
+        .svganim_player svg {
+            width: 100%;
+            height: 100%;
+        }
+
+        .svganim_player.play:not(.loading) .controls {
+            visibility: hidden;
+        }
+
+        .svganim_player:hover .controls {
+            visibility: visible !important;
         }
     </style>
+    <script src="assets/nouislider-15.5.0.js"></script>
+    <script src="assets/wNumb-1.2.0.min.js"></script>
+    <script src="annotate.js"></script>
+    <script src="playlist.js"></script>
 </head>
 
 <body>
     {% for tag in index.tags %}
-    <h2>{{tag}}</h2>
+    <details>
+        <summary>
+            <h2>{{tag}} ({{len(index.tags[tag])}})</h2>
+        </summary>
     <ul>
         {% for annotation in index.tags[tag] %}
-        <li><img src="/annotation/{{ annotation.id }}.svg" data-audio="/annotation/{{ annotation.id }}.mp3"></li>
+        <li>
+            <img src="/annotation/{{ annotation.id }}.svg" loading="lazy" id="img-{{ annotation_hash(annotation.id) }}">
+            <div class="play" id="annotation-{{ annotation_hash(annotation.id) }}"></div>
+            <script type='text/javascript'>
+                (function () {
+                    let imgEl = document.getElementById('img-{{ annotation_hash(annotation.id) }}');
+                    imgEl.addEventListener('click', () => {
+                        imgEl.style.display = 'none';
+                        new Annotator(
+                            document.getElementById("annotation-{{ annotation_hash(annotation.id) }}"),
+                            "tags.json",
+                            "{{ annotation.getJsonUrl() }}",
+                            { is_player: true, crop_to_fit: true, autoplay: true }
+                        );
+                    })
+                })();
+            </script>
+        </li>
+        <!-- <li><img src="/annotation/{{ annotation.id }}.svg" data-audio="/annotation/{{ annotation.id }}.mp3"></li> -->
         {% end %}
     </ul>
+    </details>
     {% end %}
     <!-- <ul>
         {% for annotation in index.annotations %}
View file

@@ -12,6 +12,7 @@ import argparse
 import coloredlogs
 import glob
 import svganim.strokes
+import svganim.uimethods
 
 logger = logging.getLogger("svganim.webserver")
@@ -76,6 +77,7 @@ class WebSocketHandler(tornado.websocket.WebSocketHandler):
             return
 
         # write to an appendable json format: a file that should be wrapped in [] to be json-parsable
+        # TODO: use jsonlines -- not so different, but (semi-)standardized
         with open(
             os.path.join(self.config.storage, self.filename +
                          ".json_appendable"), "a"
@@ -236,13 +238,14 @@ class AudioListingHandler(tornado.web.RequestHandler):
 
 class AnimationHandler(tornado.web.RequestHandler):
-    def initialize(self, config):
+    def initialize(self, config, index: svganim.strokes.AnnotationIndex):
         self.config = config
+        self.index = index
 
-    def get(self, filename):
-        self.set_header("Content-Type", "application/json")
+    async def get(self, filename):
         # filename = self.get_argument("file", None)
         if filename == "":
+            self.set_header("Content-Type", "application/json")
             files = []
             names = [
                 name
@@ -275,34 +278,48 @@ class AnimationHandler(tornado.web.RequestHandler):
             files.sort(key=lambda k: k["mtime"])
             self.write(json.dumps(files))
         else:
-            path = os.path.join(
-                self.config.storage, os.path.basename(
-                    filename) + ".json_appendable"
-            )
-            drawing = {"file": filename, "shape": [], "viewboxes": []}
-            with open(path, "r") as fp:
-                events = json.loads("[" + fp.read() + "]")
-                for i, event in enumerate(events):
-                    if i == 0:
-                        # metadata on first line
-                        drawing["time"] = event[0]
-                        drawing["dimensions"] = [event[1], event[2]]
-                    else:
-                        if type(event) is list:
-                            # ignore double metadatas, which appear when continuing an existing drawing
-                            continue
-                        if event["event"] == "viewbox":
-                            drawing["viewboxes"].extend(event['viewboxes'])
-                        if event["event"] == "stroke":
-                            # points = []
-                            # for i in range(int(len(stroke) / 4)):
-                            #     p = stroke[i*4:i*4+4]
-                            #     points.append([float(p[0]), float(p[1]), int(p[2]), float(p[3])])
-                            drawing["shape"].append(
-                                {"color": event["color"],
-                                 "points": event["points"]}
-                            )
-            self.write(json.dumps(drawing))
+            if filename[-4:] == ".svg":
+                extension = "svg"
+                filename = filename[:-4]
+            elif filename[-4:] == ".mp3":
+                extension = "mp3"
+                filename = filename[:-4]
+            elif filename[-4:] == ".wav":
+                extension = "wav"
+                filename = filename[:-4]
+            else:
+                extension = None
+
+            logger.info(f"file {filename=}, {extension=}")
+            # if annotation_id not in self.index.annotations:
+            #     raise tornado.web.HTTPError(404)
+            # annotation = self.index.annotations[annotation_id]
+
+            t_in = self.get_argument('t_in', None)
+            t_out = self.get_argument('t_out', None)
+
+            animation = self.index.drawings[filename].get_animation()
+            if t_in is not None and t_out is not None:
+                animation = animation.getSlice(float(t_in), float(t_out))
+
+            if extension == "svg":
+                self.set_header("Content-Type", "image/svg+xml")
+                self.write(animation.get_as_svg())
+            elif extension == "mp3":
+                self.set_header("Content-Type", "audio/mp3")
+                audio = await animation.audio.export(format="mp3")
+                self.write(audio.read())
+            elif extension == "wav":
+                self.set_header("Content-Type", "audio/wav")
+                audio = await animation.audio.export(format="wav")
+                self.write(audio.read())
+            else:
+                self.set_header("Content-Type", "application/json")
+                self.write(json.dumps(animation.asDict()))
 
 
 class TagHandler(tornado.web.RequestHandler):
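The reworked handler serves one animation slice in four representations, selected by extension, with optional `t_in`/`t_out` (in ms) for sub-slicing. A sketch of the resulting URL scheme (host, port and drawing id are assumptions):

```python
import urllib.request

base = "http://localhost:8888/files/mydrawing"
meta = urllib.request.urlopen(base + "?t_in=1000&t_out=3000").read()     # JSON (asDict)
svg = urllib.request.urlopen(base + ".svg?t_in=1000&t_out=3000").read()  # image/svg+xml
mp3 = urllib.request.urlopen(base + ".mp3?t_in=1000&t_out=3000").read()  # audio/mp3; .wav works too
```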
@@ -538,7 +555,7 @@ class Server:
                     "config": self.config,
                 },
             ),
-            (r"/files/(.*)", AnimationHandler, {"config": self.config}),
+            (r"/files/(.*)", AnimationHandler, {"config": self.config, "index": self.index}),
             (
                 r"/audio/(.+)",
                 tornado.web.StaticFileHandler,
@@ -573,6 +590,7 @@ class Server:
             ],
             debug=True,
             autoreload=True,
+            ui_methods=svganim.uimethods,
         )
         application.listen(self.config.port)
         tornado.ioloop.IOLoop.current().start()

View file

@@ -4,6 +4,7 @@
 <head>
     <meta charset="utf-8">
     <title>Annotate a line animation</title>
+    <link rel="stylesheet" href="svganim.css">
     <style media="screen">
         body {
             /* background: black;
@@ -11,6 +12,10 @@
             background: lightgray;
         }
 
+        body.player {
+            background: rgb(39, 40, 41);
+        }
+
         #sample,
         svg {
             position: absolute;
@@ -24,8 +29,18 @@
             /* border: solid 2px lightgray; */
         }
 
-        svg .background {
-            fill: white
+        body.player svg {
+            height: calc(100% - 40px);
+            background-color: white;
+        }
+
+        #wrapper {
+            position: absolute;
+            top: 0;
+            right: 0;
+            bottom: 0;
+            left: 0;
+            background: none;
         }
 
         img {
@@ -39,280 +54,7 @@
             z-index: 1;
         }
 
-        path {
-            fill: none;
-            stroke: gray;
-            stroke-width: 1mm;
-            stroke-linecap: round;
-        }
-
-        g.before path {
-            opacity: 0.5;
-            stroke: gray !important;
-        }
-
-        g.after path,
-        path.before_in {
-            opacity: .1;
-            stroke: gray !important;
-        }
-
-        #wrapper {
-            position: absolute;
-            top: 0;
-            right: 0;
-            bottom: 0;
-            left: 0;
-            background: none;
-        }
-
-        .gray {
-            position: absolute;
-            background: rgba(255, 255, 255, 0.7);
-        }
-
-        .controls--playback {
-            /* display:flex; */
-        }
-
-        .timecode {
-            position: absolute;
-            right: 100%;
-            width: 5%;
-            font-size: 8px;
-        }
-
-        .controls--playback input[type='range'] {
-            /* position: absolute;
-            z-index: 100;
-            bottom: 0;
-            left: 0;
-            right: 0; */
-            width: 100%;
-        }
-
-        .controls button.paused,
-        .controls button.playing {
-            position: absolute;
-            left: 100%;
-            width: 30px;
-            height: 30px;
-        }
-
-        .controls button.paused::before {
-            content: '⏵';
-        }
-
-        .controls button.playing::before {
-            content: '⏸';
-        }
-
-        .buffering .controls button:is(.playing, .paused)::before {
-            content: '↺';
-            display: inline-block;
-            animation: rotate 1s infinite;
-        }
-
-        @keyframes rotate {
-            0% {
-                transform: rotate(359deg)
-            }
-
-            100% {
-                transform: rotate(0deg)
-            }
-        }
-
-        .controls {
-            position: absolute !important;
-            z-index: 100;
-            bottom: 10px;
-            left: 5%;
-            right: 0;
-            width: 90%;
-        }
-
-        .scrubber {}
-
-        .tags {
-            line-height: 40px;
-            display: flex;
-            flex-direction: row;
-            padding: 0;
-            margin: 0;
-        }
-
-        .tags .tag {
-            display: block;
-            padding: 5px;
-            border: solid 1px darkgray;
-            flex-grow: 1;
-            text-align: center;
-        }
-
-        .tags li {
-            display: block;
-        }
-
-        .tags .subtags {
-            padding: 0;
-            font-size: 80%;
-            display: flex;
-            flex-direction: row;
-            flex-wrap: wrap;
-        }
-
-        .tags .subtags .tag {
-            padding: 2px;
-        }
-
-        .tags .tag:hover {
-            cursor: pointer;
-            background: darkgray;
-        }
-
-        .tags .tag.selected {
-            background: #3FB8AF;
-        }
-
-        .tags .tag.annotation-rm {
-            /* display: none; */
-            overflow: hidden;
-            color: red;
-            font-size: 30px;
-            width: 0;
-            flex-grow: 0;
-            padding: 5px 0;
-            transition: width .3s;
-            pointer-events: none;
-            border: none;
-            direction: rtl;
-            /* hide behind bar, instead into nothing */
-        }
-
-        .selected-annotation .tags .tag.annotation-rm {
-            color: red;
-            display: block;
-            width: 30px;
-            pointer-events: all;
-        }
-
-        .controls .annotation-comment {
-            width: 100%;
-            visibility: hidden;
-        }
-
-        .selected-annotation .controls .annotation-comment {
-            visibility: visible;
-        }
-
-        .noUi-handle:focus {
-            /* background: red;; */
-            border: solid 2px #601be0;
-        }
-
-        /* .noUi-handle:focus::before, .noUi-handle:focus::after{
-            background: #601be0;
-        } */
-
-        .tags .tag span {
-            display: inline-block;
-            width: 20px;
-            height: 20px;
-            margin-right: 10px;
-            vertical-align: middle;
-            border-radius: 5px;
-        }
-
-        .tags .subtags .tag span {
-            width: 10px;
-            height: 10px;
-            margin-right: 2px;
-        }
-
-        .annotations {
-            height: 30px;
-            /* border: solid 1px darkgray; */
-            position: relative;
-        }
-
-        .annotations>div {
-            opacity: .4;
-            background: lightseagreen;
-            position: absolute;
-            bottom: 0;
-            top: 0;
-        }
-
-        .annotations>div:hover,
-        .annotations>div.selected {
-            opacity: 1;
-            cursor: pointer;
-        }
-
-        .unsaved::before {
-            content: '*';
-            color: red;
-            display: inline-block;
-            text-align: center;
-            font-size: 30px;
-            position: absolute;
-            top: 10px;
-            left: 10px;
-        }
-
-        .saved::before {
-            content: '\2713';
-            display: inline-block;
-            color: green;
-            text-align: center;
-            font-size: 30px;
-            position: absolute;
-            top: 10px;
-            left: 10px;
-        }
-
-        .noUi-horizontal .noUi-touch-area {
-            cursor: ew-resize;
-        }
-
-        #interface .noUi-horizontal .noUi-tooltip {
-            /* tooltips go below the buttons */
-            bottom: auto;
-            top: 110%;
-        }
-
-        .audioconfig {
-            z-index: 9;
-            background: black;
-            color: white;
-            position: relative;
-            width: 100px;
-            /* as wide as audio controls only */
-            overflow: hidden;
-            white-space: nowrap;
-            left: -50px;
-        }
-
-        .audioconfig:hover {
-            width: auto;
-            left: 0px;
-        }
-
-        .audioconfig select,
-        .audioconfig input {
-            margin: 10px;
-        }
-
-        audio {
-            vertical-align: middle;
-            width: 100px;
-            /* hides seek head */
-        }
-
         .playlist img {
             position: static;
@@ -335,6 +77,10 @@
         font-size: 6pt;
     }
 
+    body:not(.help) .help {
+        display: none;
+    }
+
     .help li {
         display: inline-block;
         color: gray;
@@ -384,10 +130,19 @@
     <script type='text/javascript'>
         let ann;
         if (location.search) {
+            const params = new URLSearchParams(location.search);
+            const is_player = !!parseInt(params.get('player'));
+            const crop_to_fit = !!parseInt(params.get('crop'));
+            if (is_player) {
+                document.body.classList.add('player');
+            } else {
+                document.body.classList.add('annotator');
+            }
             ann = new Annotator(
                 document.getElementById("interface"),
                 "tags.json",
-                location.search.substring(1)
+                params.get('file'),
+                { is_player: is_player, crop_to_fit: crop_to_fit }
             );
         } else {
             const playlist = new Playlist(document.getElementById("interface"), '/files/');
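annotate.html now doubles as the player: the page reads `file`, `player` and `crop` from the query string (playlist.js below builds these links). The URL contract, sketched (drawing id hypothetical):

```python
from urllib.parse import urlencode

print("annotate.html?" + urlencode({"file": "mydrawing", "player": 1, "crop": 1}))
# -> annotate.html?file=mydrawing&player=1&crop=1
```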

View file

@@ -112,11 +112,13 @@ class StrokeSlice {
 
 class Annotator extends EventTarget {
     constructor(wrapperEl, tagFile, fileurl, config) {
+        fileurl = fileurl.replace("&amp;", "&"); // little hack: tornadoweb escapes the ampersand automatically for some reason
         super();
         this.config = {
             is_player: config && config.hasOwnProperty('is_player') ? config.is_player : false, // in player mode annotations are not loaded, nor is the annotator shown
             crop_to_fit: config && config.hasOwnProperty('crop_to_fit') ? config.crop_to_fit : false, // don't animate the viewport, but show the whole drawing
+            autoplay: config && config.hasOwnProperty('autoplay') ? config.autoplay : false, // immediately start playback
         }
 
         this.formatter = wNumb({
@@ -194,9 +196,11 @@ class Annotator extends EventTarget {
         this.controlsEl.appendChild(this.scrubberEl);
 
+        if (!this.config.is_player) {
             this.annotationsEl = document.createElement('div');
             this.annotationsEl.classList.add('annotations')
             this.controlsEl.appendChild(this.annotationsEl);
+        }
 
         this.inPointPosition = [0, 0];
@@ -279,7 +283,7 @@ class Annotator extends EventTarget {
         this.commentEl.placeholder = "comment";
         this.commentEl.value = "";
         this.commentEl.addEventListener('keyup', (e) => {
-            if (ev.key == 'Escape') {
+            if (e.key == 'Escape') {
                 this.commentEl.blur() // deselect annotation, and deselect commentEl
             } else {
                 e.stopPropagation(); // prevent the keyup event from propagating and setting i/o points
@@ -448,6 +452,8 @@ class Annotator extends EventTarget {
             method: 'GET',
         });
 
+        this.wrapperEl.classList.add('loading');
+
         fetch(request)
             .then(response => response.json())
             .then(data => {
@@ -456,19 +462,36 @@ class Annotator extends EventTarget {
                 const metadata_req = new Request(`/annotations/${data.file}`, {
                     method: 'GET',
                 });
-                fetch(metadata_req)
+                return fetch(metadata_req)
                     .then(response => response.ok ? response.json() : null)
                     .then(metadata => {
                         if (metadata !== null) {
                             metadata.annotations = metadata.annotations.map((a) => new Annotation(a.tag, a.t_in, a.t_out, a.hasOwnProperty('comment') ? a.comment : ""))
                         }
-                        this.loadStrokes(data, metadata)
+                        return this.loadStrokes(data, metadata)
                    })
                    .catch(e => console.log(e));
             } else {
-                this.loadStrokes(data, null);
+                return this.loadStrokes(data, null);
             }
-        }).catch(e => console.log(e));
+        })
+            .then(() => {
+                // play on click for player
+                if (this.config.is_player) {
+                    this.svgEl.addEventListener('click', (ev) => {
+                        console.log('clicked for play/pause');
+                        this.playPause();
+                    });
+                }
+                // autoplay if necessary
+                if (this.config.autoplay) {
+                    this.play(); // play should remove 'loading'
+                } else {
+                    this.wrapperEl.classList.remove('loading');
+                }
+            })
+            .catch(e => console.log(e));
     }
 
     updateState() {
@@ -664,7 +687,7 @@ class Annotator extends EventTarget {
             this.annotations = metadata.annotations;
         }
 
-        if ((metadata && metadata.hasOwnProperty('audio')) || drawing.hasOwnProperty('audio')) {
+        if ((metadata && metadata.hasOwnProperty('audio')) || (drawing.hasOwnProperty('audio') && drawing.audio)) {
             if (metadata && metadata.hasOwnProperty('audio')) {
                 this.audioFile = metadata.audio.file
                 this.audioOffset = Number.parseFloat(metadata.audio.offset);
@@ -683,11 +706,8 @@ class Annotator extends EventTarget {
         this.currentPointI = null;
         this.currentViewboxI = null;
         this.dimensions = drawing.dimensions;
-        if (!this.config.crop_to_fit) {
-            this.svgEl.setAttribute('viewBox', `0 0 ${this.dimensions[0]} ${this.dimensions[1]}`)
-        } else {
-            this.svgEl.setAttribute('viewBox', `${drawing.bounding_box.x} ${drawing.bounding_box.y} ${drawing.bounding_box.width} ${drawing.bounding_box.height}`)
-        }
+        this.bounding_box = drawing.bounding_box;
+        this.updateViewbox();
 
         // let bgEl = document.createElementNS('http://www.w3.org/2000/svg', 'rect');
         // bgEl.setAttribute("x", 0);
@@ -704,7 +724,7 @@ class Annotator extends EventTarget {
         this.nextViewboxTimeout = null;
         this._setPausedFlag(true);
 
-        this.setupAudioConfig().then(() => {
+        return this.setupAudioConfig().then(() => {
             // this.setUpAnnotator()
             let keyEl;
             if (this.config.is_player) {
@@ -1017,12 +1037,36 @@ class Annotator extends EventTarget {
             return;
         }
         this.currentViewboxI = box_i
-        const b = this.viewboxes[box_i];
         if (!this.config.crop_to_fit) {
-            this.svgEl.setAttribute('viewBox', `${b.x} ${b.y} ${this.dimensions[0]} ${this.dimensions[1]}`)
+            this.updateViewbox();
         }
     }
 
+    updateViewbox() {
+        if (this.config.crop_to_fit) {
+            this.svgEl.setAttribute('viewBox', `${this.bounding_box.x} ${this.bounding_box.y} ${this.bounding_box.width} ${this.bounding_box.height}`);
+        } else {
+            let x, y;
+            if (this.currentViewboxI !== null) {
+                x = this.viewboxes[this.currentViewboxI].x;
+                y = this.viewboxes[this.currentViewboxI].y;
+            } else {
+                x = 0;
+                y = 0;
+            }
+            const w = this.dimensions[0];
+            const h = this.dimensions[1];
+            this.svgEl.setAttribute('viewBox', `${x} ${y} ${w} ${h}`);
+        }
+    }
+
+    toggleCrop() {
+        this.config.crop_to_fit = !this.config.crop_to_fit;
+        this.updateViewbox();
+    }
+
     getNextPosition(path_i, point_i) {
         const path = this.strokes[path_i];
         let next_path, next_point;
@@ -1153,6 +1197,8 @@ class Annotator extends EventTarget {
         const startPlayback = () => {
             console.log('start playback');
 
+            this.wrapperEl.classList.remove('loading'); // no longer loading
+
             this.startTimeMs = window.performance.now() - this._currentTimeMs;
             // strokes
             if (this._currentTimeMs < 0) {
@@ -1174,11 +1220,10 @@ class Annotator extends EventTarget {
                 resolve();
             }
 
-            if (this.audioEl.readyState !== 4) { // not ready to play after seeking audio.
+            if (this.audioEl.src.length && this.audioEl.readyState !== 4) { // not ready to play after seeking audio
                 console.log('wait for audio before playback');
-                this.wrapperEl.classList.add('buffering');
+                this.wrapperEl.classList.add('loading');
                 this.audioEl.addEventListener('canplaythrough', () => {
-                    this.wrapperEl.classList.remove('buffering');
                     startPlayback()
                 }, { once: true }); // only once
             } else {

View file

@@ -5,8 +5,22 @@
 <meta http-equiv="X-UA-Compatible" content="IE=edge">
 <meta name="viewport" content="width=device-width, initial-scale=1.0">
 <title>Drawing & Annotating Path Animations</title>
+<style>
+    body {
+        font-family: sans-serif;
+        font-size: 20px;
+    }
+
+    a {
+        line-height: 2;
+        text-decoration: none;
+    }
+
+    a:hover {
+        text-decoration: underline;
+    }
+</style>
 </head>
 
 <body>
+    <h1>Diagrams</h1>
     <ul>
         <li><a href="/index">Tags</a></li>
         <li><a href="/annotate.html">Drawings</a></li>

View file

@@ -54,8 +54,8 @@ class Playlist {
         playEl.classList.add('play');
         playEl.innerText = "Play";
         playEl.href = location;
-        playEl.pathname = "play.html";
-        playEl.search = "?" + file.name;
+        playEl.pathname = "annotate.html";
+        playEl.search = "?file=" + file.name + "&player=1";
         linksEl.append(playEl);
 
         const annotateEl = document.createElement("a");
@@ -63,7 +63,7 @@ class Playlist {
         annotateEl.innerText = "Annotate";
         annotateEl.href = location;
         annotateEl.pathname = "annotate.html";
-        annotateEl.search = "?" + file.name;
+        annotateEl.search = "?file=" + file.name;
         linksEl.append(annotateEl);
 
         const drawEl = document.createElement("a");

app/www/svganim.css (new file, 300 lines)
View file

@@ -0,0 +1,300 @@
svg .background {
fill: white
}
path {
fill: none;
stroke: gray;
stroke-width: 1mm;
stroke-linecap: round;
}
g.before path {
opacity: 0.5;
stroke: gray !important;
}
g.after path,
path.before_in {
opacity: .1;
stroke: gray !important;
}
.gray {
position: absolute;
background: rgba(255, 255, 255, 0.7);
}
.controls--playback {
/* display:flex; */
position: relative;
}
.timecode {
position: absolute;
right: 100%;
width: 5%;
font-size: 8px;
}
.controls--playback input[type='range'] {
/* position: absolute;
z-index: 100;
bottom: 0;
left: 0;
right: 0; */
width: 100%;
}
.controls button.paused,
.controls button.playing {
position: absolute;
left: 100%;
width: 30px;
height: 30px;
}
.controls button.paused::before {
content: '⏵';
}
.controls button.playing::before {
content: '⏸';
}
.loading .controls button:is(.playing, .paused)::before {
content: '↺';
display: inline-block;
animation: rotate 1s infinite;
}
@keyframes rotate {
0% {
transform: rotate(359deg)
}
100% {
transform: rotate(0deg)
}
}
.controls {
position: absolute !important;
z-index: 100;
bottom: 10px;
left: 5%;
right: 0;
width: 90%;
}
.scrubber {}
.tags {
line-height: 40px;
display: flex;
flex-direction: row;
padding: 0;
margin: 0;
}
.tags .tag {
display: block;
padding: 5px;
border: solid 1px darkgray;
flex-grow: 1;
text-align: center;
}
.tags li {
display: block;
}
.tags .subtags {
padding: 0;
font-size: 80%;
display: flex;
flex-direction: row;
flex-wrap: wrap;
}
.tags .subtags .tag {
padding: 2px;
}
.tags .tag:hover {
cursor: pointer;
background: darkgray;
}
.tags .tag.selected {
background: #3FB8AF;
}
.tags .tag.annotation-rm {
/* display: none; */
overflow: hidden;
color: red;
font-size: 30px;
width: 0;
flex-grow: 0;
padding: 5px 0;
transition: width .3s;
pointer-events: none;
border: none;
direction: rtl;
/* hide behind bar, instead into nothing */
}
.selected-annotation .tags .tag.annotation-rm {
color: red;
display: block;
width: 30px;
pointer-events: all;
}
.controls .annotation-comment {
width: 100%;
visibility: hidden;
}
.selected-annotation .controls .annotation-comment {
visibility: visible;
}
.noUi-handle:focus {
/* background: red;; */
border: solid 2px #601be0;
}
/* .noUi-handle:focus::before, .noUi-handle:focus::after{
background: #601be0;
} */
.tags .tag span {
display: inline-block;
width: 20px;
height: 20px;
margin-right: 10px;
vertical-align: middle;
border-radius: 5px;
}
.tags .subtags .tag span {
width: 10px;
height: 10px;
margin-right: 2px;
}
.annotations {
height: 30px;
/* border: solid 1px darkgray; */
position: relative;
}
.annotations>div {
opacity: .4;
background: lightseagreen;
position: absolute;
bottom: 0;
top: 0;
}
.annotations>div:hover,
.annotations>div.selected {
opacity: 1;
cursor: pointer;
}
.unsaved::before {
content: '*';
color: red;
display: inline-block;
text-align: center;
font-size: 30px;
position: absolute;
top: 10px;
left: 10px;
}
.saved::before {
content: '\2713';
display: inline-block;
color: green;
text-align: center;
font-size: 30px;
position: absolute;
top: 10px;
left: 10px;
}
.noUi-horizontal .noUi-touch-area {
cursor: ew-resize;
}
#interface .noUi-horizontal .noUi-tooltip {
/* tooltips go below the buttons */
bottom: auto;
top: 110%;
}
.audioconfig {
z-index: 9;
background: black;
color: white;
position: relative;
width: 100px;
/* as wide as audio controls only */
overflow: hidden;
white-space: nowrap;
left: -50px;
}
.audioconfig:hover {
width: auto;
left: 0px;
}
.audioconfig select,
.audioconfig input {
margin: 10px;
}
audio {
vertical-align: middle;
width: 100px;
/* hides seek head */
}
.svganim_annotator {
display: flex;
flex-direction: column;
height: 100%;
}
.svganim_annotator svg {
top: 20px;
background: white;
margin-left: 20px;
flex-shrink: 1;
flex-grow: 1;
position: static;
}
.svganim_annotator .audioconfig {
order: -1;
flex-grow: 0;
flex-shrink: 0;
position: static;
}
.svganim_annotator .controls {
margin-left: 5%;
position: static !important;
}