Big set of changes to load annotations incl. audio. Fixes player mode.

Ruben van de Ven 2022-05-30 15:02:28 +02:00
parent cc253295ee
commit 5eb9b934f4
10 changed files with 810 additions and 415 deletions

View File

@ -6,4 +6,10 @@ Create a hand drawn vector animation.
poetry run python webserver.py
```
`parse_offsets.py` can be used to pad the diagram in order to sync it with the audio. This is necessary e.g. after a network failure. It works by adding a line with the required offset to the `.json_appendable` file.
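For reference, the file holds comma-separated JSON values, one per line; the server wraps the contents in `[]` before parsing. A sketch of the format (illustrative values; the first line holds metadata, later lines hold events):
```
["2022-05-30T15:02:28", 1920, 1080],
{"event": "stroke", "color": "#000000", "points": [[0.1, 0.2, 0, 150]]},
{"event": "viewbox", "viewboxes": [{"t": 200, "x": 0, "y": 0, "width": 1920, "height": 1080}]}
```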
## record
On Linux: for now, set the PulseAudio default input device to the monitor of the speakers, then record with wf-recorder:
`wf-recorder -g"$(slurp)" -a -f recording.mp4`

View File

@ -1,7 +1,10 @@
from __future__ import annotations
import asyncio
import copy
import json
from os import X_OK, PathLike
import os
import subprocess
from typing import Optional, Union
import shelve
from pydub import AudioSegment
@ -12,9 +15,11 @@ import logging
logger = logging.getLogger('svganim.strokes')
Milliseconds = float
Seconds = float
class Annotation:
def __init__(self, tag: str, drawing: Drawing, t_in: float, t_out: float) -> None:
def __init__(self, tag: str, drawing: Drawing, t_in: Milliseconds, t_out: Milliseconds) -> None:
self.tag = tag
self.t_in = t_in
self.t_out = t_out
@ -29,10 +34,15 @@ class Annotation:
def get_as_svg(self) -> str:
return self.getAnimationSlice().get_as_svg()
def getJsonUrl(self) -> str:
return self.drawing.get_url() + f"?t_in={self.t_in}&t_out={self.t_out}"
Filename = Union[str, bytes, PathLike[str], PathLike[bytes]]
SliceId = tuple[str, float, float]
class Drawing:
def __init__(self, filename: Filename, metadata_dir: Filename, basedir: Filename) -> None:
@ -70,24 +80,26 @@ class Drawing:
if 'file' not in md['audio']:
return None
return AudioSlice(filename=os.path.join(self.basedir, md['audio']['file'][1:]), offset=md['audio']['offset']*1000)
return AudioSlice(filename=os.path.join(self.basedir, md['audio']['file'][1:]), drawing=self, offset=md['audio']['offset']*1000)
def get_animation(self) -> AnimationSlice:
# with open(self.eventfile, "r") as fp:
strokes = []
viewboxes = []
with open(self.eventfile, "r") as fp:
events = json.loads("[" + fp.read() + "]")
for i, event in enumerate(events):
if i == 0:
# metadata on first line
pass
# metadata on first line, add as initial viewbox to slice
viewboxes.append(TimedViewbox(-float('Infinity'), 0, 0, event[1], event[2]))
else:
if type(event) is list:
# ignore duplicate metadata lines, which appear when continuing an existing drawing
continue
if event["event"] == "viewbox":
pass
viewboxes.extend([TimedViewbox(
b['t'], b['x'], b['y'], b['width'], b['height']) for b in event['viewboxes']])
if event["event"] == "stroke":
# points = []
# for i in range(int(len(stroke) / 4)):
@ -100,7 +112,7 @@ class Drawing:
for p in event["points"]],
)
)
return AnimationSlice(strokes, audioslice=self.get_audio())
return AnimationSlice([self.id, None, None], strokes, viewboxes, audioslice=self.get_audio())
def get_metadata(self):
canvas = self.get_canvas_metadata()
@ -127,6 +139,12 @@ class Viewbox:
return f"{self.x} {self.y} {self.width} {self.height}"
class TimedViewbox(Viewbox):
def __init__(self, time: Milliseconds, x: float, y: float, width: float, height: float):
super().__init__(x, y, width, height)
self.t = time
FrameIndex = tuple[int, int]
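# a FrameIndex addresses (stroke index, point index), cf. getIndexForTime()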
@ -134,36 +152,89 @@ class AnimationSlice:
# either a whole drawing or the result of applying an annotation to a drawing (an excerpt)
# TODO: rename, as it now includes audio as well
def __init__(
self, strokes: list[Stroke], t_in: float = 0, t_out: float = None, audioslice: AudioSlice = None
self, slice_id: SliceId, strokes: list[Stroke], viewboxes: list[TimedViewbox] = [], t_in: float = 0, t_out: float = None, audioslice: AudioSlice = None
) -> None:
self.id = slice_id
self.strokes = strokes
self.viewboxes = viewboxes
self.t_in = t_in
self.t_out = t_out
self.audio = audioslice
# TODO: Audio
def get_bounding_box(self) -> Viewbox:
def asDict(self) -> dict:
"""Can be used to json-ify the animation-slice
"""
# conversion necessary for when no t_in is given
boxes = [v.__dict__ for v in self.viewboxes]
for box in boxes:
if box['t'] == -float('Infinity'):
box['t'] = 0
drawing = {
"file": self.getUrl(),
"time": "-", # creation date
# dimensions of drawing canvas
"dimensions": [self.viewboxes[0].width, self.viewboxes[0].height],
"shape": [s.asDict() for s in self.strokes],
"viewboxes": boxes,
"bounding_box": self.get_bounding_box().__dict__,
"audio": self.getAudioDict() if self.audio else None
}
return drawing
def getAudioDict(self):
"""quick and dirty to not use audio.asDict(), but it avoids passing all around sorts of data"""
return {
"file": '/files/' + self.getUrl('.mp3'),
"offset": 0
# "offset": self.audio.offset / 1000
}
def getUrl(self, extension = '') -> str:
if not self.id[1] and not self.id[2]:
return self.id[0] + extension
return self.id[0] + f"{extension}?t_in={self.t_in}&t_out={self.t_out}"
def get_bounding_box(self, stroke_thickness: float = 3.5) -> Viewbox:
"""Stroke_thickness 3.5 == 1mm. If it should not be considered, just set it to 0.
"""
if len(self.strokes) == 0:
# empty set
return Viewbox(0,0,0,0)
min_x, max_x = float("inf"), float("-inf")
min_y, max_y = float("inf"), float("-inf")
for s in self.strokes:
for p in s.points:
if p.x < min_x:
min_x = p.x
if p.x > max_x:
max_x = p.x
if p.y < min_y:
min_y = p.y
if p.y > max_y:
max_y = p.y
x1 = p.x - stroke_thickness/2
x2 = p.x + stroke_thickness/2
y1 = p.y - stroke_thickness/2
y2 = p.y + stroke_thickness/2
if x1 < min_x:
min_x = x1
if x2 > max_x:
max_x = x2
if y1 < min_y:
min_y = y1
if y2 > max_y:
max_y = y2
return Viewbox(min_x, min_y, max_x - min_x, max_y - min_y)
def getSlice(self, t_in: float, t_out: float) -> AnimationSlice:
def getSlice(self, t_in: Milliseconds, t_out: Milliseconds) -> AnimationSlice:
"""slice the slice. T in ms"""
frame_in = self.getIndexForInPoint(t_in)
frame_out = self.getIndexForOutPoint(t_out)
strokes = self.getStrokeSlices(frame_in, frame_out)
strokes = self.getStrokeSlices(frame_in, frame_out, t_in)
# TODO shift t of points with t_in
viewboxes = self.getViewboxesSlice(t_in, t_out)
logger.debug(viewboxes[0])
audio = self.audio.getSlice(t_in, t_out) if self.audio else None
return AnimationSlice(strokes, t_in, t_out, audio)
return AnimationSlice([self.id[0], t_in, t_out], strokes, viewboxes, t_in, t_out, audio)
def get_as_svg_dwg(self) -> svgwrite.Drawing:
box = self.get_bounding_box()
@ -171,7 +242,8 @@ class AnimationSlice:
dwg = svgwrite.Drawing(fn, size=(box.width, box.height))
dwg.viewbox(box.x, box.y, box.width, box.height)
self.add_to_dwg(dwg)
dwg.defs.add(dwg.style("path{stroke-width:1mm;stroke-linecap: round;}"))
dwg.defs.add(
dwg.style("path{stroke-width:1mm;stroke-linecap: round;}"))
return dwg
def get_as_svg(self) -> str:
@ -186,8 +258,32 @@ class AnimationSlice:
stroke.add_to_dwg(group)
dwg.add(group)
def getViewboxesSlice(self, t_in: Milliseconds, t_out: Milliseconds) -> list[TimedViewbox]:
"""Extract the viewboxes for in- and outpoints.
If there's one before inpoint, move that to the t_in, so that animation starts at the right position
the slice is offset by t_in ms
"""
viewboxes = []  # viewboxes[0] will be set to the last box from _before_ the slice, if any
lastbox = None
for viewbox in self.viewboxes:
if viewbox.t > t_out:
break
if viewbox.t <= t_in:
# make sure the first box is the last box from _before_ the slice
firstbox = TimedViewbox(
0, viewbox.x, viewbox.y, viewbox.width, viewbox.height)
if not len(viewboxes):
viewboxes.append(firstbox)
else:
viewboxes[0] = firstbox
continue
viewboxes.append(TimedViewbox(viewbox.t-t_in, viewbox.x, viewbox.y, viewbox.width, viewbox.height))
return viewboxes
def getStrokeSlices(
self, index_in: FrameIndex, index_out: FrameIndex
self, index_in: FrameIndex, index_out: FrameIndex, t_offset: Seconds = 0
) -> list[Stroke]:
"""Get list of Stroke/StrokeSlice based in in and out indexes
Based on annotation.js getStrokesSliceForPathRange(in_point, out_point)
@ -204,10 +300,10 @@ class AnimationSlice:
out_i = index_out[1] if index_out[0] == i else len(
stroke.points) - 1
slices.append(StrokeSlice(stroke, in_i, out_i))
slices.append(StrokeSlice(stroke, in_i, out_i, t_offset))
return slices
def getIndexForInPoint(self, ms) -> FrameIndex:
def getIndexForInPoint(self, ms: Milliseconds) -> FrameIndex:
"""Get the frame index (path, point) based on the given time
The In point version (so the first index after ms)
Equal to annotations.js findPositionForTime(ms)
@ -235,14 +331,14 @@ class AnimationSlice:
break # done :-)
return (path_i, point_i)
def getIndexForOutPoint(self, ms) -> FrameIndex:
def getIndexForOutPoint(self, ms: Milliseconds) -> FrameIndex:
"""Get the frame index (path, point) based on the given time
The Out point version (so the last index before ms)
Equal to annotations.js findPositionForTime(ms)
"""
return self.getIndexForTime( ms)
return self.getIndexForTime(ms)
def getIndexForTime(self, ms) -> FrameIndex:
def getIndexForTime(self, ms: Milliseconds) -> FrameIndex:
"""Get the frame index (path, point) based on the given time
Equal to annotations.js findPositionForTime(ms)
"""
@ -269,49 +365,114 @@ class AnimationSlice:
point_i = len(stroke.points) - 1
return (path_i, point_i)
audiocache = {}
class AudioSlice:
def __init__(self, filename: Filename, t_in: float = None, t_out: float = None, offset: float = None):
def __init__(self, filename: Filename, drawing: Drawing, t_in: Milliseconds = None, t_out: Milliseconds = None, offset: Milliseconds = None):
self.filename = filename
self.drawing = drawing
self.t_in = t_in # in ms
self.t_out = t_out # in ms
self.offset = offset # in ms
self.offset = offset # in ms TODO: use from self.drawing metadata
def getSlice(self, t_in: Milliseconds, t_out: Milliseconds) -> AudioSlice:
return AudioSlice(self.filename, t_in, t_out, self.offset)
return AudioSlice(self.filename, self.drawing, t_in, t_out, self.offset)
def export(self, format="mp3"):
def asDict(self):
return {
"file": self.getUrl(),
# "offset": self.offset/1000
}
def getUrl(self):
fn = self.filename.replace("../files/audio", "/file/")
params = []
if self.t_in:
params.append(f"t_in={self.t_in}")
if self.t_out:
params.append(f"t_out={self.t_in}")
if len(params):
fn += "?" + "&".join(params)
return fn
async def export(self, format="mp3"):
"""Returns file descriptor of tempfile"""
# Opening file and extracting segment
song = AudioSegment.from_file(self.filename)
start = self.t_in - self.offset
end = self.t_out - self.offset
start = int(self.t_in - self.offset) # millisecond precision is enough
end = int(self.t_out - self.offset) # millisecond precision is enough
if start < 0 and end < 0:
extract = AudioSegment.silent(
duration=end-start, frame_rate=song.frame_rate)
else:
if start < 0:
preroll = AudioSegment.silent(
duration=start * -1, frame_rate=song.frame_rate)
start = 0
else:
preroll = None
if end > len(song):
postroll = AudioSegment.silent(
duration=end - len(song), frame_rate=song.frame_rate)
end = len(song) - 1
else:
postroll = None
# call ffmpeg directly with the given in- and out-point, so no unnecessary data is loaded and no double conversion (e.g. ogg -> wav -> ogg) is performed
out_f = io.BytesIO()
extract = song[start: end]
if preroll:
extract = preroll + extract
if postroll:
extract += postroll
# build converter command to export
conversion_command = [
"ffmpeg",
'-ss', f"{start}ms",
'-to', f"{end}ms",
"-i", self.filename, # ss before input, so not whole file is loaded
]
# Saving
return extract.export(None, format=format)
conversion_command.extend([
"-f", format, '-', # to stdout
])
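# e.g. with t_in=2000, t_out=5000 and offset=0 this runs (illustrative):
# ffmpeg -ss 2000ms -to 5000ms -i <audiofile> -f mp3 -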
# read stdin / write stdout
logger.info("ffmpeg start")
proc = await asyncio.create_subprocess_exec(
*conversion_command,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.DEVNULL)
p_out, p_err = await proc.communicate()
logger.info("ffmpeg finished")
if proc.returncode != 0:
raise Exception(
"Encoding failed. ffmpeg/avlib returned error code: {0}\n\nCommand:{1}".format(
proc.returncode, conversion_command))
out_f.write(p_out)
out_f.seek(0)
return out_f
# old way: use AudioSegment; easy but slow (reads the whole ogg to wav, then exports the segment to ogg again)
# logger.info("loading audio")
# if self.filename in audiocache:
# song = audiocache[self.filename]
# else:
# song = AudioSegment.from_file(self.filename)
# audiocache[self.filename] = song
# logger.info("loaded audio")
# if start < 0 and end < 0:
# extract = AudioSegment.silent(
# duration=end-start, frame_rate=song.frame_rate)
# else:
# if start < 0:
# preroll = AudioSegment.silent(
# duration=start * -1, frame_rate=song.frame_rate)
# start = 0
# else:
# preroll = None
# if end > len(song):
# postroll = AudioSegment.silent(
# duration=end - len(song), frame_rate=song.frame_rate)
# end = len(song) - 1
# else:
# postroll = None
# extract = song[start: end]
# if preroll:
# extract = preroll + extract
# if postroll:
# extract += postroll
# # Saving
# return extract.export(None, format=format)
class AnnotationIndex:
@ -395,7 +556,7 @@ class AnnotationIndex:
class Point:
def __init__(self, x: float, y: float, last: bool, t: float):
def __init__(self, x: float, y: float, last: bool, t: Seconds):
self.x = float(x)
self.y = float(y)  # explicit cast: if y == 0 it may still be an int
self.last = last
@ -409,6 +570,9 @@ class Point:
# TODO: change so that it actually scales to FIT dimensions
return Point(self.x, self.y, self.last, self.t)
def asList(self) -> list:
return [self.x, self.y, 1 if self.last else 0, self.t]
Points = list[Point]
SvgDrawing = Union[svgwrite.container.SVG, svgwrite.container.Group]
@ -419,25 +583,28 @@ class Stroke:
self.color = color
self.points = points
def asDict(self) -> dict:
return {"color": self.color, "points": [p.asList() for p in self.points]}
def add_to_dwg(self, dwg: SvgDrawing):
path = svgwrite.path.Path(d=self.get_as_d()).stroke(
self.color, 1).fill("none")
dwg.add(path)
def get_bounding_box(self) -> Viewbox:
min_x, max_x = float("inf"), float("-inf")
min_y, max_y = float("inf"), float("-inf")
# def get_bounding_box(self) -> Viewbox:
# min_x, max_x = float("inf"), float("-inf")
# min_y, max_y = float("inf"), float("-inf")
for p in self.points:
if p.x < min_x:
min_x = p.x
if p.x > max_x:
max_x = p.x
if p.y < min_y:
min_y = p.y
if p.y > max_y:
max_y = p.y
return Viewbox(min_x, min_y, max_x - min_x, max_y - min_y)
# for p in self.points:
# if p.x < min_x:
# min_x = p.x
# if p.x > max_x:
# max_x = p.x
# if p.y < min_y:
# min_y = p.y
# if p.y > max_y:
# max_y = p.y
# return Viewbox(min_x, min_y, max_x - min_x, max_y - min_y)
def get_as_d(self):
d = ""
@ -466,17 +633,22 @@ class Stroke:
class StrokeSlice(Stroke):
def __init__(self, stroke: Stroke, i_in: int = None, i_out: int = None) -> None:
def __init__(self, stroke: Stroke, i_in: int = None, i_out: int = None, t_offset: Seconds = 0) -> None:
self.stroke = stroke
self.i_in = 0 if i_in is None else i_in
self.i_out = len(self.stroke.points) - 1 if i_out is None else i_out
# deepcopy points, because slices can be offset in time
self.points = copy.deepcopy(self.stroke.points[self.i_in: self.i_out + 1])
for p in self.points:
p.t -= t_offset
def slice_id(self):
return f"{self.i_in}-{self.i_out}"
@property
def points(self) -> Points:
return self.stroke.points[self.i_in: self.i_out + 1]
# @property
# def points(self) -> Points:
# return self.stroke.points[self.i_in: self.i_out + 1]
@property
def color(self) -> str:

8
app/svganim/uimethods.py Normal file
View File

@ -0,0 +1,8 @@
from hashlib import md5
def annotation_hash(handler, input):
return md5(input.encode()).hexdigest()
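# registered as a Tornado ui_method in webserver.py, so templates can call
# e.g. {{ annotation_hash(annotation.id) }}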

View File

@ -2,9 +2,12 @@
<head>
<title>Annotations</title>
<link rel="stylesheet" href="svganim.css">
<style>
body {
background: darkgray;
background: rgb(39, 40, 41);
font-family: sans-serif;
color: white
}
ul {
@ -17,21 +20,95 @@
;
}
summary h2{
display: inline-block;
cursor: pointer;
}
details[open] summary{
color: rgb(224, 196, 196);
}
/* details ul{
display: none;
}
details[open] ul{
display: block;
} */
img {
/* width: 400px; */
background: white;
width: 300px;
height: 200px;
cursor: pointer;
padding: 20px;
}
.svganim_player {
display: inline-block;
position: relative;
width: 300px;
height: 200px;
overflow: hidden;
padding: 20px;
background: white;
}
.svganim_player svg {
width: 100%;
height: 100%;
}
.svganim_player.play:not(.loading) .controls {
visibility: hidden;
}
.svganim_player:hover .controls {
visibility: visible !important;
}
</style>
<script src="assets/nouislider-15.5.0.js"></script>
<script src="assets/wNumb-1.2.0.min.js"></script>
<script src="annotate.js"></script>
<script src="playlist.js"></script>
</head>
<body>
{% for tag in index.tags %}
<h2>{{tag}}</h2>
<ul>
{% for annotation in index.tags[tag] %}
<li><img src="/annotation/{{ annotation.id }}.svg" data-audio="/annotation/{{ annotation.id }}.mp3"></li>
{% end %}
</ul>
<details>
<summary>
<h2>{{tag}} ({{len(index.tags[tag])}})</h2>
</summary>
<ul>
{% for annotation in index.tags[tag] %}
<li>
<img src="/annotation/{{ annotation.id }}.svg" loading="lazy" id="img-{{ annotation_hash(annotation.id) }}">
<div class="play" id="annotation-{{ annotation_hash(annotation.id) }}"></div>
<script type='text/javascript'>
(function () {
let imgEl = document.getElementById('img-{{ annotation_hash(annotation.id) }}');
imgEl.addEventListener('click', () => {
imgEl.style.display = 'none';
new Annotator(
document.getElementById("annotation-{{ annotation_hash(annotation.id) }}"),
"tags.json",
"{{ annotation.getJsonUrl() }}",
{ is_player: true, crop_to_fit: true, autoplay: true }
);
})
})();
</script>
</li>
<!-- <li><img src="/annotation/{{ annotation.id }}.svg" data-audio="/annotation/{{ annotation.id }}.mp3"></li> -->
{% end %}
</ul>
</details>
{% end %}
<!-- <ul>
{% for annotation in index.annotations %}
@ -47,7 +124,7 @@
for (const image of images) {
const audio = new Audio(image.dataset.audio);
console.log(image, audio);
image.addEventListener('mouseover', (e) => {
audio.play();
});
image.addEventListener('mouseout', (e) => {

View File

@ -12,6 +12,7 @@ import argparse
import coloredlogs
import glob
import svganim.strokes
import svganim.uimethods
logger = logging.getLogger("svganim.webserver")
@ -76,6 +77,7 @@ class WebSocketHandler(tornado.websocket.WebSocketHandler):
return
# write to an appendable JSON format: a file that becomes valid JSON when wrapped in []
# TODO use jsonlines -- which is not so much different but (semi-)standardized
with open(
os.path.join(self.config.storage, self.filename +
".json_appendable"), "a"
@ -236,13 +238,14 @@ class AudioListingHandler(tornado.web.RequestHandler):
class AnimationHandler(tornado.web.RequestHandler):
def initialize(self, config):
def initialize(self, config, index: svganim.strokes.AnnotationIndex):
self.config = config
def get(self, filename):
self.set_header("Content-Type", "application/json")
self.index = index
async def get(self, filename):
# filename = self.get_argument("file", None)
if filename == "":
self.set_header("Content-Type", "application/json")
files = []
names = [
name
@ -275,35 +278,49 @@ class AnimationHandler(tornado.web.RequestHandler):
files.sort(key=lambda k: k["mtime"])
self.write(json.dumps(files))
else:
path = os.path.join(
self.config.storage, os.path.basename(
filename) + ".json_appendable"
)
drawing = {"file": filename, "shape": [], "viewboxes": []}
with open(path, "r") as fp:
events = json.loads("[" + fp.read() + "]")
for i, event in enumerate(events):
if i == 0:
# metadata on first line
drawing["time"] = event[0]
drawing["dimensions"] = [event[1], event[2]]
else:
if type(event) is list:
# ignore duplicate metadata lines, which appear when continuing an existing drawing
continue
if event["event"] == "viewbox":
drawing["viewboxes"].extend(event['viewboxes'])
if event["event"] == "stroke":
# points = []
# for i in range(int(len(stroke) / 4)):
# p = stroke[i*4:i*4+4]
# points.append([float(p[0]), float(p[1]), int(p[2]), float(p[3])])
drawing["shape"].append(
{"color": event["color"],
"points": event["points"]}
)
self.write(json.dumps(drawing))
if filename[-4:] == ".svg":
extension = "svg"
filename = filename[:-4]
elif filename[-4:] == ".mp3":
extension = "mp3"
filename = filename[:-4]
elif filename[-4:] == ".wav":
extension = "wav"
filename = filename[:-4]
else:
extension = None
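# e.g. GET /files/<name>.svg?t_in=1000&t_out=5000 returns the sliced drawing as SVG,
# .mp3/.wav return the matching audio slice, and no extension returns JSON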
logger.info(f"file {filename=}, {extension=}")
# if annotation_id not in self.index.annotations:
# raise tornado.web.HTTPError(404)
# annotation = self.index.annotations[annotation_id]
t_in = self.get_argument('t_in', None)
t_out = self.get_argument('t_out', None)
animation = self.index.drawings[filename].get_animation()
if t_in is not None and t_out is not None:
animation = animation.getSlice(float(t_in), float(t_out))
if extension == "svg":
self.set_header("Content-Type", "image/svg+xml")
self.write(animation.get_as_svg())
elif extension == "mp3":
self.set_header("Content-Type", "audio/mp3")
audio = await animation.audio.export(format="mp3")
self.write(audio.read())
elif extension == "wav":
self.set_header("Content-Type", "audio/wav")
audio = await animation.audio.export(format="wav")
self.write(audio.read())
else:
self.set_header("Content-Type", "application/json")
self.write(json.dumps(animation.asDict()))
class TagHandler(tornado.web.RequestHandler):
"""List all tags"""
@ -538,7 +555,7 @@ class Server:
"config": self.config,
},
),
(r"/files/(.*)", AnimationHandler, {"config": self.config}),
(r"/files/(.*)", AnimationHandler, {"config": self.config, "index": self.index}),
(
r"/audio/(.+)",
tornado.web.StaticFileHandler,
@ -573,6 +590,7 @@ class Server:
],
debug=True,
autoreload=True,
ui_methods=svganim.uimethods
)
application.listen(self.config.port)
tornado.ioloop.IOLoop.current().start()

View File

@ -4,6 +4,7 @@
<head>
<meta charset="utf-8">
<title>Annotate a line animation</title>
<link rel="stylesheet" href="svganim.css">
<style media="screen">
body {
/* background: black;
@ -11,6 +12,10 @@
background: lightgray;
}
body.player{
background: rgb(39, 40, 41);
}
#sample,
svg {
position: absolute;
@ -24,8 +29,18 @@
/* border: solid 2px lightgray; */
}
svg .background {
fill: white
}
body.player svg{
height: calc(100% - 40px);
background-color: white;
}
#wrapper {
position: absolute;
top: 0;
right: 0;
bottom: 0;
left: 0;
background: none;
}
img {
@ -39,280 +54,7 @@
z-index: 1;
}
path {
fill: none;
stroke: gray;
stroke-width: 1mm;
stroke-linecap: round;
}
g.before path {
opacity: 0.5;
stroke: gray !important;
}
g.after path,
path.before_in {
opacity: .1;
stroke: gray !important;
}
#wrapper {
position: absolute;
top: 0;
right: 0;
bottom: 0;
left: 0;
background: none;
}
.gray {
position: absolute;
background: rgba(255, 255, 255, 0.7);
}
.controls--playback {
/* display:flex; */
}
.timecode {
position: absolute;
right: 100%;
width: 5%;
font-size: 8px;
}
.controls--playback input[type='range'] {
/* position: absolute;
z-index: 100;
bottom: 0;
left: 0;
right: 0; */
width: 100%;
}
.controls button.paused,
.controls button.playing {
position: absolute;
left: 100%;
width: 30px;
height:30px;
}
.controls button.paused::before {
content: '⏵';
}
.controls button.playing::before {
content: '⏸';
}
.buffering .controls button:is(.playing,.paused)::before {
content: '↺';
display:inline-block;
animation: rotate 1s infinite;
}
@keyframes rotate {
0% {
transform: rotate(359deg)
}
100% {
transform: rotate(0deg)
}
}
.controls {
position: absolute !important;
z-index: 100;
bottom: 10px;
left: 5%;
right: 0;
width: 90%;
}
.scrubber {}
.tags {
line-height: 40px;
display: flex;
flex-direction: row;
padding: 0;
margin: 0;
}
.tags .tag {
display: block;
padding: 5px;
border: solid 1px darkgray;
flex-grow: 1;
text-align: center;
}
.tags li {
display: block;
}
.tags .subtags {
padding: 0;
font-size: 80%;
display: flex;
flex-direction: row;
flex-wrap: wrap;
}
.tags .subtags .tag {
padding: 2px;
}
.tags .tag:hover {
cursor: pointer;
background: darkgray;
}
.tags .tag.selected {
background: #3FB8AF;
}
.tags .tag.annotation-rm {
/* display: none; */
overflow: hidden;
color: red;
font-size: 30px;
width: 0;
flex-grow: 0;
padding: 5px 0;
transition: width .3s;
pointer-events: none;
border: none;
direction: rtl;
/* hide behind bar, instead into nothing */
}
.selected-annotation .tags .tag.annotation-rm {
color: red;
display: block;
width: 30px;
pointer-events: all;
}
.controls .annotation-comment{
width: 100%;
visibility: hidden;
}
.selected-annotation .controls .annotation-comment{
visibility: visible;
}
.noUi-handle:focus {
/* background: red;; */
border: solid 2px #601be0;
}
/* .noUi-handle:focus::before, .noUi-handle:focus::after{
background: #601be0;
} */
.tags .tag span {
display: inline-block;
width: 20px;
height: 20px;
margin-right: 10px;
vertical-align: middle;
border-radius: 5px;
}
.tags .subtags .tag span {
width: 10px;
height: 10px;
margin-right: 2px;
}
.annotations {
height: 30px;
/* border: solid 1px darkgray; */
position: relative;
}
.annotations>div {
opacity: .4;
background: lightseagreen;
position: absolute;
bottom: 0;
top: 0;
}
.annotations>div:hover,
.annotations>div.selected {
opacity: 1;
cursor: pointer;
}
.unsaved::before {
content: '*';
color: red;
display: inline-block;
text-align: center;
font-size: 30px;
position: absolute;
top: 10px;
left: 10px;
}
.saved::before {
content: '\2713';
display: inline-block;
color: green;
text-align: center;
font-size: 30px;
position: absolute;
top: 10px;
left: 10px;
}
.noUi-horizontal .noUi-touch-area {
cursor: ew-resize;
}
#interface .noUi-horizontal .noUi-tooltip {
/* tooltips go below the buttons */
bottom: auto;
top: 110%;
}
.audioconfig {
z-index: 9;
background: black;
color: white;
position: relative;
width: 100px;
/* as wide as audio controls only */
overflow: hidden;
white-space: nowrap;
left: -50px;
}
.audioconfig:hover {
width: auto;
left: 0px;
}
.audioconfig select,
.audioconfig input {
margin: 10px;
}
audio {
vertical-align: middle;
width: 100px;
/* hides seek head */
}
.playlist img {
position: static;
@ -335,6 +77,10 @@
font-size: 6pt;
}
body:not(.help) .help{
display: none;
}
.help li {
display: inline-block;
color: gray;
@ -384,10 +130,19 @@
<script type='text/javascript'>
let ann;
if (location.search) {
const params = new URLSearchParams(location.search);
const is_player = !!parseInt(params.get('player'));
const crop_to_fit = !!parseInt(params.get('crop'));
if(is_player) {
document.body.classList.add('player');
} else {
document.body.classList.add('annotator');
}
ann = new Annotator(
document.getElementById("interface"),
"tags.json",
location.search.substring(1)
params.get('file'),
{is_player: is_player, crop_to_fit: crop_to_fit}
);
} else {
const playlist = new Playlist(document.getElementById("interface"), '/files/');

View File

@ -112,11 +112,13 @@ class StrokeSlice {
class Annotator extends EventTarget {
constructor(wrapperEl, tagFile, fileurl, config) {
fileurl = fileurl.replace("&amp;", "&"); // little hack: tornadoweb does this automatically for some reason
super();
this.config = {
is_player: config && config.hasOwnProperty('is_player') ? config.is_player : false, // in player mode annotations are not loaded, nor is the annotator shown
crop_to_fit: config && config.hasOwnProperty('crop_to_fit') ? config.crop_to_fit : false, // don't animate viewport, but show the whole drawing
autoplay: config && config.hasOwnProperty('autoplay') ? config.autoplay : false, // immediately start playback
}
this.formatter = wNumb({
@ -194,9 +196,11 @@ class Annotator extends EventTarget {
this.controlsEl.appendChild(this.scrubberEl);
this.annotationsEl = document.createElement('div');
this.annotationsEl.classList.add('annotations')
this.controlsEl.appendChild(this.annotationsEl);
if(!this.config.is_player){
this.annotationsEl = document.createElement('div');
this.annotationsEl.classList.add('annotations')
this.controlsEl.appendChild(this.annotationsEl);
}
this.inPointPosition = [0, 0];
@ -279,7 +283,7 @@ class Annotator extends EventTarget {
this.commentEl.placeholder = "comment";
this.commentEl.value = "";
this.commentEl.addEventListener('keyup', (e) => {
if (ev.key == 'Escape') {
if (e.key == 'Escape') {
this.commentEl.blur() // deselect annotation, and deselect commentEl
} else {
e.stopPropagation(); // prevent keyup event to propagate and set i/o points
@ -448,6 +452,8 @@ class Annotator extends EventTarget {
method: 'GET',
});
this.wrapperEl.classList.add('loading');
fetch(request)
.then(response => response.json())
.then(data => {
@ -456,19 +462,36 @@ class Annotator extends EventTarget {
const metadata_req = new Request(`/annotations/${data.file}`, {
method: 'GET',
});
fetch(metadata_req)
return fetch(metadata_req)
.then(response => response.ok ? response.json() : null)
.then(metadata => {
if (metadata !== null) {
metadata.annotations = metadata.annotations.map((a) => new Annotation(a.tag, a.t_in, a.t_out, a.hasOwnProperty('comment') ? a.comment : ""))
}
this.loadStrokes(data, metadata)
return this.loadStrokes(data, metadata)
})
.catch(e => console.log(e));
} else {
this.loadStrokes(data, null);
return this.loadStrokes(data, null);
}
}).catch(e => console.log(e));
})
.then(() => {
// play on click for player
if(this.config.is_player) {
this.svgEl.addEventListener('click', (ev) => {
console.log('clicked for play/pause');
this.playPause();
});
}
// autoplay if necessary
if(this.config.autoplay){
this.play(); // play should remove loading
} else{
this.wrapperEl.classList.remove('loading');
}
})
.catch(e => console.log(e));
}
updateState() {
@ -664,7 +687,7 @@ class Annotator extends EventTarget {
this.annotations = metadata.annotations;
}
if ((metadata && metadata.hasOwnProperty('audio')) || drawing.hasOwnProperty('audio')) {
if ((metadata && metadata.hasOwnProperty('audio')) || (drawing.hasOwnProperty('audio') && drawing.audio)) {
if (metadata && metadata.hasOwnProperty('audio')) {
this.audioFile = metadata.audio.file
this.audioOffset = Number.parseFloat(metadata.audio.offset);
@ -683,11 +706,8 @@ class Annotator extends EventTarget {
this.currentPointI = null;
this.currentViewboxI = null;
this.dimensions = drawing.dimensions;
if (!this.config.crop_to_fit) {
this.svgEl.setAttribute('viewBox', `0 0 ${this.dimensions[0]} ${this.dimensions[1]}`)
} else {
this.svgEl.setAttribute('viewBox', `${drawing.bounding_box.x} ${drawing.bounding_box.y} ${drawing.bounding_box.width} ${drawing.bounding_box.height}`)
}
this.bounding_box = drawing.bounding_box;
this.updateViewbox();
// let bgEl = document.createElementNS('http://www.w3.org/2000/svg', 'rect');
// bgEl.setAttribute("x", 0);
@ -704,7 +724,7 @@ class Annotator extends EventTarget {
this.nextViewboxTimeout = null;
this._setPausedFlag(true);
this.setupAudioConfig().then(() => {
return this.setupAudioConfig().then(() => {
// this.setUpAnnotator()
let keyEl;
if (this.config.is_player) {
@ -1017,12 +1037,36 @@ class Annotator extends EventTarget {
return;
}
this.currentViewboxI = box_i
const b = this.viewboxes[box_i];
if (!this.config.crop_to_fit) {
this.svgEl.setAttribute('viewBox', `${b.x} ${b.y} ${this.dimensions[0]} ${this.dimensions[1]}`)
this.updateViewbox();
}
}
updateViewbox() {
if (this.config.crop_to_fit) {
this.svgEl.setAttribute('viewBox', `${this.bounding_box.x} ${this.bounding_box.y} ${this.bounding_box.width} ${this.bounding_box.height}`);
} else {
let x,y,w,h;
if(this.currentViewboxI !== null) {
x = this.viewboxes[this.currentViewboxI].x,
y = this.viewboxes[this.currentViewboxI].y,
w = this.dimensions[0],
h = this.dimensions[1];
} else {
x = 0,
y = 0,
w = this.dimensions[0],
h = this.dimensions[1];
}
this.svgEl.setAttribute('viewBox', `${x} ${y} ${w} ${h}`);
}
}
toggleCrop(){
this.config.crop_to_fit = !this.config.crop_to_fit;
this.updateViewbox();
}
getNextPosition(path_i, point_i) {
const path = this.strokes[path_i];
let next_path, next_point;
@ -1153,6 +1197,8 @@ class Annotator extends EventTarget {
const startPlayback = () => {
console.log('start playback');
this.wrapperEl.classList.remove('loading'); // no loading anymore
this.startTimeMs = window.performance.now() - this._currentTimeMs;
// strokes
if (this._currentTimeMs < 0) {
@ -1174,11 +1220,10 @@ class Annotator extends EventTarget {
resolve();
}
if (this.audioEl.readyState !== 4) { // not ready to play after seeking audio.
if (this.audioEl.src.length && this.audioEl.readyState !== 4) { // not ready to play after seeking audio.
console.log('wait for audio before playback');
this.wrapperEl.classList.add('buffering');
this.wrapperEl.classList.add('loading');
this.audioEl.addEventListener('canplaythrough', () => {
this.wrapperEl.classList.remove('buffering');
startPlayback()
}, { once: true }); // only once
} else {

View File

@ -5,8 +5,22 @@
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Drawing & Annotating Path Animations</title>
<style>
body{
font-family: sans-serif;
font-size: 20px;
}
a{
line-height: 2;
text-decoration: none;
}
a:hover{
text-decoration: underline;
}
</style>
</head>
<body>
<h1>Diagrams</h1>
<ul>
<li><a href="/index">Tags</a></li>
<li><a href="/annotate.html">Drawings</a></li>

View File

@ -54,8 +54,8 @@ class Playlist {
playEl.classList.add('play');
playEl.innerText = "Play";
playEl.href = location;
playEl.pathname = "play.html";
playEl.search = "?"+file.name;
playEl.pathname = "annotate.html";
playEl.search = "?file="+file.name+"&player=1";
linksEl.append(playEl);
const annotateEl = document.createElement("a");
@ -63,7 +63,7 @@ class Playlist {
annotateEl.innerText = "Annotate";
annotateEl.href = location;
annotateEl.pathname = "annotate.html";
annotateEl.search = "?"+file.name;
annotateEl.search = "?file="+file.name;
linksEl.append(annotateEl);
const drawEl = document.createElement("a");

300
app/www/svganim.css Normal file
View File

@ -0,0 +1,300 @@
svg .background {
fill: white
}
path {
fill: none;
stroke: gray;
stroke-width: 1mm;
stroke-linecap: round;
}
g.before path {
opacity: 0.5;
stroke: gray !important;
}
g.after path,
path.before_in {
opacity: .1;
stroke: gray !important;
}
.gray {
position: absolute;
background: rgba(255, 255, 255, 0.7);
}
.controls--playback {
/* display:flex; */
position: relative;
}
.timecode {
position: absolute;
right: 100%;
width: 5%;
font-size: 8px;
}
.controls--playback input[type='range'] {
/* position: absolute;
z-index: 100;
bottom: 0;
left: 0;
right: 0; */
width: 100%;
}
.controls button.paused,
.controls button.playing {
position: absolute;
left: 100%;
width: 30px;
height: 30px;
}
.controls button.paused::before {
content: '⏵';
}
.controls button.playing::before {
content: '⏸';
}
.loading .controls button:is(.playing, .paused)::before {
content: '↺';
display: inline-block;
animation: rotate 1s infinite;
}
@keyframes rotate {
0% {
transform: rotate(359deg)
}
100% {
transform: rotate(0deg)
}
}
.controls {
position: absolute !important;
z-index: 100;
bottom: 10px;
left: 5%;
right: 0;
width: 90%;
}
.scrubber {}
.tags {
line-height: 40px;
display: flex;
flex-direction: row;
padding: 0;
margin: 0;
}
.tags .tag {
display: block;
padding: 5px;
border: solid 1px darkgray;
flex-grow: 1;
text-align: center;
}
.tags li {
display: block;
}
.tags .subtags {
padding: 0;
font-size: 80%;
display: flex;
flex-direction: row;
flex-wrap: wrap;
}
.tags .subtags .tag {
padding: 2px;
}
.tags .tag:hover {
cursor: pointer;
background: darkgray;
}
.tags .tag.selected {
background: #3FB8AF;
}
.tags .tag.annotation-rm {
/* display: none; */
overflow: hidden;
color: red;
font-size: 30px;
width: 0;
flex-grow: 0;
padding: 5px 0;
transition: width .3s;
pointer-events: none;
border: none;
direction: rtl;
/* hide behind bar, instead into nothing */
}
.selected-annotation .tags .tag.annotation-rm {
color: red;
display: block;
width: 30px;
pointer-events: all;
}
.controls .annotation-comment {
width: 100%;
visibility: hidden;
}
.selected-annotation .controls .annotation-comment {
visibility: visible;
}
.noUi-handle:focus {
/* background: red;; */
border: solid 2px #601be0;
}
/* .noUi-handle:focus::before, .noUi-handle:focus::after{
background: #601be0;
} */
.tags .tag span {
display: inline-block;
width: 20px;
height: 20px;
margin-right: 10px;
vertical-align: middle;
border-radius: 5px;
}
.tags .subtags .tag span {
width: 10px;
height: 10px;
margin-right: 2px;
}
.annotations {
height: 30px;
/* border: solid 1px darkgray; */
position: relative;
}
.annotations>div {
opacity: .4;
background: lightseagreen;
position: absolute;
bottom: 0;
top: 0;
}
.annotations>div:hover,
.annotations>div.selected {
opacity: 1;
cursor: pointer;
}
.unsaved::before {
content: '*';
color: red;
display: inline-block;
text-align: center;
font-size: 30px;
position: absolute;
top: 10px;
left: 10px;
}
.saved::before {
content: '\2713';
display: inline-block;
color: green;
text-align: center;
font-size: 30px;
position: absolute;
top: 10px;
left: 10px;
}
.noUi-horizontal .noUi-touch-area {
cursor: ew-resize;
}
#interface .noUi-horizontal .noUi-tooltip {
/* tooltips go below the buttons */
bottom: auto;
top: 110%;
}
.audioconfig {
z-index: 9;
background: black;
color: white;
position: relative;
width: 100px;
/* as wide as audio controls only */
overflow: hidden;
white-space: nowrap;
left: -50px;
}
.audioconfig:hover {
width: auto;
left: 0px;
}
.audioconfig select,
.audioconfig input {
margin: 10px;
}
audio {
vertical-align: middle;
width: 100px;
/* hides seek head */
}
.svganim_annotator {
display: flex;
flex-direction: column;
height: 100%;
}
.svganim_annotator svg {
top: 20px;
background: white;
margin-left: 20px;
flex-shrink: 1;
flex-grow: 1;
position: static;
}
.svganim_annotator .audioconfig {
order: -1;
flex-grow: 0;
flex-shrink: 0;
position: static;
}
.svganim_annotator .controls {
margin-left: 5%;
position: static !important;
}