= ffmpeg
See also [[transcoding]] for realtime stream transcoding commands
== repo
For Debian stretch:
echo "deb http://pkg.c3voc.de/ stretch main" > /etc/apt/sources.list.d/voc.list
curl https://pkg.c3voc.de/repo.key | apt-key add -
apt-get update
apt-get install ffmpeg
== compiling
apt-get build-dep ffmpeg
apt-get install yasm libmp3lame-dev libx264-dev libvpx-dev libvorbis-dev libopus-dev
=== for Debian
Add the deb-multimedia repository (if not already done):
deb http://www.deb-multimedia.org wheezy main non-free
apt-get update
apt-get install deb-multimedia-keyring
Install additional libraries:
apt-get install libass-dev libfaac-dev libfdk-aac-dev libopus-dev
Check out the source:
git clone git://source.ffmpeg.org/ffmpeg.git ffmpeg
Configure:
./configure --extra-libs="-ldl" --enable-gpl --enable-libass --enable-libfdk-aac \
--enable-libmp3lame --enable-libopus --enable-libtheora --enable-libvorbis \
--enable-libvpx --enable-libx264 --enable-nonfree --enable-libfaac
Build / install:
make
make install
== HD master with loudnorm
ffmpeg -y \
-i /video/intros/XXX/intro.ts -analyzeduration 20000000 \
-ss 1858.48 -t 3550.88 -i /video/fuse//XXX/YYY/ZZZ/uncut.ts -analyzeduration 20000000 \
-i /video/intros/XXX/outro.ts \
-filter_complex '
[0:a:0] loudnorm=i=-16:print_format=summary [norm-0-0];
[1:a:0] loudnorm=i=-16:print_format=summary [norm-1-0];
[2:a:0] loudnorm=i=-16:print_format=summary [norm-2-0];
[0:v:0] [norm-0-0] [1:v:0] [norm-1-0] [2:v:0] [norm-2-0] concat=n=3:v=1:a=1 [vd] [audio0_mix];
[vd] yadif=mode=0:parity=0, hqdn3d [v]
' -analyzeduration 20000000 \
-map '[v]' -c:v:0 libx264 -pix_fmt yuv420p -crf:0 23 -profile:0 high -level:0 4.1 \
-map '[audio0_mix]' -c:a:0 aac -b:a:0 192k -ar:a:0 48000 \
-aspect 16:9 -movflags faststart -f mp4 /video/tmp/XXX/ZZZ-hd.mp4
=== HD master with loudnorm, cutting out a pause
- timestamps = *seconds.frames*
- -t specifies a duration, not an end time!
- the two -ss/-t input pairs select the segments before and after the pause (adjust them per segment)
ffmpeg -y \
-i /video/intros/XXX/intro.ts -analyzeduration 20000000 \
-ss 1858.48 -t 3550.88 -i /video/fuse//XXX/YYY/ZZZ/uncut.ts -analyzeduration 20000000 \
-ss 1858.48 -t 3550.88 -i /video/fuse//XXX/YYY/ZZZ/uncut.ts -analyzeduration 20000000 \
-i /video/intros/XXX/outro.ts \
-filter_complex '
[0:a:0] loudnorm=i=-16:print_format=summary [norm-0-0];
[1:a:0] loudnorm=i=-16:print_format=summary [norm-1-0];
[2:a:0] loudnorm=i=-16:print_format=summary [norm-2-0];
[3:a:0] loudnorm=i=-16:print_format=summary [norm-3-0];
[0:v:0] [norm-0-0] [1:v:0] [norm-1-0] [2:v:0] [norm-2-0] [3:v:0] [norm-3-0] concat=n=4:v=1:a=1 [vd] [audio0_mix];
[vd] yadif=mode=0:parity=0, hqdn3d [v]
' -analyzeduration 20000000 \
-map '[v]' -c:v:0 libx264 -pix_fmt yuv420p -crf:0 23 -profile:0 high -level:0 4.1 \
-map '[audio0_mix]' -c:a:0 aac -b:a:0 192k -ar:a:0 48000 \
-aspect 16:9 -movflags faststart -f mp4 /video/tmp/XXX/ZZZ-hd.mp4
== encoding snippets
=== H264/AAC/Stereo
ffmpeg -v warning -i -vf \
-c:v:0 libx264 -pix_fmt yuv420p -bufsize:0 8192k -maxrate:0 2000k -crf:v 23 \
-profile:v high -level:v 4.1 \
-map 0:1 \
-c:a:0 libfdk_aac -b:a:0 128k -ac:a:0 2 -ar:a:0 48000 \
-metadata:s:a:0 language=eng \
-aspect 16:9 -f mp4 \
-metadata title="" \
-metadata album="" \
-metadata copyright="This work is licensed under a Creative Commons Attribution 4.0 International License. https://creativecommons.org/licenses/by/4.0/" \
**Video filters:** ''hqdn3d'' for denoising, ''yadif'' for deinterlacing. If you need both, combine them with a comma.
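For example (deinterlace first, then denoise):
-vf 'yadif,hqdn3d'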
=== H264/AAC HQ
ffmpeg -v warning -i -vf \
-c:v libx264 -pix_fmt yuv420p -crf:v 14 -profile:v high -level:v 4.2 \
-c:a libfdk_aac -b:a 192k -aspect 16:9 -metadata title="" \
-metadata album="" \
-metadata copyright="This work is licensed under a Creative Commons Attribution 4.0 International License. https://creativecommons.org/licenses/by/4.0/" \
-f mp4 -movflags faststart
**Video filters:** ''hqdn3d'' for denoising, ''yadif'' for deinterlacing. If you need both, combine them with a comma.
=== WebM
#!/bin/bash
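# two-pass VP8/Vorbis WebM encode of the input file $1; pass 1 only runs if its pass log does not exist yet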
target=$(basename ${1%.*}).webm
passlog=/tmp/${target%.*}-webm-1st-pass
if [ ! -e $passlog-0.log ]; then
ffmpeg -threads 8 -analyzeduration 40000000 -probesize 100000000 -i $1 \
-c:v libvpx -g:0 120 -b:v 1200k -qmin:0 11 -qmax:0 51 \
-minrate:0 100k -maxrate:0 5000k \
-pass 1 -passlogfile $passlog \
-c:a libvorbis -b:a 96k -ac:a 2 -ar:a 48000 -metadata:s:a language=de \
-aspect 16:9 -f webm $target
fi
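# pass 2: the actual encode, reusing the pass log from above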
ffmpeg -y -threads 8 -analyzeduration 40000000 -probesize 100000000 -i $1 \
-c:v libvpx -pass 2 -passlogfile $passlog \
-g 120 -b:v 1200k -qmin 11 -qmax 51 -minrate 100k -maxrate 5000k \
-c:a libvorbis -b:a 96k -ac:a 2 -ar:a 48000 \
-aspect 16:9 -f webm $target
=== FullHD
The bitrates are tested for conference content (little motion) in SD 16:9 (1024x576). For HD content (1920x1080) we set both bitrates to -maxrate:0 5000k.
=== WebCut LoRes MP4
Tuning options (a sketch with these applied follows after the command below):
* libx264 instead of h264
* -preset:v fast
* -b:v is the video bitrate and can be increased if needed
ffmpeg -analyzeduration 20000000 -i uncut.XXX \
-vf scale=160:-1 -pix_fmt yuv420p \
-c:v h264 -c:a mp3 -b:a 32k -b:v 70k \
-aspect 16:9 -f mp4 webcut.mp4
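A sketch of the same command with the tuning options applied (the increased video bitrate of 150k is only an assumed example value):
ffmpeg -analyzeduration 20000000 -i uncut.XXX \
-vf scale=160:-1 -pix_fmt yuv420p \
-c:v libx264 -preset:v fast -c:a mp3 -b:a 32k -b:v 150k \
-aspect 16:9 -f mp4 webcut.mp4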
An alternative with a smaller picture; produces files of about 20 MB/h and encodes in roughly 5 minutes on fast hardware:
ffmpeg -i /video/fuse/...../101/uncut.ts -vf fps=1 -s 480x270 \
-pix_fmt yuv420p -map 0 -c:v libx264 -preset ultrafast -bf:0 0 -crf 29 \
-c:a mp3 -b:a 32k -ar:a 22050 -f mp4 -movflags faststart \
/video/tmp/..../cut-101-proxy.mp4
== one input, multiple format outputs
1. h264/aac flv rtmp output
2. theora/vorbis ogg output
ffmpeg -re -i 29c3-5266-de-en-proximax_telex_flashproxy_h264.mp4 -threads 0 -pix_fmt yuv420p -profile:v baseline -preset fast -tune zerolatency -c:v libx264 -strict -2 -c:a aac \
-f flv rtmp://localhost:1935/stream/saal1 \
-map 0 -c:v libtheora -c:a libvorbis -f ogg - | oggfwd localhost 8000 …
== Translated Transcoding
ffmpeg -y -i rtmp://live.lan.c3voc.de/stream/mastermaster -aspect 16:9 -threads:0 0 -c:v libx264 -filter_complex '[0:v] yadif, scale=1920:1080 [v]; [0:a:0] pan=stereo|c0=FL [native]' -map '[native]' -map '[v]' -maxrate:v:0 2800k -bufsize:v:0 8192k -crf:0 18 -pix_fmt:0 yuv420p -profile:v:0 main -g:v:0 25 -preset:v:0 veryfast -ac 1 -y -f flv rtmp://live.lan.c3voc.de/push/master_translated_hd
== Repairing the audio track from a backup audio recording and packing it into the MP4
Create the language deu-backup in the tracker.
Have the recording rendered out again.
Convert the individual audio tracks before or after Auphonic so that an audio editing tool such as Reaper can work with them:
ffmpeg -i 5100-hd-audio2-auphonic.m4a -vn -c:a copy -bsf:a aac_adtstoasc 5100-hd-audio2-auphonic-echt.m4a
Render the finished audio track out of Reaper, e.g. as FLAC.
Pack the audio track back into the talk MP4:
ffmpeg -threads 0 -i /video/encoded/fossgis16/5100-hd-primary-audio-only.mp4 -i 5100-audio-fixed.flac -map 0:0 -map 1:0 -map_metadata 0 -c:v copy -c:a:0 libfdk_aac -vbr 4 -ar:a:0 48000 -metadata:s:a:0 language=deu 5100-hd-fixed2.mp4
Old version:
ffmpeg -threads 0 -i 5100-hd.mp4 -i 5100-audio-fixed.flac -map 0:0 -map 1:0 -map_metadata 1 5100-hd-fixed.mp4
Note: this also works with an MTS file (transport stream with H.264 video) as it comes straight off a consumer camera's SD card.
See also: https://trac.ffmpeg.org/wiki/AudioChannelManipulation
== Audio track(s) and video track swapped
ffmpeg -i 5115-hd-broken.mp4 -map 0:v -map 0:a -c copy -map_metadata 0 5115-hd.mp4
== audio channel mapping / repair
https://trac.ffmpeg.org/wiki/AudioChannelManipulation
=== mono mixdown
ffmpeg -i 27-hd-orig.mp4 -codec:v copy -map_metadata 0 -ac 1 -ab 128k 27-hd.mp4
=== silent fix
ffmpeg -ar 48000 -acodec pcm_s16le -f s16le -ac 2 -i <( dd if=/dev/zero ) -y -i not_silent/$file -t 10.0 -c:v copy $file;
Oder "schöner" und ohne knacken:
ffmpeg -i not_silent/$file -filter_complex "aevalsrc=c=2c:s=48000:0|0 [a]" -map 0:v -c:v copy -map "[a]" -c:a $audiocodec -shortest $outputfile
$audiocodec is e.g. mp2 for intros
== metadata fix
=== map metadata and fix specific entry
ffmpeg -i br0ken_licence/$file \
-map_metadata 0 \
-metadata:s:a:0 LICENSE="This work is licensed under a Creative Commons Attribution 4.0 International License. https://creativecommons.org/licenses/by/4.0/" \
-c:a copy $file
=== copy metadata from another file
ffmpeg -i $metadata_source -i $video_source -map 1 -c copy -map_metadata 0 $output_file
=== Cutting out a pause
Pause from 3840 s = 1h04m00s
to 4691 s = 1h18m11s
ffmpeg -i 50-hd-camonly.mp4 -filter_complex " \
[0:v]trim=duration=3840[v1]; \
[0:v]trim=start=4691,setpts=PTS-STARTPTS[v2]; \
[v1][v2]concat=v=1:a=0[v]; \
[0:a]atrim=duration=3840[a1]; \
[0:a]atrim=start=4691,asetpts=PTS-STARTPTS[a2]; \
[a1][a2]concat=v=0:a=1[a] \
" -map '[v]' -c:v:0 libx264 -pix_fmt yuv420p -bufsize:0 8192k -crf:0 20 -minrate:0 100k -maxrate:0 5000k -profile:0 main -level:0 4.0 -threads 8 -map '[a]' -c:a:0 libfdk_aac -b:a:0 192k -ac:a:0 1 -ar:a:0 48000 -aspect 16:9 -f mp4 50-hd-camonly-cut.mp4
=== Icecast audio with ffmpeg
ffmpeg -re -i … -map 0:a -c:a mp3 -f mp3 \
-ice_public 1 -content_type audio/mpeg \
-ice_description "Livestream aus der Pommesbude" \
-ice_url "http://www.pommesbude.de/" \
-ice_name "Pommesbude" \
-ice_genre "Geräusch" \
icecast://source:…@live.ber.c3voc.de:8000/foo.mp3
=== Cut camera MTS with backup audio to final MP4
ffmpeg -analyzeduration 20000000 -i video-cut.mov \
-filter_complex '
[0:a] pan=1c|c0=c0, loudnorm=i=-16:print_format=summary [audio0_mix] ;
[0:a] pan=1c|c0=c1, loudnorm=i=-16:print_format=summary [audio1_mix] ;
[0:v:0] yadif=mode=0:parity=0, hqdn3d [v]' \
-analyzeduration 20000000 \
-map '[v]' -c:v:0 libx264 -pix_fmt yuv420p -crf:0 23 -profile:0 high -level:0 4.1 \
-map '[audio0_mix]' -c:a:0 aac -b:a:0 128k -ar:a:0 48000 \
-map '[audio1_mix]' -c:a:1 aac -b:a:1 128k -ar:a:1 48000 \
-aspect 16:9 -movflags faststart \
-f mp4 -metadata title= -metadata:s:a:0 language=deu -metadata:s:a:1 language=backup encoded/video.mp4
== Poster image + audio to video
https://trac.ffmpeg.org/wiki/Create%20a%20video%20slideshow%20from%20images#Addingaudio
ffmpeg -loop 1 -i 132.png -i 132.flac \
-c:v libx264 -tune stillimage \
-c:a libfdk_aac -b:a 192k \
-pix_fmt yuv420p -shortest -map 0:v -map 1:a \
-t 5 132-Foobar.mp4
ffmpeg -loop 1 -i 132.png \
-filter_complex "aevalsrc=c=2:s=48000:0|0 [a]" \
-map 0:v -c:v libx264 -tune stillimage \
-map "[a]" -c:a libfdk_aac -b:a 192k \
-t 5 -pix_fmt yuv420p 132.mp4
for i in *.png; do ffmpeg -i $i -ar 48000 -ac 1 -f s16le -i /dev/zero -ar 48000 -ac 1 -f s16le -i /dev/zero -map 0:v -c:v mpeg2video -q:v 0 -aspect 16:9 -t 5 -map 1:0 -map 2:0 -shortest -f mpegts ${i%.png}.ts; done
Loop a single PNG without audio:
ffmpeg -t 60 -loop 1 -i 0-pause.png -c:v libx264 -tune stillimage -pix_fmt yuv420p -shortest -map 0:v -f mpegts pause.ts
== Streaming from a USB webcam (720p, no audio)
ffmpeg2theora /dev/video1 -f video4linux2 -x 1280 -y 720 -F 30 -v 10 --pp fd --noaudio -o - | oggfwd /
== HLS streaming
ffmpeg -i - -filter:v yadif -c:v dvvideo -c:a copy -f dv - | \
tee \
>(ffmpeg -i - -c:v libx264 -threads 2 -tune zerolatency -c:a aac -strict -2 -f hls -hls_time 30.0 /usr/share/nginx/html/hls/${NAME}.m3u8 ) \
| ffmpeg2theora --title "GPN13 - ${FULLNAME}" - --speedlevel 2 -c 1 -v 3 -f dv -o - | oggfwd 127.0.0.1 8000 ppR973IRgg /${NAME}
Modification to the default nginx config:
set $no_cache "";
if ($request_uri ~* \.pls$) {
set $no_cache "1";
}
proxy_no_cache $no_cache;
proxy_cache_bypass $no_cache;
== Facebook Live-Streaming
Facebook strictly requires 720p, 30 fps and a keyframe interval of 2 seconds at a maximum of 4000k bitrate. Audio must be 44100 Hz (stereo is allowed, though).
Workflow: the page administrator has to hand you a URL and a stream key. They must have marked the stream as a "Continuous Stream" beforehand. We need the non-SSL variant because ffmpeg currently chokes on rtmps. Start the stream as in the example below. After a few seconds the page administrator sees the stream and can go live.
ffmpeg -re -i $1 -framerate 25 -r 30 -ar 44100 -s 1280:720 -c:a aac -b:a 90k -movflags +faststart -preset medium -crf 17 -tune zerolatency -profile:v baseline -maxrate 2000k -vcodec libx264 -bufsize 200000k -g 60 -max_muxing_queue_size 1024 -f flv "rtmps://live-api-s.facebook.com:443/rtmp/$2"
### DEPRECATED ###
(did not work any more the last time it was tried)
ffmpeg -threads 8 -re -i QUELLE -r 30 -vf scale=-1:720 -c:v libx264 -x264-params keyint=60 -crf 23 -preset slow -b:v 3000k -minrate 2000k -maxrate 4000k -bufsize 500k -c:a aac -ar 44100 -ac 1 -f flv "rtmp://live-api.facebook.com/rtmp/STREAMKEY"
== Periscope Live-Streaming
Periscope Producer strictly requires, [[https://help.pscp.tv/customer/en/portal/articles/2600293-what-is-periscope-producer|according to the documentation]], 720p (turns out: it should really only be 540p), 30 fps and a keyframe interval of 3 seconds (2 is better and works too) at a maximum of 4000k bitrate. Audio must be 44100 Hz (stereo is allowed, though).
ffmpeg -threads 8 -re -i QUELLE -r 30 -vf scale=-1:540 -c:v libx264 -x264-params keyint=90 -crf 23 -preset slow -b:v 2500k -minrate 2000k -maxrate 4000k -bufsize 500k -c:a aac -ar 44100 -ac 1 -f flv "rtmp://de.pscp.tv:80/x/"
== ffmpeg h264 encoding
ffmpeg -y -v warning -nostdin -threads 4 -analyzeduration 20000000 \
-i /video/31c3/encoded//10-h264-hd.mp4 -filter_complex "[0:0] hqdn3d,drawbox=0:0:720:1:black [vd]" \
-map "[vd]" -c:v:0 libx264 -bufsize:0 8192k -minrate:0 100k -maxrate:0 2000k -crf:0 18 -profile:0 main \
-map 0:1 -c:a:0 libfaac -b:a:0 128k -ac:a:0 2 -ar:a:0 48000 -metadata:s:a:0 language=deu \
-map 0:2 -c:a:1 libfaac -b:a:1 128k -ac:a:1 2 -ar:a:1 48000 -metadata:s:a:1 language=eng -aspect 16:9 -f mp4 \
-metadata artist=VOC-Personal -metadata author=VOC-Personal -metadata title=HD-Multilang-Test \
-metadata album="Playground-Projekt im Tracker" -metadata genre=testrecording \
-metadata description="Ein toller HD-Recording-Test mit zweisprachigem Audio und so." \
-metadata copyright="Licensed to the public under the do-whatever-you-like license (testing purposes only)" \
/video/31c3/tmp//10-h264-sd.mp4
== Timestamp Stream overlay
localtime (newer ffmpeg syntax):
-vf drawtext="fontfile=arial.ttf:fontsize=14:fontcolor=white:box=1:boxcolor=black@0.9:x=08:y=466:text=’%%{localtime\: %%m/%%d/%%Y %%I.%%M.%%S %%p}'"
strftime (deprecated ffmpeg syntax):
-vf drawtext="expansion=strftime:fontfile=arial.ttf:fontsize=14:fontcolor=white:box=1:boxcolor=black@0.9:x=08:y=466:text=’%%m\/%%d\/%%Y %%I\:%%M\:%%S \%%p'"
== Converting external video files into MPEG segments the tracker understands
Our [[https://github.com/voc/scripts/|scripts]] repository contains a script [[https://github.com/voc/scripts/blob/master/camera_recordings_to_segments/cam2seg.sh|cam2seg.sh]] which converts all files in a folder into segments the tracker can understand. Run the script once without arguments for further instructions: ''sh cam2seg.sh''.
== relaying an HLS stream to our CDN (and fixing most of the brokenness it comes with from "professional" CDNs)
ffmpeg -headers 'User-Agent: bar'$'\r\n' -i http://yourBrokenCDN.com/stupidfilename.m3u8 -c:v copy -c:a aac -ar 44100 -ab 128k -ac 2 -strict -2 -bsf:a aac_adtstoasc -f flv rtmp://localhost:1935/test/q1
If you want to use YouTube or similar sources whose m3u8 URLs cannot be found directly, [[https://streamlink.github.io/index.html|Streamlink]] can help. For YouTube it looks roughly like this:
streamlink -l debug https://www.youtube.com/watch?v=DeQkjJ9JJTI 720p --player="ffmpeg -c copy -bsf:a aac_adtstoasc -f flv rtmp://127.0.0.1:1935/live/irgendwas -i"
== HLS stream dump as backup, directly in segments
(Of course this is then H.264 in TS instead of MPEG-2 in TS, but everything should still work.)
for ((;;)) ; do ffmpeg -f applehttp -i http://cdn.c3voc.de/hls/s2_native_hd.m3u8 -c copy -f segment -segment_format mpegts -segment_time 180 -strftime 1 emf16-saalb-%Y-%m-%d_%H-%M-%S-$$.ts ; sleep 2; done
== Blurring an image region (rectangular window of size $XSIZE by $YSIZE, with offsets $XOFFSET and $YOFFSET, all in pixels)
ffmpeg -i INPUT.MOV -filter_complex "[0:v]crop=$XSIZE:$YSIZE:$XOFFSET:$YOFFSET,boxblur=3[fg];[0:v][fg]overlay=$XOFFSET:$YOFFSET[v]" -map "[v]" -map 0:a -c:v libx264 -c:a copy -movflags +faststart BLURRED_OUTPUT.MOV
== Overlaying a black box on an image region
50 = box width, 40 = box height, 5 = box X position, 4 = box Y position
ffmpeg -i INPUT.MOV -vf "color=black:50x40 [over]; [in][over] overlay=5:4 [out]" OUTPUT.MOV
== Chaining filters, here: overlaying two black boxes on image regions
Box 1 (e.g. a ticker bar):
640 = box 1 width, 20 = box 1 height, 0 = box 1 X position, 330 = box 1 Y position
Box 2 (e.g. a station logo at the bottom right):
150 = box 2 width, 60 = box 2 height, 485 = box 2 X position, 272 = box 2 Y position
ffmpeg -i INPUT.MOV -vf "color=black:640x20 [over]; [in][over] overlay=0:330 [out1], color=black:150x60 [over]; [out1][over] overlay=485:272 [out2]" OUTPUT.MOV
== RGB/YUV nonsense
This is all very confusing; just try both and use whichever looks right.
Interpret RGB as YUV:
-filter:v 'format=rgb24,mergeplanes=0x000102:yuv444p'
Interpret YUV as RGB:
-filter:v 'format=yuv444p,mergeplanes=0x000102:gbrp'
== AV sync test using ffmpeg / ffplay
This might be useful to find A/V drifting behavior in the production chain. Simply use this as the generator. For use as a source for our streaming relay:
ffmpeg -re -f lavfi -i 'testsrc=s=1920x1080:n=3[out0];sine=beep_factor=1:f=1000[out1]' -c:v libx264 -c:a aac -f flv rtmp://[endpoint]
For use with voctomix:
ffmpeg -re -f lavfi -i 'testsrc=s=1920x1080:n=3[out0];sine=beep_factor=1:f=1000[out1]' \
-pix_fmt yuv420p -c:v rawvideo -c:a pcm_s16le -f matroska tcp://localhost:10000
To check how this looks locally without any chain involved using ffplay (good for tweaking/reference checking):
ffplay -f lavfi -i 'testsrc=s=1920x1080:n=3[out0];sine=beep_factor=1:f=1000[out1]'
== ffmpeg only multiview
#!/bin/bash
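# pull eight CDN streams, render each with an EBU R128 loudness meter on top of a scaled preview, and write them to FIFOs for the combiner below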
o='rtmp://127.0.0.1/stream/loudness'
for i in s{1,2,3,4,5,6,41,42}
do
if [[ ! -e /tmp/fifo${i} ]];
then
mkfifo /tmp/fifo${i};
fi
ffmpeg -hide_banner -v warning -y \
-i "http://cdn.c3voc.de/${i}_native_sd.webm" \
-filter_complex "nullsrc=size=640x840 [base];
[0:v] scale=640:360, fps=15 [scaled];
[0:a] ebur128=video=1:meter=18:size=640x480 [ebur][a1];
[ebur] fps=15 [v1];
[a1] aformat=sample_fmts=fltp:sample_rates=44100:channel_layouts=stereo [audio];
[base][v1] overlay=shortest=1 [tmp1];
[tmp1][scaled] overlay=shortest=1:y=480 [ov];
[ov] drawtext=fontcolor=white:x=200:y=50:fontsize=128:fontfile=/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf:text=${i} [out]" \
-map "[out]" -c:v rawvideo -pix_fmt yuv420p \
-map "[audio]" -c:a pcm_s16le \
-f matroska "/tmp/fifo${i}" &
#echo $! > /tmp/fifo${i}.pid
done
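# combine the eight FIFOs into a 4x2 mosaic with mixed audio and push it to $o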
ffmpeg -hide_banner -v warning -y -fflags +genpts -flags +global_header \
-thread_queue_size 1024 -i /tmp/fifos1 \
-thread_queue_size 1024 -i /tmp/fifos2 \
-thread_queue_size 1024 -i /tmp/fifos3 \
-thread_queue_size 1024 -i /tmp/fifos4 \
-thread_queue_size 1024 -i /tmp/fifos5 \
-thread_queue_size 1024 -i /tmp/fifos6 \
-thread_queue_size 1024 -i /tmp/fifos41 \
-thread_queue_size 1024 -i /tmp/fifos42 \
-filter_complex "
[0:a][1:a][2:a][3:a][4:a][5:a][6:a][7:a] amix=inputs=8 [audio];
nullsrc=size=2560x1680 [base];
[base][0:v] overlay=shortest=1 [tmp1];
[tmp1][1:v] overlay=shortest=1:x=640 [tmp2];
[tmp2][2:v] overlay=shortest=1:x=1280 [tmp3];
[tmp3][3:v] overlay=shortest=1:x=1920 [tmp4];
[tmp4][4:v] overlay=shortest=1:x=0:y=840 [tmp5];
[tmp5][5:v] overlay=shortest=1:x=640:y=840 [tmp6];
[tmp6][6:v] overlay=shortest=1:x=1280:y=840 [tmp7];
[tmp7][7:v] overlay=shortest=1:x=1920:y=840, fps=15 [out]" \
-map "[out]" -s 1280x800 \
-c:v libx264 \
-maxrate:v:0 2000k -bufsize:v:0 8192k -crf:0 21 \
-pix_fmt:0 yuv420p -profile:v:0 main -g:v:0 15 \
-preset:v:0 veryfast \
-map "[audio]" \
-c:a aac -b:a 96k -ar 44100 \
-y -f flv ${o}
killall ffmpeg
exit
== DASH encoding
=== VOD encoding
To convert static files into DASH with multiple bitrates/resolutions.
ffmpeg -hide_banner -i thms-11-deu-Global_Warming_Sucks_hd.mp4 \
-fflags +genpts \
-filter_complex '
[0:v:0] scale=720:trunc(ow/a/2)*2, drawtext=fontfile=/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf:fontsize=34:fontcolor=white:x=08:y=08:box=1:boxcolor=black@0.5:text\=MID_700k_%{frame_num} [mid];
[0:v:0] scale=320:trunc(ow/a/2)*2, drawtext=fontfile=/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf:fontsize=14:fontcolor=white:x=08:y=08:box=1:boxcolor=black@0.5:text\=LOW_150k_%{frame_num} [low];
[0:v:0] scale=1280:720, drawtext=fontfile=/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf:fontsize=14:fontcolor=white:x=08:y=08:box=1:boxcolor=black@0.5:text\=720p_1500k_%{frame_num} [720p];
[0:v:0] drawtext=fontfile=/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf:fontsize=14:fontcolor=white:x=08:y=08:box=1:boxcolor=black@0.5:text\=1080p_4000k_%{frame_num} [hd]
' \
-map '[hd]' \
-metadata:s:v:0 title="1080p" \
-c:v:0 libx264 \
-preset:v veryfast \
-profile:v main \
-pix_fmt yuv420p \
-flags +cgop \
-threads:v 0 \
-aspect 16:9 \
-r:v:0 25 \
-g:v:0 75 \
-crf:v:0 21 \
-maxrate:v:0 4M \
-minrate:v:0 2M \
-bufsize:v:0 18M \
-map '[mid]' \
-metadata:s:v:1 title="MID" \
-c:v:1 libx264 \
-maxrate:v:1 700k \
-minrate:v:1 500k \
-crf:v:1 18 \
-bufsize:v:1 3600k \
-pix_fmt:v:1 yuv420p \
-profile:v:1 main \
-g:v:1 75 \
-flags:v:1 +cgop \
-preset:v:1 veryfast \
-r:v:1 25 \
-map '[low]' \
-metadata:s:v:2 title="LOW" \
-c:v:2 libx264 \
-maxrate:v:2 150k \
-minrate:v:2 100k \
-crf:v:2 18 \
-bufsize:v:2 3600k \
-pix_fmt:v:2 yuv420p \
-profile:v:2 main \
-g:v:2 75 \
-flags:v:2 +cgop \
-preset:v:2 veryfast \
-r:v:2 25 \
-map '[720p]' \
-metadata:s:v:3 title="720p" \
-c:v:3 libx264 \
-preset:v:3 veryfast \
-profile:v:3 main \
-pix_fmt:v:3 yuv420p \
-flags:v:3 +cgop \
-aspect:v:3 16:9 \
-r:v:3 25 \
-g:v:3 75 \
-crf:v:3 21 \
-maxrate:v:3 1500k \
-minrate:v:3 1000k \
-bufsize:v:3 18M \
-map '0:a' \
-metadata:s:a:0 language="Native" \
-c:a aac -b:a 96k -ar 48000 -ac 1 \
-adaptation_sets "id=0,streams=v id=1,streams=a" \
-seg_duration 30 -streaming 1 \
-hls_playlist true -use_template 1 -use_timeline 1 -f dash dash/klima1/stream.mpd
=== screen capture and stream
ffmpeg -vaapi_device /dev/dri/renderD128 -f x11grab -video_size 1920x1080 -framerate 25 -i :0+100,200 \
-f pulse -ac 2 -i default \
-c:a aac -b:a 128k -ar:a 48000 \
-vf 'hwupload,scale_vaapi=format=nv12' -c:v h264_vaapi -qp 24 \
-f flv rtmp://ingest.c3voc.de/xxx/xtest
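The same, but using the monitor source of the built-in sound card as audio input instead of the PulseAudio default: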
ffmpeg -vaapi_device /dev/dri/renderD128 -f x11grab -video_size 1920x1080 -framerate 25 -i :0+100,200 \
-f pulse -ac 2 -i alsa_output.pci-0000_00_1f.3.analog-stereo.monitor \
-c:a aac -b:a 128k -ar:a 48000 \
-vf 'hwupload,scale_vaapi=format=nv12' -c:v h264_vaapi -qp 24 \
-f flv rtmp://ingest.c3voc.de/xxx/xtest
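The same, but grabbing a 2560x1440 desktop and downscaling to 1920x1080 on the GPU: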
ffmpeg -vaapi_device /dev/dri/renderD128 -f x11grab -video_size 2560x1440 -framerate 25 -i :0 \
-f pulse -ac 2 -i alsa_output.pci-0000_00_1f.3.analog-stereo.monitor \
-c:a aac -b:a 128k -ar:a 48000 \
-vf 'hwupload,scale_vaapi=w=1920:h=1080:format=nv12' -c:v h264_vaapi -qp 24 \
-f flv rtmp://ingest.c3voc.de/xxx/xtest
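A variant using kmsgrab instead of x11grab (no audio); the captured DRM frames are mapped to VAAPI for scaling and encoding: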
ffmpeg -device /dev/dri/card0 -f kmsgrab -i - \
-vf 'hwmap=derive_device=vaapi,scale_vaapi=w=1920:h=1080:format=nv12' -c:v h264_vaapi -qp 24 \
-f flv rtmp://ingest.c3voc.de/xxx/xtest