Creating a blank video
ffmpeg -f lavfi -i color=c=black:s=1366x768:r=25/1 -f lavfi -i anullsrc=cl=mono:r=48000 -c:v h264 -c:a pcm_s16be -t 3 out.mov -y
-c:v selects the video codec, -c:a the audio codec
for mp4, remove '-c:a pcm_s16be',
otherwise it fails with the error: Could not find tag for codec pcm_s16be
ffmpeg -f lavfi -i color=c=black:s=1366x768:r=25/1 -f lavfi -i anullsrc=cl=mono:r=48000 -c:v h264 -t 3 out.mp4 -y
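Alternatively (a hedged sketch, not from the original notes): keep a silent audio track in the mp4 by encoding the anullsrc input to AAC, which mp4 can store
ffmpeg -f lavfi -i color=c=black:s=1366x768:r=25/1 -f lavfi -i anullsrc=cl=mono:r=48000 -c:v h264 -c:a aac -t 3 out.mp4 -y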
-f lavfi selects the lavfi (libavfilter) virtual input device, so filters like color and anullsrc can be used as inputs
otherwise error:
[mp3 @ 00000241ac434180] Invalid audio stream. Exactly one MP3 audio stream is required.
Could not write header for output file #0 (incorrect codec parameters ?): Invalid argument
Error initializing output stream 0:0 --
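To see which options a lavfi source accepts, query ffmpeg's built-in filter help:
ffmpeg -h filter=color
ffmpeg -h filter=anullsrc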
Overlay a small image at the center of the blank video
@echo off
rem ffmpeg -i out.mp3 -i allportPersonality.jpeg -filter_complex "overlay=20:20" output.mp4
rem *Error:* Cannot find a matching stream for unlabeled input pad 1 on filter Parsed_overlay_0
rem reason: the first input was an mp3, which has no video stream for the overlay
ffmpeg -i out.mp4 -i allportPersonality.jpeg -filter_complex "overlay=20:20" output.mp4 -y
rem place the overlay at the center: (main_w-overlay_w)/2:(main_h-overlay_h)/2
ffmpeg -i out.mp4 -i allportPersonality.jpeg -filter_complex "overlay=(main_w-overlay_w)/2:(main_h-overlay_h)/2" output.mp4 -y
Overlay a big image onto a smaller canvas
rem canvas size 1366x768, traitTheory.jpeg image size 841x1024, so scale the image to fit the BG
source
rem Here is a generic filter expression for scaling (maintaining aspect ratio) and padding any source size to any target size:
rem scale=min(iw*TARGET_HEIGHT/ih\,TARGET_WIDTH):min(TARGET_HEIGHT\,ih*TARGET_WIDTH/iw),pad=TARGET_WIDTH:TARGET_HEIGHT:(TARGET_WIDTH-iw)/2:(TARGET_HEIGHT-ih)/2
rem scale=iw*min(1366/iw\,768/ih):ih*min(1366/iw\,768/ih)
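A concrete instantiation of the generic expression above for the 1366x768 target (a sketch; the padded output filename is made up):
ffmpeg -y -i traitTheory.jpeg -vf "scale=min(iw*768/ih\,1366):min(768\,ih*1366/iw),pad=1366:768:(1366-iw)/2:(768-ih)/2" traitTheoryPadded.png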
ffmpeg -i out.mp4 -i traitTheory.jpeg -filter_complex "overlay=0:0" output.mp4 -y
rem overlay in center
ffmpeg -i out.mp4 -i traitTheory.jpeg -filter_complex "[1:v]scale=iw*min(1366/iw\,768/ih):ih*min(1366/iw\,768/ih)[ovrl],[0:v][ovrl]overlay=(main_w-overlay_w)/2:(main_h-overlay_h)/2" output2.mp4 -y
Create a blank video and overlay in one go
ffmpeg -f lavfi -i color=size=1366x768:duration=3:rate=25:color=blue -i allportPersonality.jpeg -filter_complex ^
"[0:v][1:v]overlay=(main_w-overlay_w)/2:(main_h-overlay_h)/2" output.mp4 -y
Concat 2 files
ffmpeg -y -i "output.mp4" -i "output2.mp4" -filter_complex "[0:v][1:v]concat=n=2:v=1 [outv]" -map "[outv]" -y concat.mp4
Create blank videos, overlay the 2 images and concat, all in one go
ffmpeg -f lavfi -i color=size=1366x768:duration=3:rate=25:color=blue -i allportPersonality.jpeg -f lavfi -i color=size=1366x768:duration=3:rate=25:color=black -i traitTheory.jpeg -filter_complex "[0:v][1:v]overlay=(main_w-overlay_w)/2:(main_h-overlay_h)/2[v0]; [3:v]scale=iw*min(1366/iw\,768/ih):ih*min(1366/iw\,768/ih)[ovrl],[2:v][ovrl]overlay=(main_w-overlay_w)/2:(main_h-overlay_h)/2[v1];[v0][v1]concat=n=2:v=1[outv]" -map "[outv]" -y concatSingle.mp4
Splitting commands across lines in a Windows .bat file
echo one ^
two ^
three ^
four ^
five ^
*
Splitting ffmpeg concat command into multiple lines
ffmpeg -f lavfi -i color=size=1366x768:duration=0.1:rate=25:color=FF00FF -i allportPersonality.jpeg ^
-f lavfi -i color=size=1366x768:duration=0.1:rate=25:color=FFFF00 -i traitTheory.jpeg ^
-filter_complex ^
"[0:v][1:v]overlay=(main_w-overlay_w)/2:(main_h-overlay_h)/2[v0];^
[3:v]scale=iw*min(1366/iw\,768/ih):ih*min(1366/iw\,768/ih)[ovrl],^
[2:v][ovrl]overlay=(main_w-overlay_w)/2:(main_h-overlay_h)/2[v1];^
[v0][v1]concat=n=2:v=1[outv]" -map "[outv]" -y concatSingle.mp4
Combine 3 videos
ffmpeg -y -i "input1.mkv" -i "bumper.mkv" -i "input2.mkv" -filter_complex '[0:v]scale=1280x720:force_original_aspect_ratio=increase[v0],pad=1280x720:max(0\,(ow-iw)/2):max(0\,(oh-ih)/2):black[v0]; [1:v]scale=1280x720:force_original_aspect_ratio=increase[v1],pad=1280x720:max(0\,(ow-iw)/2):max(0\,(oh-ih)/2):black[v1]; [2:v]scale=1280x720:force_original_aspect_ratio=increase[v2],pad=1280x720:max(0\,(ow-iw)/2):max(0\,(oh-ih)/2):black[v2]; [v0][0:a][v1][1:a][v2][2:a]concat=n=3:v=1:a=1 [outv] [outa]' -map "[outv]" -map "[outa]" 'output.mkv'
Split a file into multiple parts
ffmpeg -y \
-i "Robotica_1080.mkv" \
-filter_complex "[0:v]split=2[v1][v2]; \
[v1]scale=640:360,setpts=PTS-STARTPTS[vout1]; \
[v2]trim=10:15,scale=640:360,setpts=PTS-STARTPTS[vout2]; \
[0:a]asplit=2[a1][a2]; \
[a1]anull,asetpts=PTS-STARTPTS[aout1]; \
[a2]atrim=10:15,asetpts=PTS-STARTPTS[aout2]" \
-map [vout1] -map [aout1] "1.mp4" \
-map [vout2] -map [aout2] "2.mp4"
Note 'PTS'
PTS stands for Presentation TimeStamp. See "What is video timescale, timebase, or timestamp in ffmpeg?"
The setpts filter evaluates its expression and assigns the result as the timestamp of the frame it is currently processing.
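For example (a small sketch, not from the original notes), halving every timestamp makes the video play at double speed:
ffmpeg -y -i input.mp4 -filter:v "setpts=0.5*PTS" -an doubleSpeed.mp4
rem audio is dropped here (-an); it would need the atempo filter to keep pace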
Split video only (no audio) into 2 files
ffmpeg -y ^
-i "concatSingle.mp4" ^
-filter_complex "[0:v]split=2[v1][v2];[v1]trim=0:3,scale=640:360,setpts=PTS-STARTPTS[vout1];[v2]trim=4:6,scale=640:360,setpts=PTS-STARTPTS[vout2]" -map [vout1] "1.mp4" -map [vout2] "2.mp4"
Fade Overlay text
ffmpeg -y -i input.mp4 -filter_complex "[0]split[base][text];[text]drawtext=fontfile=HelveticaNeue.ttf:text='Testing': fontcolor=white:\
fontsize=40: box=1: boxcolor=black@0.5:boxborderw=5:x=(w-text_w)/2:y=(h-text_h)/2,format=yuva444p,fade=t=in:st=2:d=1:alpha=1,fade=t=out:st=3:d=1:alpha=1[subtitles]; \
[base][subtitles]overlay" output.mp4
Applying it to a blank video
ffmpeg -y -f lavfi -i color=size=1366x768:duration=10:rate=25:color=FF00FF -filter_complex ^
"[0]split[base];drawtext=fontfile=C\\:/Windows/Fonts/arial.ttf:text='Testing': fontcolor=white:fontsize=40: x=(w-text_w)/2:y=(h-text_h)/2,format=yuva444p,fade=t=in:st=2:d=3:alpha=1[subtitles];[base][subtitles]overlay" videoWithFadingText.mp4
Another
ffmpeg -y -i vlogOpener1366x768.mp4 -filter_complex "[0]split[base][text];[text]drawtext=fontfile=C\\:/Windows/Fonts/ITCBLKAD.TTF:fontsize=85:fontcolor=black:x=(w-text_w)/2:y=(h-text_h-200)/2:text='Learning Psychology',format=yuva444p,fade=t=in:st=3:d=3:alpha=1[subtitles];[base][subtitles]overlay" openerWithFadingText.mp4
Fade in and out with box
ffmpeg -y -f lavfi -i color=size=1366x768:duration=5:rate=25:color=FFFFFF -filter_complex ^
"[0]split[base];drawtext=fontfile=C\\:/Windows/Fonts/arial.ttf:text='Credits': fontcolor=black:fontsize=40: box=1:^ boxcolor=FFFF00@0.5:boxborderw=5:x=(w-text_w)/2:y=(h-text_h)/2,format=yuva444p,fade=t=in:st=0:d=1:alpha=1,fade=t=out:st=3:d=1:alpha=1[subtitles];[base][subtitles]overlay" videoWithFadingText2.mp4
Multiple lines of text
ffmpeg -i test_in.avi -vf "[in]drawtext=fontsize=20:fontcolor=White:fontfile='/Windows/Fonts/arial.ttf':text='onLine1':x=(w)/2:y=(h)/2, drawtext=fontsize=20:fontcolor=White:fontfile='/Windows/Fonts/arial.ttf':text='onLine2':x=(w)/2:y=((h)/2)+25, drawtext=fontsize=20:fontcolor=White:fontfile='/Windows/Fonts/arial.ttf':text='onLine3':x=(w)/2:y=((h)/2)+50[out]" -y test_out.avi
ffmpeg -y -f lavfi -i color=size=1366x768:duration=8:rate=25:color=FFFFFF -filter_complex "[0]split[base][textLayer];[textLayer]drawtext=fontsize=20:fontcolor=black:fontfile=C\\:/Windows/Fonts/arial.ttf:text='onLine1':x=(w)/2:y=(h)/2, drawtext=fontsize=20:fontcolor=black:fontfile=C\\:/Windows/Fonts/arial.ttf:text='onLine2':x=(w)/2:y=((h)/2)+25, drawtext=fontsize=20:fontcolor=black:fontfile=C\\:/Windows/Fonts/arial.ttf:text='onLine3':x=(w)/2:y=((h)/2)+50,format=yuva444p,fade=t=in:st=3:d=1:alpha=1,fade=t=out:st=6:d=1:alpha=1[subtitles];[base][subtitles]overlay" multiLineFading.mp4
Get Width X Height
rem Example 1: With keys / variable names
ffprobe -v error -show_entries stream=width,height -of default=noprint_wrappers=1 input.mp4
rem output will be
rem width=1280
rem height=720
rem Example 2: Just width x height
ffprobe -v error -select_streams v -show_entries stream=width,height -of csv=p=0:s=x input.m4v
rem output will be
rem 1280x720
rem Example 3: JSON
ffprobe -v error -select_streams v -show_entries stream=width,height -of json ^
input.mkv
rem output will be
{
"programs": [
],
"streams": [
{
"width": 1280,
"height": 720
}
]
}
rem Example 4: JSON Compact
ffprobe -v error -select_streams v -show_entries stream=width,height -of json=compact=1 input.mkv
rem output will be
{
"programs": [
],
"streams": [
{ "width": 1280, "height": 720 }
]
}
rem Example 5: XML
ffprobe -v error -select_streams v -show_entries stream=width,height -of xml input.mkv
rem output will be
<?xml version="1.0" encoding="UTF-8"?>
<ffprobe>
<programs>
</programs>
<streams>
<stream width="1280" height="720"/>
</streams>
</ffprobe>
rem Example 6: Just get width and height
ffprobe -v quiet -show_entries "stream=width" -of "compact=p=0:nk=1" B1U1.mp4
rem output will be
1366
ffprobe -v quiet -show_entries "stream=height" -of "compact=p=0:nk=1" B1U1.mp4
rem output will be
768
Get width & height into variables inside a .bat file
rem inside a .bat file the for-loop variable must be written %%i (a single %i only works at the interactive prompt)
for /f %%i in ('ffprobe -v quiet -show_entries "stream=width" -of "compact=p=0:nk=1" input.mkv') do set width=%%i
for /f %%i in ('ffprobe -v quiet -show_entries "stream=height" -of "compact=p=0:nk=1" input.mkv') do set height=%%i
for /f %%i in ('ffprobe -v quiet -show_entries "stream=width" -of "compact=p=0:nk=1" traitTheory.jpeg') do set width=%%i
for /f %%i in ('ffprobe -v quiet -show_entries "stream=height" -of "compact=p=0:nk=1" traitTheory.jpeg') do set height=%%i
echo %width%
echo %height%
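The captured values can then be reused in later commands; a small sketch (output name made up, not in the original notes) that shrinks the probed image to half its size:
ffmpeg -y -i traitTheory.jpeg -vf "scale=%width%/2:%height%/2" traitTheoryHalf.jpg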
Logical operators in bat file
source
You can do AND
with nested conditions:
if %age% geq 2 (
if %age% leq 12 (
set class=child
)
)
or:
if %age% geq 2 if %age% leq 12 set class=child
You can do OR
with a separate variable:
set res=F
if %hour% leq 6 set res=T
if %hour% geq 22 set res=T
if "%res%"=="T" (
set state=asleep
)
Compare ops in .bat file (a usage sketch follows the list)
EQU - equal
NEQ - not equal
LSS - less than
LEQ - less than or equal
GTR - greater than
GEQ - greater than or equal
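A small usage sketch (thresholds made up) combining these with the ffprobe variables from above:
if %width% LSS 1366 echo source is narrower than the 1366x768 canvas
if %height% GEQ 768 if %width% GEQ 1366 echo source covers the whole canvas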
ffmpeg draw a line from point to point
source
There is no drawline filter in ffmpeg, so 'drawbox' with a height (or width) of 1 pixel can be used instead:
ffmpeg -i input.mp4 -vf drawbox=x=10:y=10:w=692:h=1:color=red output.mp4
ffmpeg -y -i 1.mp4 -vf drawbox=x=10:y=10:w=100:h=1:color=red lined1.mp4
Slanting line (45 degrees)
ffmpeg -i in.mp4 -filter_complex "color=red:s=490x490,geq=lum='p(X,Y)':a='if(eq(X,Y),255,0)'[line];[0][line]overlay=10:10:shortest=1" out.mp4
add silent audio
ffmpeg -y -f lavfi -i aevalsrc=0 -f lavfi -i color=size=1366x768:duration=5:rate=25:color=FF00FF -c:v copy -c:a aac -map 0 -map 1:v -shortest output.mov
for mp4 it has to be a 2-step process
ffmpeg -y -f lavfi -i color=size=1366x768:duration=5:rate=25:color=FF00FF silentVideo.mp4
ffmpeg -y -i silentVideo.mp4 -f lavfi -i anullsrc=channel_layout=stereo:sample_rate=44100 -c:v copy -shortest output.mp4
Replace, add & mix audio
replace
ffmpeg -i video.mp4 -i audio.wav -map 0:v -map 1:a -c:v copy -shortest output.mp4
add
ffmpeg -i video.mkv -i audio.mp3 -map 0 -map 1:a -c:v copy -shortest output.mkv
mix
ffmpeg -y -i silentVideo.mp4 -i "G:\studio\soundEffects\ting.mp3" -i E:\projects\tests\ReactJS\vlogMaker\BGs\Uncaria-Espionage.mp3 -filter_complex "[1:a][2:a]amix=2[aud_fin]" -map 0:v -map [aud_fin] -c:v copy -c:a aac -b:a 256k -t 8 "diffAudioDiffTimes.mp4"
rem Mix Multiple audios at multiple times
ffmpeg -y -i output.mp4 -i "G:\studio\soundEffects\ting.mp3" -i "G:\studio\soundEffects\waterDrops.mp3" -filter_complex "[1]adelay=1000|1000[aud1];[2]adelay=3000|3000[aud2];[0][aud1][aud2]amix=3" -c:v copy outWithMutlipleMix.mp4
rem for mp4, a silent-video input is fine, but a raw (lavfi) video input is not; use the 2-step process in that case
ffmpeg -y -i silentVideo.mp4 -i "G:\studio\soundEffects\ting.mp3" -i "G:\studio\soundEffects\waterDrops.mp3" -filter_complex "[1]adelay=1000|1000[aud1];[2]adelay=3000|3000[aud2];[aud1][aud2]amix=2[aud_fin]" -map 0:v -map [aud_fin] -c:v copy -c:a aac -b:a 256k "diffAudioDiffTimes.mp4"
Using different parts of the same audio file
ffmpeg -y -i silentVideo.mp4 -i "G:\studio\soundEffects\Cute Sound Effects for Editing No copyright 128 kbps.mp3" -filter_complex "[1]atrim=44:45,adelay=1000|1000[aud1];[1]atrim=42:43,adelay=3000|3000[aud2];[aud1][aud2]amix=2[aud_fin]" -map 0:v -map [aud_fin] -c:v copy -c:a aac -b:a 256k "diffAudioDiffTimes.mp4"
Mix short clips along with the BG track
rem BG : E:\projects\tests\ReactJS\vlogMaker\BGs\Uncaria-Espionage.mp3
ffmpeg -y -i silentVideo.mp4 -i "G:\studio\soundEffects\Cute Sound Effects for Editing No copyright 128 kbps.mp3" -i "E:\projects\tests\ReactJS\vlogMaker\BGs\Uncaria-Espionage.mp3" -filter_complex "[1]atrim=44:45,adelay=1000|1000[aud1];[1]atrim=42:43,adelay=3000|3000[aud2];[aud1][aud2][2]amix=3[aud_fin]" -map 0:v -map [aud_fin] -c:v copy -c:a aac -b:a 256k -shortest "diffAudioDiffTimes.mp4"
Trim videos with filter_complex without losing audio
ffmpeg -y -i B1U1WithAudioAndWatermark.mp4 -filter_complex ^
"[0:v] trim=start=5:end=10, setpts=PTS-STARTPTS [v0];^
[0:a]atrim=start=5:end=10,asetpts=PTS-STARTPTS [a0]" -map "[v0]" -map "[a0]" vtrimmed.mp4
ffmpeg -y -i B1U1WithAudioAndWatermark.mp4 -filter_complex ^
"[0:v] trim=start=15:end=20, setpts=PTS-STARTPTS [v1];^
[0:a]atrim=start=15:end=20,asetpts=PTS-STARTPTS [a1]" -map "[v1]" -map "[a1]" vtrimmed2.mp4
Concat videos without losing audio
ffmpeg -i B1U1WithAudioAndWatermark.mp4 -filter_complex ^
"[0:v] trim=start=5:end=10, setpts=PTS-STARTPTS [v0];^
[0:a]atrim=start=5:end=10,asetpts=PTS-STARTPTS [a0];^
[0:v] trim=start=15:end=20, setpts=PTS-STARTPTS [v1];^
[0:a]atrim=start=15:end=20,asetpts=PTS-STARTPTS [a1];^
[v0][a0][v1][a1]concat=n=2:v=1:a=1 [v] [a]" -map "[v]" -map "[a]" vtrimmedConcat.mp4
Scrolling WaterMark
ffmpeg -i "outputWithAudio.mp4" -filter_complex drawtext="fontfile=C\\:/Windows/Fonts/ITCBLKAD.TTF:fontsize=20:fontcolor=orange@.5:box=1:boxcolor=white@0.1:shadowcolor=black@0.5:shadowx=2:shadowy=2:text=Water Mark Text':y=h-line_h-2:x=(mod(1*n\,w+tw)-tw)" "outputWithAudioThumbail.mp4" -y