Last active
June 6, 2022 17:47
-
-
Save YukinoAi/36a9f4c0deb193b1113c8fd10d5d8fc7 to your computer and use it in GitHub Desktop.
AviSynth CopyFromMe Template
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#load video | |
vid="c:\myvideo.mkv" | |
aud=vid | |
#load normal video, | |
#FFMS2 does not handle yuv422p or yuv444p video well, regardless of codec, or 10-12 bit-depth. | |
FFVideoSource(vid) | |
FFIndex(vid) AudioDub(FFVideoSource(vid), FFAudioSource(aud)) | |
FFIndex(vid) AudioDub(FFVideoSource(vid,fpsnum=24000,fpsden=1001,colorspace="yv12"), FFAudioSource(aud,track=0)) | |
#load .ts or .m2ts, Note: Try LibAV if FFVideoSource does not work. | |
AudioDub(LWLibavVideoSource(vid), LWLibavAudioSource(aud)) | |
AudioDub(LWLibavVideoSource(vid), LWLibavAudioSource(aud,stream_index=0)) | |
LWLibavVideoSource(vid,format="yuv420p8") | |
LWLibavVideoSource(vid,format="YUY2") #4:2:2 interleaved, use to load interlaced video | |
#load raw video (uses vfw codecs), Note: Not usually frame accurate. Convert to MKV and use FF/LibAV source filters instead. | |
#Note: x264 VFW encoder has various issues. Do not use if at all possible. | |
AVISource(vid) | |
ConvertToYV12() | |
#load .vob (via DGIndex index) | |
vid="R:\vobs\MyDVD_01.d2v" | |
aud="R:\vobs\MyDVD_01.wav" | |
AudioDub(MPEG2Source(vid),FFAudioSource(aud)) | |
MPEG2Source(vid) | |
#Load existing VapourSynth (.vpy) script | |
#Either Import: | |
VSImport(source=vid, stacked=false) | |
#Or interleave .vpy script into AviSynth script | |
script = """ | |
import vapoursynth as vs | |
core = vs.get_core() | |
vidPath=r"C:\Users\User\vid.mkv" | |
video = core.ffms2.Source(source=vidPath) | |
video2 = core.lsmas.LWLibavSource(source=vidPath,format='YUV420P8') | |
video3 = core.avsr.Import(r'script.avs') | |
video2.FluxSmoothT(3) | |
video = core.fmtc.bitdepth(video ,bits=32) | |
#video = core.caffe.Waifu2x(video, scale=1, noise=1, model=3, block_w=128, cudnn=True) | |
video = core.fmtc.bitdepth(video ,bits=8) | |
video.set_output() #or | |
video.set_output(index=0) | |
video2.set_output(index=1) | |
""" | |
VSEval(script, index=1) | |
#Above example uses waifu2x-caffe. Remember that .vpy files do not support audio. | |
#For additional filters see: http://www.vapoursynth.com/doc/pluginlist.html | |
#Notable vapoursynth exclusives are: waifu2x (GPU), BM3D (CPU) | |
#Same as above but simpler .vpy script into AviSynth script | |
script = """ | |
import vapoursynth as vs | |
core = vs.get_core() | |
vidPath=r"C:\Users\User\vid.mkv" | |
video = core.lsmas.LWLibavSource(source=vidPath,format='YUV420P8') | |
video = core.edgefixer.ContinuityFixer(video, [2,2,2], [2,2,2], [2,2,2], [2,2,2], [10,5,5]) #magic | |
video = core.fmtc.bitdepth(video ,bits=32) | |
#video = core.caffe.Waifu2x(video, scale=2, noise=1, model=3, block_w=128, cudnn=True) | |
#video = core.resize.Spline36(video ,width=1024,height=576) | |
video = core.fmtc.bitdepth(video ,bits=8) | |
video.set_output() | |
""" | |
VSEval(script, index=0) | |
#Working with Groups of Pictures (GOPs): | |
#To create: ffmpeg -i "myvideo.avs" "out\video-%06d.png" | |
#To encode directly: ffmpeg -r 24000/1001 -i "out\image-%06d.png" -c:v huffyuv out.huffyuv.mkv | |
#To load: (basic) | |
images="relative\path\out_waifu2_ed1\video-%06d.png" | |
ImageReader(images,start=1,end=2734) #be sure to specify last frame | |
AssumeFPS(24000,1001) #adjust framerate to fit actual source | |
ConvertToYV12(matrix="Rec709") #be sure to specify a color matrix: (Rec601), PC.601, Rec709, PC.709 | |
#load from series of pictures (more flexible), but requires http://avisynth.nl/index.php/ImageSequence | |
images="R:\corona\sequence\requires\absolute\path\to\images_%06d.png" | |
CoronaSequence(images,start=1,stop=2206) #always outputs rgb32 at 25fps | |
AssumeFPS("ntsc_film") | |
ConvertToYV12(matrix="Rec709") #be sure to specify a color matrix: (Rec601), PC.601, Rec709, PC.709 | |
AudioDub(last,FFAudioSource("myAudioFile.mp3")) | |
#IVTC if source was FILM, but was telecined to interlace it (to conform to NTSC standards) | |
#Note: Do not IVTC if "Force FILM" is enabled in DGIndex for DVDs (which is for progressive streams labeled as interlaced) | |
Telecide() #basic, all that is needed sometimes | |
TFM(slow=2, pp=6, clip2=TDeint(mode=2, type=3)) | |
TFM(order=1) | |
TFM(cthresh=8) #default settings, higher quality than Telecide | |
tdecimate(mode=1) #use mode=0 or mode=1 to remove extra frames | |
tdecimate(mode=1,hybrid=0) #use hybrid=0 for NTSC Anime | |
#Or deinterlace if source really is interlaced VIDEO and IVTC fails, pick one of the following: | |
#AssumeFrameBased() #Prerequisites if interlaced video was encoded as progressive, Use SelectEven() after | |
#AssumeFieldbased().AssumeTFF().AssumeBFF() | |
bob() #Vanilla bob(), completely removes all interlacing, but doubles fps | |
TDeint(mode=2, type=3) #does excellent job while keeping original FPS | |
TDeint(mode=1, type=3) #smart bobbing, doubles fps | |
TempGaussMC_beta2() #slower than TDeint but better quality | |
QTGMC(Preset="Slower") #better quality than TempGaussMC() | |
QTGMC(Preset="Very Slow",SourceMatch=2,Sharpness=1.0,TR2=2) #Does not stack well with TemporalDegrain() | |
#If using bob() or similar to completely remove interlacing, remember to remove the duplicate frames: | |
SelectEven() #blindly pick every other frame | |
SelectOdd() | |
srestore(frate=30000/1001,omode=6).decimate(cycle=2,quality=0) #for use with blended frames, it nearly does an okay job, still needs decimation after | |
#Dealing with blended sources | |
#To check if source material is blended, look for blended frames after using SeparateFields().selectEven() | |
srestore(frate=23.976)#used after IVTC and decimation to deal with blended material, reliably causes ghosting in very dark frames/scenes especially during scene changes | |
#Stabilization | |
#use virtualDub's Deshaker http://www.guthspot.se/video/deshaker.htm | |
#Settings: Either default or Scale=full, pixels=all, deep analysis if <55%, skip <15%, edge compensation: none, stabLevel=8000, max: hor=0.3-1;vert=0.3-1;rot=.001;zoom=0.0001 | |
#Export from vdub using Lagarith/Huffyuv/x264(lossless) and import using LWLibavVideoSource("myvideo_stabilized.h264.crf0.mkv") or FFSource(). | |
#merge stabilized video and original video per-scene using fclip(original,234,653) or scene filtering syntax | |
#Denoise | |
#Denoise (temporal) | |
TemporalDegrain() #Best temporal denoiser period. But very slow. Works well <720p or when sole filter in a script. But can cause ghosting in very grainy scenes. Does include a minor spatial denoising component. | |
FluxSmoothT(3).FluxSmoothT(3) #fast, even when stacked. Removes rain but otherwise works well. Disable for low contrast grainy scenes due to line blurring. | |
FluxSmoothT() #default=7, consider using this to generate a preclip for use with a masking script with SMDegrain | |
MCTemporalDenoise(sigma=20) #chroma noise? | |
MCDegrainSharp(frames=3) #very light grain removal and causes ghosting, unimpressive | |
SMDegrain(tr=6,thSad=1000,refinemotion=true) #can work very well but needs tuning for each source and a preclip, defaults: tr=3 and thSad=400,refinemotion=off | |
#SMDegrain preclip example | |
preClip=FluxSmoothT().removegrain(11) #The more denoising on the preclip, the more noise SMDegrain will remove. Make sure all details are not lost in the preclip. | |
SMDegrain(tr=6,thSAD=500,str=1.2,contrasharp=40,prefilter=pre,refinemotion=true) #very similar to TemporalDegrain(), but with less ghosting | |
SMDegrain(tr=6,thSad=600,prefilter=preClip,refinemotion=true) | |
#Denoise (spatial, spatial-temporal) | |
MSmooth(threshold=2,strength=2,mask=false,chroma=true) #Do not go above 3/3 or 4/2. Messes up the chroma. Does not have a temporal component. | |
FluxSmoothST(temporal_threshold=7, spatial_threshold=7) #Do not increase spatial_threshold. | |
dfttest(sigma=16,tbsize=1) #Pretty on some sources, ugly on others. tbsize=1 disables temporal aspect. Default=16, 4-32 is okay depending on scene. Works well with scenefiltering techniques. | |
dfttest(sigma=8,tbsize=5) #tbsize>1 enables temporal filtering. Unstabilized video and scenes with high motion can look bad, esp with a high sigma (>=8). | |
dfttestMC(sigma=4) #motion compensated wrapper script for dfttest. Looks very nice. Do not go much higher than sigma=4 without scenefiltering. | |
KNLMeansCL(h=1.2,device_type="gpu") #very high quality and very fast, low values (h=0.6) can look very nice, this and bm3d for vapoursynth are "go to" filters, Wiki: https://github.com/Khanattila/KNLMeansCL/wiki | |
KNLMeansCL(h=2,device_type="gpu",a=8,d=1) #Try not to go much above h=3-4 depending on source, a is pixel radius, d is temporal radius (quadratic) | |
#Crop pixels from resolution. | |
#crop left-top-right-bottom | |
crop(8,0,-8,0) #NTSC spec | |
crop(2,2,-2,-2) | |
crop(2,0,-2,0) #typical, Alternative to cropping is VapourSynth's core.edgefixer.ContinuityFixer() | |
#Resize (spline) | |
Spline64Resize(1280,720) #might want to sharpen->downscale instead of downscale->sharpen | |
#Resize (nnedi3) | |
nnedi3_resize16(target_width=740,target_height=556,qual=2,nns=4) | |
#Resize (waifu2) | |
#Use vapoursynth implementation of waifu2x and import as source. | |
#Alternative: -end script here, 1) output as series of pictures 2) use waifu2x-caffe with nvidia-gpu cudnn accel 3) reimport as series of pictures | |
#https://github.com/lltcggie/waifu2x-caffe/releases - Do not crop before using waifu2 to upscale, crop after. | |
#Note: waifu2 can also do spatial denoising instead of or in addition to upscaling. | |
#To create: ffmpeg -i "myvideo.avs" "out_waifu2_ed1\video-%06d.png" | |
#Resize (lazy) | |
#http://www.l33tmeatwad.com/anime-upscale | |
#Line Darkeners | |
Hysteria(strength=3,lowthresh=2,highthresh=5,showmask=false,luma_cap=255).Spline64Resize(1280,720) #preferred | |
FastLineDarkenMOD2(strength=100,luma_cap=200,threshold=10,thinning=0) | |
#Sharpen (line) | |
UnFilter(100,100) #introduces massive amounts of noise | |
MSharpen(threshold=4,strength=200,mask=false) #causes haloing and some noise, so might want to supersample or downsample afterwards | |
LSFmod(strength=300,edgemaskHQ=true,ss_x=1.0,ss_y=1.0) #Disables super sampling, causes artifacts | |
LSFmod(strength=300,edgemaskHQ=true) #less prone to artifacting | |
LSFmod(strength=200,edgemaskHQ=true).LSFmod(strength=60,edgemaskHQ=true) #Stacking can look better, esp when mixed with AA | |
#op/ed credit wizardry by 06_taro | |
#Line sharpeners tend to distort op/ed credits. Combine the following with scene filtering techniques to avoid filtering them. | |
m_credit = mt_lut("x 235 > 255 0 ?") # "x > 235 ? 255 : 0" | |
m_credit=m_credit.mt_expand(mode= mt_circle(6)) #raise or lower the radius for the mask | |
mt_merge(LSFmod(strength=300,edgeMaskHQ=true),last,m_credit,luma=true) | |
#Sharpen (warp) #thin lines and can help with AA, Avoid unless you know what you are doing. | |
aWarpSharp2(blur=4,depth=4,type=1,thresh=128) | |
aWarpSharp2(blur=8,depth=8,type=1,thresh=160) #Do not go above 8/8, even 4/4 is somewhat high | |
WarpSharp() #a very low thresh can also help | |
#Halo remover (white around black lines caused by sharpening or sharpening+upscaling) | |
#Uses warping internally. Do not use aWarpSharp2 if using a halo remover. Suggestion: Do not cause halos in the first place. | |
DeHalo_Alpha(brightstr=1.0) | |
DeHalo_Alpha(rx=1.7,ry=1.7) #lower than default | |
FineDehalo(darkstr=0,rx=1.5,ry=1.5) #wrapper script for DeHalo_Alpha | |
#AA filters | |
HiAA(threads=8) #https://forum.doom9.org/showthread.php?t=169898, preferred, HiAA can use sangnom2 (default), eedi3, nnedi3, or eedi3+sangnom2 | |
HiAA(ssf=1.0,threads=8,aa=nnedi3) #ssf=1.0 looks better but can cause artifacts | |
maa2(aa=100, threads=4) | |
SharpAAMCmod() | |
#Fix colour | |
ColorYUV(gain_u=7,gain_v=6) | |
#Tweak(sat=1.2,cont=1.00,maxSat=20) | |
Tweak(sat=1.2,maxSat=22) | |
Levels(0, 0.95, 255, 0, 255) #the second value is the luma, "0.95" means "lower the luma by 5%" | |
#Fix Colour Example: | |
Tweak(sat=1.2,maxSat=25) | |
#Fix Colour Example2: | |
Tweak(sat=1.0,cont=1.05,hue=-8) | |
ColorYUV(gain_u=8,gain_v=0) | |
Tweak(bright=10,sat=1.3,cont=1.00,maxSat=30) | |
#Levels can also create all white/black clips for masks, e.g. mt_merge(x=,y=,xOffset=,yOffset=... | |
blackClip=originalClip.Levels(0, 1.0, 255, 0, 0,coring=false) | |
whiteClip=originalClip.Levels(255, 10.0, 0, 255, 255,coring=false) | |
#Deband (random dithering) | |
gradfun2db(thr=1.3) #works and never causes artifacts | |
f3kdb() | |
#deband (ordered dithering) #preferred | |
gradfun3() #"flat" look, compresses better, careful with fine lines at higher settings | |
f3kdb(dither_algo=2) #"grainy" look, increases required bitrate | |
#reduce number of total frames to those below (inclusive) | |
trim(0,35423) | |
##Scenefiltering syntax (RemapFrames) #preferred | |
original=last | |
KNLMeansCL().LSFMod().gradfun3() #base filtering | |
ReplaceFramesSimple(last, original.TemporalDegrain(), mappings=" | |
[4 433] | |
[434 725] | |
") | |
KNLMeansClip=original.KNLMeansCL(h=2,device_type="gpu") | |
ReplaceFramesSimple(last, KNLMeansClip, mappings=" | |
[4 433] | |
[434 725] | |
") | |
ReplaceFramesSimple(last, original.LSFmod(), mappings=" | |
[4 433] | |
[434 725] | |
") | |
#run gradfun3() twice on this section of video, the second time at high settings | |
ReplaceFramesSimple(last, last.gradfun3(thr=1.1), mappings=" | |
[434 725] | |
") | |
##Scenefiltering syntax (frametools) | |
replaceClipFromStart(ncopClip,20) | |
replaceClipFromEnd(ncedClip,35530) | |
fclip(original,324,538) | |
sceneFilter(0,2205,"ncop") | |
#Scene-filter group for the OP range (invoked via sceneFilter(0,2205,"ncop") above):
#binds the passed clip to the implicit "last" clip, then applies LSFmod line sharpening at defaults.
function ncop(clip clip2){last=clip2 | |
LSFMod() | |
} | |
#Scene-filter helper: lowers luma by 5% (second Levels argument 0.95 is the gamma/luma scale,
#per the Levels example earlier in this template). Clip is bound to the implicit "last".
function fixLuma1(clip clip2){last=clip2 | |
Levels(0, 0.95, 255, 0, 255) | |
} | |
redoSceneFilter(original,32239,34434,"ed") | |
#Scene-filter group for the ED range (invoked via redoSceneFilter(original,32239,34434,"ed") above):
#simply re-applies the fixLuma1() luma correction to that scene.
function ed(clip clip2){last=clip2 | |
fixLuma1() | |
} | |
""" #start frametools documentation# | |
Notice: Merging clips is possible only if both clips have audio, or if neither do. Load audio for both or wait until after merging to AudioDub(vid,aud). | |
#fix frames by replacement - replace frame start for number of frames "length" | |
Syntax: ff(clip clp, int start, int "length") | |
Usage: ff(234,1) | |
#reverse fix frame - replaces frame from start backwards | |
Syntax: rff(clip clp, int start, int "length") | |
Usage: rff(234,2) | |
#merge fix frame - creates a hybrid frame from the adjacent frames | |
Dependency: http://avisynth.org.ru/badframes/badframes.html | |
Syntax: mff(clip clp, int frame) | |
Usage: mff(234) | |
#fix scene change - replaces frame specified, and the one before with length specified by lengthPrior, lengthPost | |
Dependency: ff() and rff() | |
Syntax: fs(clip clp, int start, int "lengthPrior",int "lengthPost") | |
Usage1: fs(234,1,1) | |
Usage2: fs(456,1,2) | |
#replace the frames of the current clip with the frames of another clip starting from frame start/end | |
Syntax: function replaceClipFromStart(clip clip1, clip clip2, int start) #used to specify the first frame to replace | |
Syntax: function replaceClipFromEnd(clip clip1, clip clip2, int end) #used to specify the last frame to replace | |
Usage: | |
FFVideoSource("my show.mkv") | |
ncop="my show ncop.mkv" | |
nced="my show nced.mkv" | |
ncopClip=FFVideoSource(ncop).trim(0,2263).Spline64Resize(1280,720) | |
ncedClip=FFVideoSource(nced).trim(0,2250).Spline64Resize(1280,720) | |
Spline64Resize(1280,720) | |
replaceClipFromStart(ncopClip,20) | |
replaceClipFromEnd(ncopClip,2205) | |
replaceClipFromStart(ncedClip,33359) | |
replaceClipFromEnd(ncedClip,35530) | |
#fixes clip - by replacing its frames with the frames from another clip | |
Note: frame numbers/count must match from both clips - Meant for merging raw and stabilized video clips together. | |
- Can also be used to "undo" the filtering for a scene or apply a specific set of filters to portions of the video. | |
Syntax: function fclip(clip clip1, clip clip2, int start, int end) | |
Usage: | |
original=FFVideoSource(myvideo.mkv) | |
AVISource(myvideo_stabalized.avi) | |
fclip(original,324,538) | |
Usage2: | |
FFVideoSource("myvideo.mkv") | |
original=last | |
Tweak(sat=1.2) | |
fclip(original,50,550) | |
Usage3: | |
LWLibavVideoSource("myvid.mkv",format="yuv420p8") | |
fixed=last.crop(2,0,0,0).Spline64Resize(854, 576) | |
fclip(fixed,234,642) | |
#sceneFilter - applies a set of filters to parts of a clip, from frameStart to frameEnd, defined by a function named "filterGroup" | |
Syntax: function sceneFilter(clip clip1, int frameStart, int frameEnd, string filterGroup) | |
#Usage: | |
sceneFilter(0,2205,"op") | |
function op(clip clip2){last=clip2 | |
LSFMod() | |
} | |
sceneFilter(2205,2205,"scene1") | |
function scene1(clip clip2){last=clip2 | |
TemporalDegrain() | |
LSFMod() | |
} | |
last | |
#meant to "redo" the filtering for a scene separately from entire video - useful for filtering specific scenes differently (like ops/eds) | |
Dependencies: fclip, sceneFilter | |
Syntax: redoSceneFilter(clip clip1, clip original, int frameStart, int frameEnd, string filterGroup) | |
Usage: | |
FFVideoSource("myvideo.mkv") | |
original=last | |
Tweak(sat=1.2) | |
redoSceneFilter(original,32239,34434,"ed") | |
function ed(clip clip2){last=clip2 | |
Tweak(sat=1.0) | |
} | |
last | |
#The idea is to define a rectangle area to replace. Can work similar to FreezeFrame() but for parts of a frame or can be used to embed one clip inside of another. | |
Dependencies: Masktools2 | |
function ReplaceBox(clip clp, int "offsetX", int "offsetY", int "width", int "height", int "startFrame", int "endFrame", int "sourceFrame", clip "clip2", bool "show") | |
Usage: | |
ReplaceBox(0, 0, 200, 300, show=true) | |
#Translation: Starting at coordinate (0,0) (top left), create a 200x300 pixel box. | |
ReplaceBox(250, 400, 200, 300, show=true) | |
#Translation: Start at coordinate (250,400) and use a 200x300 box. This means the bottom right corner of the box will extend to (450,700). | |
ReplaceBox(0, 0, 400, 500, startFrame=50, endFrame=250, sourceFrame=49) | |
#Translation: Start from the top left and use a 400x500 box. The contents displayed for frames 50-250 will be from frame 49. | |
ReplaceBox(250, 400, 200, 300, 100, 120, 50) | |
#Translation: Start at coordinate (250,400) and use a 200x300 box. The box only affects frames 100-120, using the contents from frame 50. | |
ReplaceBox(250, 400, 200, 300, clip2=last.Greyscale()) | |
#Translation: Start at coordinate (250,400) and use a 200x300 box. The box contents are from a different clip with identical properties. | |
ReplaceBox(250, 400, 200, 300, 100, 120, 50, clip2=last.Greyscale()) | |
#Translation: Start at coordinate (250,400) and use a 200x300 box. The box freezes frames 100-120, using the contents from frame 50 from clip2. The normal frames from clip2 are for replacements outside of [100-120]. Note: Specifying clip2 will always replace the contents for the entire clip regardless of the specified range of frames to freeze. | |
""" #end documentation# |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment