VapourSynth CopyFromMe Template
#from vapoursynth import core #alternative import style on newer VapourSynth releases
import vapoursynth as vs
core = vs.get_core() #deprecated on newer VapourSynth releases; use core = vs.core instead
#import mvsfunc as mvf #only import mvsfunc if using BM3D
#load video
vidPath=r"C:\Users\User\video.mkv" #absolute path is required
vidPath=r"myscript.avs"
#ffms2
#video = core.ffms2.Source(source=vidPath)
#libav
video = core.lsmas.LWLibavSource(source=vidPath, format='YUV420P8')
video = core.lsmas.LWLibavSource(source=vidPath, format='YUV422P8') #4:2:2, commonly used when loading interlaced video
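#For interlaced sources it can also help to tag the field order explicitly so later filters (e.g. VFM below) see it; a sketch assuming a top-field-first source:
#video = core.std.SetFieldBased(video, 2) #2 = top field first, 1 = bottom field first, 0 = progressive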
#avisynth
video = core.avsr.Import(vidPath)
#Use avisynth "Eval" to create interleaved MSB/LSB Avisynth script.
lines = '''
LoadPlugin("C:/plugins/DGDecode.dll")
LoadPlugin("C:/plugins/Dither.dll")
Import("C:/scripts/Dither.avsi")
MPEG2Source("D:/source.d2v")
Dither_convert_8_to_16()
Dither_resize16(1280, 720)
Dither_convey_yuv4xxp16_on_yvxx()
'''
video = core.avsr.Eval(lines=lines, bitdepth=16)
#second example
lines = '''
video="D:/use/forward/slashes/video.mkv"
LWLibavSource(video)
'''
video = core.avsr.Eval(lines)
#debugging
video = core.std.Transpose(video) #swaps width and height, an easy visual check that the script is actually being applied
print(core.version())
video.set_output()
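#Another quick check is printing the clip itself, which lists its format, dimensions, frame count and frame rate (assumption: the script is run somewhere print output is visible, e.g. via vspipe or an editor preview):
#print(video)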
#crop
video = core.std.CropRel(video, 4, 4, 2, 2) #left, right, top, bottom
#Resize
video = core.resize.Spline36(video, width=1024, height=576)
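#A sketch of an aspect-ratio-preserving resize; targetWidth is a placeholder, and the height is rounded to mod 2 to stay valid for 4:2:0 chroma:
#targetWidth = 1280
#video = core.resize.Spline36(video, width=targetWidth, height=round(video.height * targetWidth / video.width / 2) * 2)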
#ContinuityFixer (Fixes edges)
#Syntax: core.edgefixer.ContinuityFixer(video, [left_luma,left_u,left_v], [0,0,0], [0,0,0], [0,0,0], [10,5,5]) #left, top, right, bottom, radius
#repair the two leftmost columns on each plane, with a radius of 10 for the luma plane and 5 for the chroma planes
#fix = core.edgefixer.ContinuityFixer(video, [2,2,2], [0,0,0], [0,0,0], [0,0,0], [10,5,5]) #magic
#IVTC
video = core.vivtc.VFM(video, order=1, cthresh=10) #order=1 is top field first (use order=0 for bottom field first); try field=0 or field=2 on different sources
#decimation (drop the duplicate frames left over after IVTC)
video = core.vivtc.VDecimate(video)
#Denoising filters
#Temporal
#Syntax: flux.SmoothT(clip [, int temporal_threshold=7, int[] planes=[0, 1, 2]])
video = core.flux.SmoothT(video,temporal_threshold=3)
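#FluxSmooth also ships a combined spatio-temporal variant; a sketch with equally conservative thresholds:
#Syntax: flux.SmoothST(clip [, int temporal_threshold=7, int spatial_threshold=7, int[] planes=[0, 1, 2]])
video = core.flux.SmoothST(video, temporal_threshold=3, spatial_threshold=3)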
#Spatial
#waifu2x (CUDA/cuDNN variant) requires 32-bit float input for YUV
#https://github.com/HomeOfVapourSynthEvolution/VapourSynth-Waifu2x-caffe
#For OpenCL, instead check out: https://github.com/HomeOfVapourSynthEvolution/VapourSynth-Waifu2x-w2xc
#Note: Internally, the algorithm always scales by 2, so setting scale=2 and using a custom resizer to downscale can produce a better result.
video = core.fmtc.bitdepth(video, bits=32)
video = core.caffe.Waifu2x(video, scale=1, noise=1, model=3, block_w=512, block_h=512, cudnn=True) #no upscaling (the internal 2x result is brought back down by the default downscaler); noise can be -1 or 1-3
video = core.caffe.Waifu2x(video, scale=2, noise=2, model=3, block_w=512, block_h=512, cudnn=True) #2x upscaling; noise can be -1 or 1-3
video = core.caffe.Waifu2x(video, scale=2, noise=-1, model=0, block_w=512, block_h=512, cudnn=True, tta=False) #no denoising, upscale using model=0
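#If scale=2 was used purely for quality (see the note above), a sketch for bringing the clip back to its original size with an explicit resizer:
#video = core.resize.Spline36(video, width=video.width // 2, height=video.height // 2)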
#BM3D, CPU, requires RGB input
video = core.fmtc.resample(video, css="444")
#Be sure to pick the right color matrix: mat="601", 709, 2020, 240, FCC, YCgCo, RGB
video = core.fmtc.matrix(video, mat="709", col_fam=vs.RGB) #"col_fam=vs.RGB" can also be "col_fam=vs.YUV"
video = mvf.BM3D(video, sigma=20, radius1=1) #requires "import mvsfunc as mvf" (see the commented import at the top)
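#If the BM3D path above was used, the clip is still RGB; a sketch for converting back to 4:2:0 YUV before the output section (mat="709" is an assumption, match it to the conversion above):
video = core.fmtc.matrix(video, mat="709", col_fam=vs.YUV)
video = core.fmtc.resample(video, css="420")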
#KNLMeansCL, OpenCL, check out wiki for syntax
#https://github.com/Khanattila/KNLMeansCL/wiki
#knlm.KNLMeansCL(clip clip, int d, int a, int s, float h, string channels, int wmode, float wref, clip rclip, string device_type, int device_id, bool info)
video = core.knlm.KNLMeansCL(video, h=1.2, device_type="gpu", d=1)
video = core.knlm.KNLMeansCL(video, h=3, device_type="gpu", a=10, d=2)
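#The "channels" parameter (documented on the wiki above) selects which planes are processed; a sketch that denoises luma and chroma in separate passes with different strengths:
video = core.knlm.KNLMeansCL(video, h=1.2, d=1, channels="Y", device_type="gpu")
video = core.knlm.KNLMeansCL(video, h=0.6, d=1, channels="UV", device_type="gpu")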
#trim
#Syntax: std.Trim(clip clip[, int first=0, int last, int length])
video = core.std.Trim(video, first=0, last=50)
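#Trims can be spliced back together with "+"; a sketch that keeps two ranges and drops the frames in between:
#video = core.std.Trim(video, first=0, last=50) + core.std.Trim(video, first=100, last=150)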
#output video
video = core.fmtc.bitdepth(video, bits=8)
video.set_output()
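#The finished script is normally piped into an encoder with vspipe; an example command line (the x264 flags and file names are placeholders):
#  vspipe --y4m "myscript.vpy" - | x264 --demuxer y4m -o "encoded.mkv" -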