Using:
# Imports
import vapoursynth as vs
# getting Vapoursynth core
core = vs.core
import site
import os
# Adding torch dependencies to PATH
path = site.getsitepackages()[0]+'/torch_dependencies/'
path = path.replace('\\', '/')
os.environ["PATH"] = path + os.pathsep + os.environ["PATH"]
# Loading Plugins
core.std.LoadPlugin(path="i:/Hybrid/64bit/vsfilters/Support/fmtconv.dll")
core.std.LoadPlugin(path="i:/Hybrid/64bit/vsfilters/SourceFilter/LSmashSource/vslsmashsource.dll")
# source: 'G:\TestClips&Co\files\test.avi'
# current color space: YUV420P8, bit depth: 8, resolution: 640x352, fps: 25, color matrix: 470bg, yuv luminance scale: limited, scanorder: progressive
# Loading G:\TestClips&Co\files\test.avi using LWLibavSource
clip = core.lsmas.LWLibavSource(source="G:/TestClips&Co/files/test.avi", format="YUV420P8", stream_index=0, cache=0, prefer_hw=0)
# Setting color matrix to 470bg.
clip = core.std.SetFrameProps(clip, _Matrix=5)
clip = clip if not core.text.FrameProps(clip,'_Transfer') else core.std.SetFrameProps(clip, _Transfer=5)
clip = clip if not core.text.FrameProps(clip,'_Primaries') else core.std.SetFrameProps(clip, _Primaries=5)
# Setting color range to TV (limited) range.
clip = core.std.SetFrameProp(clip=clip, prop="_ColorRange", intval=1)
# making sure frame rate is set to 25
clip = core.std.AssumeFPS(clip=clip, fpsnum=25, fpsden=1)
clip = core.std.SetFrameProp(clip=clip, prop="_FieldBased", intval=0)
original = clip
from vsrealesrgan import RealESRGAN
# adjusting color space from YUV420P8 to RGBH for VsRealESRGAN
clip = core.resize.Bicubic(clip=clip, format=vs.RGBH, matrix_in_s="470bg", range_s="limited")
# resizing using RealESRGAN
clip = RealESRGAN(clip=clip, device_index=0, trt=True, trt_cache_path="G:/Temp", num_streams=4) # 2560x1408
# resizing 2560x1408 to 640x352
# adjusting resizing
clip = core.resize.Bicubic(clip=clip, format=vs.RGBS, range_s="limited")
clip = core.fmtc.resample(clip=clip, w=640, h=352, kernel="lanczos", interlaced=False, interlacedd=False)
original = core.resize.Bicubic(clip=original, width=640, height=352)
# adjusting output color from: RGBS to YUV420P8 for x264Model
clip = core.resize.Bicubic(clip=clip, format=vs.YUV420P8, matrix_s="470bg", range_s="limited", dither_type="error_diffusion")
original = core.text.Text(clip=original,text="Original",scale=1,alignment=7)
clip = core.text.Text(clip=clip,text="Filtered",scale=1,alignment=7)
stacked = core.std.StackHorizontal([original,clip])
# Output
stacked.set_output()
I get:
Failed to evaluate the script:
Python exception: Ran out of input
Traceback (most recent call last):
File "src\cython\vapoursynth.pyx", line 2866, in vapoursynth._vpy_evaluate
File "src\cython\vapoursynth.pyx", line 2867, in vapoursynth._vpy_evaluate
File "C:\Users\Selur\Desktop\test_2.vpy", line 32, in
clip = RealESRGAN(clip=clip, device_index=0, trt=True, trt_cache_path="G:/Temp", num_streams=4) # 2560x1408
File "I:\Hybrid\64bit\Vapoursynth\Lib\site-packages\torch\autograd\grad_mode.py", line 27, in decorate_context
return func(*args, **kwargs)
File "I:\Hybrid\64bit\Vapoursynth\Lib\site-packages\vsrealesrgan\__init__.py", line 284, in RealESRGAN
module = [torch.load(trt_engine_path) for _ in range(num_streams)]
File "I:\Hybrid\64bit\Vapoursynth\Lib\site-packages\vsrealesrgan\__init__.py", line 284, in
module = [torch.load(trt_engine_path) for _ in range(num_streams)]
File "I:\Hybrid\64bit\Vapoursynth\Lib\site-packages\torch\serialization.py", line 795, in load
return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args)
File "I:\Hybrid\64bit\Vapoursynth\Lib\site-packages\torch\serialization.py", line 1002, in _legacy_load
magic_number = pickle_module.load(f, **pickle_load_args)
EOFError: Ran out of input
Works fine with trt=False.
Any idea what is going wrong there?
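In case it helps with narrowing it down: EOFError: Ran out of input is what pickle raises when it hits end-of-file immediately, so torch.load(trt_engine_path) appears to be reading an empty file. My guess (unverified) is that an earlier, interrupted TensorRT engine build left a zero-byte cache file in G:/Temp. A quick standalone check I can run outside VapourSynth (the cache file naming is an assumption, so it just lists everything in the folder):

# Rough diagnostic sketch: list the TRT cache folder and flag zero-byte files.
# Deleting an empty engine file should force vsrealesrgan to rebuild it.
import glob
import os

cache_dir = "G:/Temp"  # same folder as trt_cache_path in the script above
for f in sorted(glob.glob(os.path.join(cache_dir, "*"))):
    size = os.path.getsize(f)
    flag = "  <-- empty" if size == 0 else ""
    print(f"{f}  ({size} bytes){flag}")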