import mvsfunc as mvf
import kagefunc as k
import havsfunc as haf
import fvsfunc as fvf
import vsTAAmbk as taa
import nnedi3_rpow2 as edi2
from functools import partial
import vapoursynth as vs

core = vs.core

# Source: Made in Abyss vol. 1 BD, stream 00004.
src = core.lsmas.LWLibavSource("/mnt/hephaestos/BDMV/MiA/vol1/Disc1/BDROM/BDMV/STREAM/00004.m2ts")
src32 = src.fmtc.bitdepth(bits=32)

# Light temporal BM3D denoise in float. `ref` is a 16-bit copy that is used
# much later to restore native-1080p scenes; `y` is the float luma we descale.
dn = mvf.BM3D(src32, sigma=[2], radius1=1)
ref = mvf.Depth(dn, 16)
y = k.getY(dn)


def descale(y: vs.VideoNode, h: int) -> "tuple[vs.VideoNode, vs.VideoNode]":
    """Descale ``y`` to height ``h`` and return ``(descaled, diff)``.

    ``diff`` is the absolute difference between the original luma and the
    descale re-upscaled to 1080p, with PlaneStats attached so ``select``
    can compare the per-frame error of each candidate height.
    """
    down = y.descale.Debicubic(k.getw(h), h)
    up = down.resize.Bicubic(1920, 1080)
    diff = core.std.Expr([y, up], 'x y - abs').std.PlaneStats()
    return down, diff


# One (descaled, diff) pair per candidate height 846..848.
debic_listp = [descale(y, h) for h in range(846, 849)]
debic_list = [a[0] for a in debic_listp]
debic_props = [a[1] for a in debic_listp]


# I suppose this requires an explanation, so here goes:
# Abyss seems to be special in that the different scenes are drawn in resolutions ranging from 846p to 848p.
# This simply descales to each of those and selects the most appropriate for each frame.
# FrameEval() must return a constant size clip, so a few things have to be done inside this.
def select(n, debic_list, f):
    """Per-frame callback: pick the candidate height with the lowest
    PlaneStatsAverage error, rescale it back to 1080p, and attach a detail
    mask (areas where the descale failed) as a frame property."""
    errors = [x.props.PlaneStatsAverage for x in f]
    y_deb = debic_list[errors.index(min(errors))]
    # simple detail mask with binarize
    dmask = core.std.Expr([y, y_deb.resize.Bicubic(1920, 1080)],
                          'x y - abs 0.025 > 1 0 ?').std.Maximum()
    # tfw nnedi still can't into float
    nnedi_y = mvf.Depth(y_deb, 16)
    nnedi_y = edi2.nnedi3_rpow2(nnedi_y, nns=4, correct_shift=True,
                                width=1920, height=1080).fmtc.bitdepth(bits=32)
    # can be returned instead to verify that this actually works,
    # i.e. picks different resolutions for each frame for the mask as well
    # dmask = core.text.Text(dmask, y_deb.height)
    # Attach the mask as a frame property to the rescaled clip.
    # We return both here so we don't need two separate FrameEvals.
    return core.std.ClipToProp(nnedi_y, dmask)


# descaled luma (FrameEval passes n and the list of frames from prop_src as f)
y_deb = core.std.FrameEval(y, partial(select, debic_list=debic_list), prop_src=debic_props)
# extract the mask again
dmask = core.std.PropToClip(y_deb)


# Some edges still have leftover aliasing. We can catch parts of those with a mask later on.
# It doesn't catch the whole aliasing (not even close), so we're doing this to interrupt the
# otherwise continuous lines of tcanny to then grow our mask only into the affected edges.
# This function just produces a mask with lots of black squares.
def square() -> vs.VideoNode:
    """Return a 10x8 GRAYS tile: white border with a 2x2 black center."""
    top = core.std.BlankClip(length=len(y), format=vs.GRAYS, height=4, width=10, color=[1])
    side = core.std.BlankClip(length=len(y), format=vs.GRAYS, height=2, width=4, color=[1])
    center = core.std.BlankClip(length=len(y), format=vs.GRAYS, height=2, width=2, color=[0])
    t1 = core.std.StackHorizontal([side, center, side])
    return core.std.StackVertical([top, t1, top])


# and now loop our small square to make a 1080p mask from it (192*10 x 108*10... 192*10=1920, 108*10=1080)
line = core.std.StackHorizontal([square()] * 192)
full_squares = core.std.StackVertical([line] * 108)

# This will also be used for AA later, as it finds native 1080p credits and other oversharpened elements.
# The show barely has any other "real" 1080p stuff (apart from full 1080p scenes which we handle later),
# and most of what it has needs some light aliasing.
# The dmask (which is the scaling error that was above a certain threshold) only affects a few pixels wherever it occurs.
# This grows it into an edgemask (with Hysteresis) with little black squares everywhere to stop the Hysteresis from filling everything.
# If artifacts only occur around a small part of an edge, they will grow into the edge but only until the next square,
# rather than filling it and all of the connected lines (likely almost all in the frame).
# Grow the per-frame scaling-error mask into the tcanny edgemask, bounded by
# the black-square grid so Hysteresis can't flood the entire line art.
# NOTE(review): the `_format=` spelling (leading underscore) is preserved from
# the original; the usual resize kwarg is `format=` — confirm this runs.
artifacts = core.misc.Hysteresis(
    dmask.resize.Bicubic(1920, 1080, _format=vs.GRAYS),
    core.std.Expr([k.getY(src32).tcanny.TCanny(sigma=3), full_squares], 'x y min'))

ret_raw = k.retinex_edgemask(src)
ret = ret_raw.std.Binarize(30).rgvs.RemoveGrain(3)

# Subtract both masks (edgemask - artifacts/1080p stuff with some maximum) so we get
# the areas that we can actually descale; then binarize the result to get rid of blur
# as well as some potential limited/full range issues that are easily circumvented by doing this.
mask = core.std.Expr(
    [ret.resize.Point(_format=vs.GRAYS),
     k.iterate(artifacts, core.std.Maximum, 3)],
    'x y -').std.Binarize(0.4)
# mask now marks all areas that *can* be descaled
mask = mask.std.Inflate().std.Convolution(matrix=[1] * 9).std.Convolution(matrix=[1] * 9)

# Merge the descale only where it is safe, reattach chroma, drop to 16 bit.
merged = core.std.MaskedMerge(y, y_deb, mask)
merged = core.std.ShufflePlanes([merged, src32], [0, 1, 2], vs.YUV)
merged = mvf.Depth(merged, 16)

# Yeah, eedi2 in $current_year. Don't ask me; it seems to work quite well here.
aa = taa.TAAmbk(merged, aatype='Eedi2')
artifacts = k.iterate(artifacts.resize.Point(_format=vs.GRAY16), core.std.Maximum, 2)

# Create a white clip with 40 black pixels at the bottom to work around a bug in taa
# that causes artifacts near the bottom.
# If anyone reads this: you can probably remove this part. It's not 2017 anymore.
fix_bottom = k.squaremask(artifacts, width=1920, height=1040, offset_x=0, offset_y=0)
aamask = core.std.Expr([artifacts, fix_bottom], 'x y min')
merged = core.std.MaskedMerge(merged, aa, aamask)


def restore_original(n, f, clip, orig):
    """Per-frame callback: blend back the untouched `orig` when the descale
    error is large.  Below 0.05 keep the rescale, above 0.1 keep the original,
    and linearly crossfade in between ((err - 0.05) * 20 maps 0.05..0.1 -> 0..1)."""
    if f.props.PlaneStatsAverage < 0.05:
        return clip
    elif f.props.PlaneStatsAverage > 0.1:
        return orig
    return core.std.Merge(clip, orig, (f.props.PlaneStatsAverage - 0.05) * 20)


# Just revert the entire scaling if the difference is too big. This should catch the
# 1080p scenes (like the entire ED).
# And yes, I'm so confident in this that I don't manually exclude the ED.
# Attach PlaneStats to the error mask so restore_original can read the
# per-frame average error, then revert to `ref` where the descale failed.
dmask = dmask.std.PlaneStats()
merged = merged.std.FrameEval(partial(restore_original, clip=merged, orig=ref), prop_src=dmask)

# Deband, but protect edges/detail: where the raw retinex edgemask is strong
# (>= 20, boosted 3x and inflated) keep the un-debanded clip.
db = merged.f3kdb.Deband(y=40, cb=40, cr=40, grainy=16, grainc=0, output_depth=16)
merged = core.std.MaskedMerge(
    db, merged,
    ret_raw.std.Expr('x 20 < 0 x 3 * ?').std.Inflate().resize.Point(_format=vs.GRAY16))

# Final 10-bit output; trim 24 frames of padding from each end.
merged = mvf.Depth(merged, 10)
merged[24:-24].set_output()