#!/usr/bin/python3
# -*- coding: utf-8 -*-
import vapoursynth as vs
import descale


def get_scale_offsets(scaled_w, scaled_h, origin_w, origin_h,
                      offset_l, offset_t, offset_w, offset_h):
    """
    Inverts scaling offsets (much like inverting the scaling itself).

    Calculates the scale offsets needed to bring an image back to its original
    form after it was scaled to the specified dimensions using the specified
    offsets.
    """
    # input parameters
    scaled = [scaled_w, scaled_h]
    origin = [origin_w, origin_h]
    scaled_off = [offset_l, offset_t, offset_w, offset_h]

    # source offsets, from lengths (w / h) to crop-style (negative values)
    for i, (org_l, off_l) in enumerate(zip(origin, scaled_off[:2]), 2):
        # If the input is positive, it represents a length rather than a crop-
        # style offset from the bottom right. The output of this step, even
        # when positive, is always treated as a crop-style offset, so no
        # special handling is needed afterwards.
        if scaled_off[i] > 0:
            scaled_off[i] -= org_l - off_l

    # the actual scaling of the offsets
    scales = 2 * [scale / original for original, scale in zip(origin, scaled)]
    off = [-offset * scale for offset, scale in zip(scaled_off, scales * 2)]

    # target offsets, from crop-style back to lengths
    for i, (scaled_l, off_l) in enumerate(zip(scaled, off[:2]), 2):
        # The input must be a crop-style offset from the bottom right, even if
        # it happens to be positive. Only the positive values strictly need
        # converting, but for consistency every value is converted from a
        # crop-style offset to a length.
        off[i] += scaled_l - off_l
    return tuple(off)
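
# A worked example (illustrative only; the 1080p -> 720p numbers are made up
# for this sketch and are not part of the original script): suppose a
# 1920x1080 source was descaled to 1280x720 with src_left=0.5 and the full
# 1920x1080 source window. The offsets needed to scale the 720p image back
# up to 1080p would then be roughly
#
#     get_scale_offsets(1280, 720, 1920, 1080, 0.5, 0, 1920, 1080)
#     # ~ (-1/3, 0, 1280, 720)
#
# i.e. src_left becomes -0.5 * (1280 / 1920) and the source window is the
# whole 1280x720 frame.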


def MaskDetail(clip, final_width, final_height, RGmode=3, cutoff=None,
               gain=0.75, expandN=2, inflateN=1, blur_more=False,
               src_left=0, src_top=0, src_width=0, src_height=0,
               kernel='bilinear', invkstaps=4, taps=4, mode='normal',
               lowpasskernel='blackman', lowpassintaps=4, lowpassouttaps=3,
               lowpassthr=None, exportlowpass=False, pclevelthr=None, b=1/3, c=1/3):
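    """
    Builds a detail mask from the luma plane. In 'normal' mode the clip is
    descaled to final_width x final_height and scaled back up, and the
    difference from the source drives the mask; in 'pclevel' mode the luma is
    simply thresholded with pclevelthr; the 'lowpass' / 'lowpasspc' variants
    first dampen the clip with a lowpass round trip. The difference is then
    thresholded with cutoff, amplified by gain, expanded expandN times,
    inflated inflateN times, resampled to the final dimensions, and returned
    at the input bit depth.
    """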
    depth = clip.format.bits_per_sample
    # Thresholds default to 16-bit values; user-supplied values are taken to
    # be in the input clip's bit depth and are rescaled to the 16-bit range.
    scale = (2 ** 16 - 1) / (2 ** depth - 1)
    if cutoff is None:
        cutoff = 17990
    else:
        cutoff *= scale
    if lowpassthr is None:
        lowpassthr = 1542
    else:
        lowpassthr *= scale
    if pclevelthr is None:
        pclevelthr = 59881
    else:
        pclevelthr *= scale

    def lowpassLut16(x):
        # Deadzone around neutral 0x8000: diffs within +/-lowpassthr are
        # zeroed, larger ones are shrunk towards neutral by lowpassthr.
        p = x - 0x8000
        if p > 0 and p - lowpassthr > 0:
            return x - lowpassthr
        elif p <= 0 and p + lowpassthr < 0:
            return x + lowpassthr
        else:
            return 0x8000

    def luma16(x):
        # Map a MakeDiff value centred on 0x8000 to roughly |diff| * 16,
        # folding back into the 16-bit range on overflow.
        x <<= 4
        value = x & 0xFFFF
        return 0xFFFF - value if x & 0x10000 else value

    def f16(x):
        # Zero values below cutoff; amplify the rest by gain (slightly more
        # for larger values), clamped to the 16-bit maximum.
        if x < cutoff:
            return 0
        result = x * gain * (0x10000 + x) / 0x10000
        return min(0xFFFF, int(result))

    def pclevelLut16(x):
        # Keep only values above pclevelthr.
        return x if x > pclevelthr else 0

    core = vs.core
    startclip = core.fmtc.bitdepth(clip, bits=16)
    original = (startclip.width, startclip.height)
    target = (final_width, final_height, src_left, src_top, src_width, src_height)

    if mode.startswith('lowpass'):  # lowpass and lowpasspc
        twice = tuple(2 * o for o in original)
        lowpass = core.fmtc.resample(startclip, *twice, kernel=lowpasskernel, taps=lowpassintaps)
        lowpass = core.fmtc.resample(lowpass, *original, kernel=lowpasskernel, taps=lowpassouttaps)
        difflow = core.std.MakeDiff(startclip, lowpass, 0)
        if exportlowpass:
            return core.std.Lut(difflow, function=luma16)
        difflow = core.rgvs.RemoveGrain(difflow, mode=[1])
        difflow = core.std.Lut(difflow, function=lowpassLut16)
        startclip = core.std.MergeDiff(startclip, difflow, 0)

    if mode.startswith('pc') or mode.endswith('pc'):  # pclevel and lowpasspc
        diff = core.std.Lut(startclip, function=pclevelLut16)
    else:
        temp = descale.Descale(startclip, *target[:2], kernel=kernel, taps=taps, b=b, c=c)
        temp = core.fmtc.resample(temp, *original, kernel=kernel, taps=taps, a1=b, a2=c)
        diff = core.std.MakeDiff(startclip, temp, 0)

    mask = core.std.Lut(diff, function=luma16).rgvs.RemoveGrain(mode=[RGmode])
    mask = core.std.Lut(mask, function=f16)
    for _ in range(expandN):
        mask = core.std.Maximum(mask, planes=[0])
    for _ in range(inflateN):
        mask = core.std.Inflate(mask, planes=[0])
    mask = core.fmtc.resample(mask, *target, taps=taps)
    if blur_more:
        mask = core.rgvs.RemoveGrain(mask, mode=[12, 0, 0])
    mask = core.std.ShufflePlanes(mask, planes=0, colorfamily=vs.GRAY)
    return core.fmtc.bitdepth(mask, bits=depth, dmode=1)
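
# Example usage (a minimal sketch, not part of the original script; the source
# filter, file name and 720p target below are placeholder assumptions):
#
#     import vapoursynth as vs
#     core = vs.core
#     src = core.lsmas.LWLibavSource('source.mkv')   # hypothetical input clip
#     mask = MaskDetail(src, 1280, 720, kernel='bicubic', b=0, c=0.5)
#     mask.set_output()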