Modders' QoL and improvements for clothes
* Added an automatic scaling method for undersized/oversized textures
* Added the ability to dye clothes without having to convert them to greyscale
* Added scale-ratio support for whitespace generation
* Improved whitespace generation for objects with nested child displayables
parent 14c678117d
commit 012084ca10
```diff
@@ -132,10 +132,10 @@ init python:
             matrix = self.char.body.hue
 
             processors = {
-                "skin": lambda file, _: Transform(file, matrixcolor=matrix),
-                "armfix": lambda file, _: Transform(file, matrixcolor=matrix),
+                "skin": lambda file, _: Transform(file, maxsize=(1010, 1200), matrixcolor=matrix),
+                "armfix": lambda file, _: Transform(file, maxsize=(1010, 1200), matrixcolor=matrix),
                 "colored": lambda file, n: self.apply_color(file, int(n)),
-                "default": lambda file, _: Image(file),
+                "default": lambda file, _: Transform(file, maxsize=(1010, 1200)),
             }
 
             layers = self.get_layers(hash, subpath)
```
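The added `maxsize=(1010, 1200)` argument is the "automatic scaling" from the commit message: the skin, armfix and plain default layers are now drawn capped to the expected 1010x1200 clothing-sheet size instead of at whatever size the texture file happens to be. A minimal sketch of the arithmetic such a cap implies, assuming it applies one uniform factor so the sheet fits the reference size (the helper below is hypothetical, not part of the mod or of Ren'Py):

```python
# Hypothetical helper illustrating the fit math behind a maxsize=(1010, 1200) cap.
def fit_scale(size, maxsize=(1010, 1200)):
    """Return the uniform factor that fits a (w, h) texture inside maxsize."""
    w, h = size
    max_w, max_h = maxsize
    # One factor for both axes keeps the aspect ratio intact.
    return min(max_w / w, max_h / h)

# A 2x-upscaled 2020x2400 sheet would be drawn at half size,
# landing back on the expected 1010x1200 layout.
assert fit_scale((2020, 2400)) == 0.5
```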
```diff
@@ -206,7 +206,30 @@ init python:
             """Takes image and int layer number. Used internally."""
             try:
                 c = self.color[n]
-                return Transform(img, matrixcolor=TintMatrix(c)*OpacityMatrix(c.alpha))
+
+                # Method 1
+                # r,g,b = c.rgb
+                # average = max(min((r + g + b) / 3, 0.6666), 0.3333)
+
+                # Method 2
+                # lightness = max(min(c.hls[1], 0.6666), 0.3333)
+                # average = (lightness, lightness, lightness)
+
+                # Method 3
+                # r,g,b = c.rgb
+                # luster = (max(r,g,b)+min(r,g,b)) / 2
+                # c = c.replace_lightness(luster)
+                # average = (luster, luster, luster)
+
+                # Method 4
+                average = (0.3333, 0.3333, 0.3333)
+
+                return Transform(img, maxsize=(1010, 1200), matrixcolor=SepiaMatrix(c, desat=average)*OpacityMatrix(c.alpha))
             except TypeError:
                 print(f"Item doesn't support colors but was supplied with a list; Try removing numbered files in its directory:\n{self.__repr__()}")
                 d = At(Frame(Text("TypeErr", color="#ffff00"), tile=True), blink_repeat)
```
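This hunk is what lets clothes be dyed without a greyscale conversion step: instead of tinting an already-grey source with `TintMatrix`, the new matrixcolor desaturates and tints in a single pass via `SepiaMatrix(c, desat=average)`. The commented-out "methods" are alternative ways of picking the desaturation weights; Method 4 (a flat 1/3 average) is the one left active. As a rough per-pixel restatement of the composed matrix, assuming `SepiaMatrix` desaturates with the given weights before applying the tint and glossing over exactly which matrix carries the dye's alpha (the helper is hypothetical, for illustration only):

```python
# Hypothetical per-pixel sketch of SepiaMatrix(c, desat=average) * OpacityMatrix(c.alpha).
def dye_pixel(pixel, dye, weights=(0.3333, 0.3333, 0.3333)):
    """pixel and dye are (r, g, b, a) tuples with components in 0.0..1.0."""
    r, g, b, a = pixel
    dr, dg, db, da = dye
    # Desaturate with the chosen weights -- the "Method 4" flat average.
    grey = r * weights[0] + g * weights[1] + b * weights[2]
    # Tint the grey value by the dye colour and fold in the dye's alpha.
    return (grey * dr, grey * dg, grey * db, a * da)

# Example: a mid-grey pixel dyed with an opaque red.
# dye_pixel((0.6, 0.6, 0.6, 1.0), (1.0, 0.2, 0.2, 1.0)) -> (~0.6, ~0.12, ~0.12, 1.0)
```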
```diff
@@ -9,16 +9,25 @@ init python:
     def crop_whitespace(path):
         # Return box from whitespace_dict, or calculate and store it
 
+        def _find_file_surface(obj):
+            if isinstance(obj, str):
+                return obj, Image(obj).load()
+            elif isinstance(obj, im.Image):
+                return obj.filename, obj.load()
+            elif isinstance(obj, Transform):
+                return _find_file_surface(obj.child)
+
         if path in whitespace_dict:
             box = whitespace_dict[path]
         else:
-            if isinstance(path, str):
-                surf = Image(path).load()
-            elif isinstance(path, im.Image):
-                surf = path.load()
-            elif isinstance(path, Transform):
-                surf = path.child.load()
+            path, surf = _find_file_surface(path)
+            size = surf.get_size()
             box = tuple(surf.get_bounding_rect())
+
+            if size[0] != 1010:
+                ratio = size[0] / 1010
+                box = tuple(v/ratio for v in box)
             whitespace_dict[path] = box
         return box
```
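Two of the commit-message bullets live in this hunk: the nested `_find_file_surface` helper now recurses through `Transform` wrappers (nested child displayables) until it reaches the underlying file and surface, and the ratio block rescales the detected bounding box of an off-width sheet back into the coordinates of the 1010-wide reference. A standalone restatement of that normalisation, as a hypothetical helper for illustration, with the box following pygame's Rect order (x, y, w, h):

```python
# Hypothetical helper mirroring the ratio normalisation in crop_whitespace.
def normalize_box(box, sheet_width, reference_width=1010):
    """Rescale an (x, y, w, h) bounding box from sheet_width to reference_width coordinates."""
    if sheet_width == reference_width:
        return box
    ratio = sheet_width / reference_width
    return tuple(v / ratio for v in box)

# A box measured on a 2x (2020-wide) sheet maps back to reference coordinates.
assert normalize_box((100, 200, 400, 600), 2020) == (50.0, 100.0, 200.0, 300.0)
```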