@frnsys
Last active September 3, 2024 13:59
Blender auto-merging objects and textures
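The code below spans several files: the UV-atlas repacking module (imported elsewhere as `atlas`), a short usage snippet, the object/texture merging module (`merge`), and shared helpers (`util`).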
"""
To install additional libraries:
cd /opt/blender/3.2/python/bin
./python3.10 -m ensurepip
./python3.10 -m pip install pillow shapely networkx cairosvg
Python headers are required for compiling nest2D.
Download the source for your Blender Python version
(e.g. 3.10.8) from <https://www.python.org/downloads/source/>.
Extract, then:
cp -r Python-3.10.8/Include/* /opt/blender/3.2/python/include/python3.10/
To install nest2D:
git clone git@github.com:frnsys/nest2D.git
sudo apt install libboost-dev libpolyclipping-dev libnlopt-cxx-dev
cd nest2D
git submodule update --init --recursive
/opt/blender/3.2/python/bin/python3.10 -m pip install .
"""
import bpy
import math
import bmesh
import networkx as nx
from shapely import affinity
from shapely.ops import unary_union
from shapely.validation import make_valid
from shapely.geometry import Polygon, MultiPoint, MultiPolygon
from nest2D import Point, Box, Item, nest
from collections import defaultdict
from PIL import Image, ImageDraw
from . import util
DEBUG_SCALE = 800 # Scale up UV coords for drawing
DEBUG_COLORS = [
(111, 45, 214),
(214, 45, 90),
(45, 62, 214),
(45, 214, 118),
(214, 135, 45),
(242, 150, 224),
(76, 87, 117),
]
UV = tuple[float, float]
class UVIndexer:
"""UVs don't have any ids;
we need to know when two UVs are actually the same UV.
We do this by seeing if two UVs are within some small range;
if so we consider them to be the same UV."""
eps = 1e-6
def __init__(self):
self.uvs: list[UV] = []
self.refs: list[list[bmesh.types.BMLoopUV]] = []
def add(self, uv: UV, ref: bmesh.types.BMLoopUV):
"""Add a UV to the index, with a reference
to its BMLoopUV, so we can manipulate it later."""
id = self.get_id(uv)
self.refs[id].append(ref)
def get_id(self, uv: UV):
# Find the closest UV, within eps distance
x, y = uv
for i, (x_, y_) in enumerate(self.uvs):
x_diff = abs(x - x_)
y_diff = abs(y - y_)
if x_diff < self.eps and y_diff < self.eps:
return i
else:
self.uvs.append(uv)
self.refs.append([])
return len(self.uvs) - 1
def get_uv(self, id: int):
return self.uvs[id]
def get_refs(self, id: int):
return self.refs[id]
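# A UVIndexer usage sketch (ref_a/ref_b stand in for real BMLoopUV references):
#   idx = UVIndexer()
#   idx.add((0.2500000, 0.75), ref_a)   # new UV -> id 0
#   idx.add((0.2500004, 0.75), ref_b)   # within eps of id 0, so grouped with it
#   idx.get_id((0.25, 0.75))            # -> 0
#   idx.get_refs(0)                     # -> [ref_a, ref_b]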
def adjust_uv(uv: UV, scale: tuple[float, float]):
"""Adjust UVs for non-square textures,
so that they are square, and so
that the origin is in the top-left"""
x, y = uv
w_scale, h_scale = scale
# Blender's texture coordinates are flipped
# compared to most image processing;
# i.e. top-left is (0, 1) rather than (0, 0).
# So we reflect everything over y=0.5 (i.e. flip vertically)
# to compensate.
y = 1 - y
# Scale around the center
cx, cy = 1/2, 1/2
x = (x-cx)*w_scale + cx
y = (y-cy)*h_scale + cy
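# e.g. for a 1024x512 texture the scale is (1.0, 0.5), and:
#   adjust_uv((0.25, 0.75), (1.0, 0.5)) -> (0.25, 0.375)
# (y is first flipped to 0.25, then squashed towards the
#  center: (0.25 - 0.5)*0.5 + 0.5 = 0.375)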
return x, y
def extract_regions(obj):
"""Extract UV regions from a UV map.
UV regions are groups of overlapping UV islands.
In particular we want the borders of these regions
and lists of the UVs that belong to each."""
# Preliminaries
# Select the correct object
# and get Blender in the right state for everything
util.select_obj(obj)
bpy.ops.object.mode_set(mode = 'EDIT')
bm = bmesh.from_edit_mesh(obj.data)
uv_layers = bm.loops.layers.uv.verify()
# For non-square textures,
# load texture so we know how much
# to scale UVs by to achieve a square map
tex_path = util.get_texture(obj)
tex = Image.open(tex_path)
scale_w = min(tex.width/tex.height, 1)
scale_h = min(tex.height/tex.width, 1)
scale = (scale_w, scale_h)
uvs = UVIndexer()
# Get faces for identifying island borders/outlines
# and get edges to generate the UV network,
# used for identifying islands
faces: list[Polygon] = []
edges: list[tuple[UV, UV]] = []
for f in bm.faces:
face = []
for l in f.loops:
uv_ref_a = l[uv_layers]
uv_ref_b = l.link_loop_next[uv_layers]
a = adjust_uv((uv_ref_a.uv.x, uv_ref_a.uv.y), scale)
b = adjust_uv((uv_ref_b.uv.x, uv_ref_b.uv.y), scale)
uvs.add(a, uv_ref_a)
uvs.add(b, uv_ref_b)
edges.append((a, b))
if not face: face.append(a)
face.append(b)
faces.append(Polygon(face))
# Build the UV network
# and map UVs to their parent islands
g = nx.Graph()
for a, b in edges:
a_id = uvs.get_id(a)
b_id = uvs.get_id(b)
g.add_edge(a_id, b_id)
uvs_to_islands: dict[int, int] = {}
islands_to_uvs: dict[int, list[int]] = {}
components = nx.connected_components(g)
for island_id, uv_ids in enumerate(components):
islands_to_uvs[island_id] = []
for id in uv_ids:
uvs_to_islands[id] = island_id
islands_to_uvs[island_id].append(id)
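# e.g. if the edges connected UV ids {0, 1, 2} and, separately, {3, 4},
# connected_components() yields two islands, so at this point:
#   uvs_to_islands == {0: 0, 1: 0, 2: 0, 3: 1, 4: 1}
#   islands_to_uvs == {0: [0, 1, 2], 1: [3, 4]}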
# Then map faces to their parent islands
island_faces: dict[int, list[Polygon]] = defaultdict(list)
for face in faces:
# Since the UVs are all connected,
# we can use any UV from the face to identify
# its parent island
uv = face.exterior.coords[0]
uv_id = uvs.get_id(uv)
island_id = uvs_to_islands[uv_id]
# Fix face geometry as needed
if not face.is_valid:
face = make_valid(face)
island_faces[island_id].append(face)
# Merge island faces
islands: list[Polygon] = []
for island_id, faces in island_faces.items():
merged = unary_union(faces)
islands.append(merged)
# Then merge overlapping islands into regions
regions: list[Polygon] = []
regions_to_uv_ids: dict[int, list[int]] = defaultdict(list)
for island_id, shape in enumerate(islands):
for i, reg in enumerate(regions):
if reg.intersects(shape):
regions[i] = unary_union([reg, shape])
regions_to_uv_ids[i] += islands_to_uvs[island_id]
break
else:
regions.append(shape)
region_id = len(regions) - 1
regions_to_uv_ids[region_id] += islands_to_uvs[island_id]
# Map of region ids to their children: (UV, BMLoopUV[])[]
regions_to_uvs: dict[int, list[tuple[UV, list[bmesh.types.BMLoopUV]]]] = {}
for region_id, uv_ids in regions_to_uv_ids.items():
uv_refs = []
for id in uv_ids:
uv_refs.append((uvs.get_uv(id), uvs.get_refs(id)))
regions_to_uvs[region_id] = uv_refs
return regions, regions_to_uvs, islands, bm
def clip_regions(tex_path: str, regions: list[Polygon], padding: int):
"""Clip the provided regions out of the specifed texture."""
im_orig = util.load_texture(tex_path, as_rgba=True)
# Convert to square
scale = max(im_orig.height, im_orig.width)
size = (scale, scale)
im = Image.new("RGBA", size, (255,255,255,0))
x = round(scale/2 - im_orig.width/2)
y = round(scale/2 - im_orig.height/2)
im.paste(im_orig, (x, y), im_orig)
clips = []
for i, region in enumerate(regions):
mask_im = Image.new("L", size, 0)
mask_draw = ImageDraw.Draw(mask_im)
scaled_region = affinity.scale(region,
xfact=scale, yfact=scale, origin=(0, 0))
if isinstance(scaled_region, MultiPolygon):
geoms = scaled_region.geoms
else:
geoms = [scaled_region]
for geom in geoms:
poly = geom.convex_hull.exterior.coords
# Poly outline/width doesn't seem to work?
# Have to do it manually
mask_draw.polygon(poly, fill=255, outline=None)
mask_draw.line(list(poly) + [poly[0]],
width=padding, joint='curve', fill=255)
r = round(padding/2)
for pt in poly:
mask_draw.ellipse(
(pt[0]-r, pt[1]-r, pt[0]+r, pt[1]+r),
fill=255)
black = Image.new("RGBA", size, 0)
clip = Image.composite(im, black, mask_im)
clips.append(clip)
return clips
def pack_regions(regions: list[Polygon], clips: list[Image.Image],
regions_to_uvs: dict[int, list[tuple[UV, list[bmesh.types.BMLoopUV]]]],
padding: int):
"""Pack the regions, merging clips into a single image reflecting
the packing results, and update the UVs to match"""
# Need to have int points for nest2d, so scale up
# This scale value is arbitrary, though I suppose
# higher values give you more precision.
box_scale = 3200
box = Box(box_scale, box_scale)
# Scale regions to match the box scale.
# nest2d can't pack concave shapes
# so instead we use their convex hulls.
# Then create items to pack for each.
items = []
for region in regions:
scaled_region = affinity.scale(region,
xfact=box_scale, yfact=box_scale, origin=(0, 0))
hull = scaled_region.convex_hull
item = Item([
Point(round(x), round(y))
for (x, y) in hull.exterior.coords])
items.append(item)
# Do the bin packing
# Use padding+1 as the min distance between items to avoid overlaps
pgrp = nest(items, box, padding+1)
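# nest() returns a list of packed bins; we assume here that
# everything fits into the first (and only) bin.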
packed = pgrp[0]
# Prepare the output image
output_img = Image.new(
size=clips[0].size,
color=(255,255,255,0),
mode='RGBA')
# Adjust clips and UVs based on the packing results
transformed_uv_shapes = []
for i, (clip, packed_item) in enumerate(zip(clips, packed)):
trans = packed_item.translation
rot = packed_item.rotation
# Apply the transformation to the texture clip
x = round(trans.x/box_scale * clip.width)
y = round(trans.y/box_scale * clip.height)
if rot != 0:
deg = math.degrees(rot)
clip = clip.rotate(-deg, center=(0, 0),
expand=True, translate=(x, y))
output_img.paste(clip, (0, 0), clip)
else:
output_img.paste(clip, (x, y), clip)
# Apply the transformation to the UV island
# Basically we group the UV points together and
# transform them in bulk with shapely,
# then apply the transformed coordinates to each UV.
t_uv_x = trans.x/box_scale
t_uv_y = trans.y/box_scale
uv_points = [uv for uv, refs in regions_to_uvs[i]]
uv_shape = MultiPoint(uv_points)
if rot != 0:
uv_shape = affinity.rotate(uv_shape,
angle=rot,
origin=(0, 0),
use_radians=True)
uv_shape = affinity.translate(uv_shape, xoff=t_uv_x, yoff=t_uv_y)
transformed_uv_shapes.append(uv_shape)
for (_, refs), pt in zip(regions_to_uvs[i], uv_shape.geoms):
for uv_ref in refs:
uv_ref.uv.x = pt.x
uv_ref.uv.y = 1 - pt.y
return output_img, transformed_uv_shapes
def draw_debug(shapes: list[Polygon], save_path: str, width=1, label=False):
"""Draw the provide shapes, for debugging"""
img = Image.new("RGBA",
(DEBUG_SCALE, DEBUG_SCALE), (255,255,255,0))
draw = ImageDraw.Draw(img)
for i, shape in enumerate(shapes):
color = DEBUG_COLORS[i % len(DEBUG_COLORS)]
if isinstance(shape, MultiPoint):
r = 1
for pt in shape.geoms:
x = pt.x * DEBUG_SCALE
y = pt.y * DEBUG_SCALE
draw.ellipse((x-r, y-r, x+r, y+r),
fill=color)
else:
if isinstance(shape, MultiPolygon):
geoms = shape.geoms
else:
geoms = [shape]
for geom in geoms:
points = geom.exterior.coords
for (x, y), (x_, y_) in zip(points, points[1:]):
x *= DEBUG_SCALE
y *= DEBUG_SCALE
x_ *= DEBUG_SCALE
y_ *= DEBUG_SCALE
draw.line((x, y, x_, y_),
fill=color,
width=width)
if label:
bounds = geom.bounds
cx = bounds[0] + (bounds[2] - bounds[0]) / 2
cy = bounds[1] + (bounds[3] - bounds[1]) / 2
draw.text((cx*DEBUG_SCALE, cy*DEBUG_SCALE),
str(i), fill=(0,0,0))
img.save(save_path)
def repack_texture(obj, padding=10, debug=False):
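"""Repack the object's UV regions into a single, tightly packed
texture atlas, updating its UVs in place, and return the new texture."""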
regions, regions_to_uvs, islands, bm = extract_regions(obj)
if debug:
draw_debug(islands, '/tmp/debug.islands.png', label=True)
draw_debug(regions, '/tmp/debug.regions.png', label=True)
hulls = [r.convex_hull for r in regions]
draw_debug(hulls, '/tmp/debug.hulls.png')
tex_path = util.get_texture(obj)
clips = clip_regions(tex_path, regions, padding)
if debug:
img = clips[0].copy()
for clip in clips[1:]:
img.paste(clip, (0, 0), clip)
img.save('/tmp/debug.clips.png')
output_img, transformed_uv_shapes = pack_regions(regions, clips, regions_to_uvs, padding)
if debug:
# Overlay the modified UV layout onto the final texture,
# to check that it's been adjusted properly.
preview = util.export_uv_layout(obj)
preview = preview.resize(output_img.size)
img = output_img.copy()
img.paste(preview, (0, 0), preview)
img.save('/tmp/debug.combined.png')
draw_debug(transformed_uv_shapes, '/tmp/debug.transformed.png')
return output_img
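# ---------------------------------------------------------------------------
# Usage example (a separate script in this gist): merge the 'fugue-base' mesh
# with its clothing objects and write out the merged texture.
# ---------------------------------------------------------------------------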
import os
import bpy
from merge import merge_objects
save_dir = "/tmp/"
# Get the fugue-base mesh
# and any clothes (objects starting with "Human.")
objs = [bpy.data.objects['fugue-base']] +\
[obj for obj in bpy.data.objects if obj.name.startswith('Human.')]
# Save the merged texture at this path
merged_texture_path = os.path.join(save_dir, 'texture.merged.png')
# Merge the objects and materials
merge_objects(objs, merged_texture_path, debug=True)
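# ---------------------------------------------------------------------------
# merge module (imported above as `merge`): merges multiple objects and their
# textures into a single object with a single texture atlas.
# ---------------------------------------------------------------------------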
import os
import bpy
import math
import bmesh
from PIL import Image
from collections import defaultdict
from nest2D import Point, Box, Item, nest
from . import atlas, util
MAX_SIZE = 512
MAX_2_POW = math.log2(MAX_SIZE)
assert MAX_2_POW.is_integer() # Must be a power of 2
MAX_2_POW = int(MAX_2_POW)
def merge_textures(objs):
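"""Bin-pack each object's primary texture into one merged texture image,
transform the objects' UVs to match, and return the merged image."""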
# Collect the primary texture for each object.
# Because multiple objects may use the same texture,
# we might have fewer textures than objects.
# So keep track of what objects are using which textures.
texpaths = []
texpaths_to_objs = defaultdict(list)
for obj in objs:
tex_path = util.get_texture(obj)
if tex_path not in texpaths:
texpaths.append(tex_path)
texpaths_to_objs[tex_path].append(obj)
# Bin pack the textures into a single texture.
items = [] # Items for packing
# The maximum width is the sum of all the texture widths;
# similar for max height. This is to ensure we have enough
# space to fit everything.
max_width, max_height = 0, 0
for tex_path in texpaths:
img = Image.open(tex_path)
w, h = img.size
max_width += w
max_height += h
item = Item([
Point(0, 0),
Point(w, 0),
Point(w, h),
Point(0, h)
])
items.append(item)
# Pick the largest dimension
# and use for both width and height,
# so the box remains a square.
side = max(max_width, max_height)
box = Box(side, side)
# Do the bin packing
# Set min distance to 1 to avoid overlaps
pgrp = nest(items, box, 1)
packed = pgrp[0]
# Create the merged texture
merged_tex = Image.new(
size=(side, side),
color=(255,255,255,0),
mode='RGBA')
for item, tex_path in zip(packed, texpaths):
trans = item.translation
rot = item.rotation
tex = util.load_texture(tex_path, as_rgba=True)
tex_w, tex_h = tex.size
if rot != 0:
# Easiest way to do rotation
# is to paste the texture onto
# something the same size as the target
# and transform that.
deg = math.degrees(rot)
tmp = Image.new(
size=(side, side),
color=(255,255,255,0),
mode='RGBA')
tmp.paste(tex, (0, 0), tex)
tmp = tmp.rotate(-deg, center=(0, 0),
expand=True, translate=(trans.x, trans.y))
merged_tex.paste(tmp, (0, 0), tmp)
else:
merged_tex.paste(tex, (trans.x, trans.y), tex)
# Transform the UVs to match
# where the texture was moved to
scale_x = tex_w/side
scale_y = tex_h/side
t_uv_x = trans.x/side
t_uv_y = trans.y/side
for obj in texpaths_to_objs[tex_path]:
util.transform_uvs(obj, [
('scale', (scale_x, scale_y)),
('rotation', rot),
('translate', (t_uv_x, t_uv_y)),
], origin=(0, 0))
return merged_tex
def merge_objects(objs, tex_save_path, debug=False):
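"""Join the given objects into one object sharing a single material,
repack its texture atlas, and save the final texture to tex_save_path."""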
merged_tex = merge_textures(objs)
if debug:
d_img = merged_tex.copy()
for obj in objs:
layout = util.export_uv_layout(obj)
layout = layout.resize(d_img.size)
d_img.paste(layout, (0,0), layout)
d_img.save('/tmp/debug.merged.png')
# Use the first object's material as the common material
mat = objs[0].active_material
# Save the merged texture, then set as
# the material texture
tmp_save_path = '/tmp/.texture.merged.png'
merged_tex.save(tmp_save_path)
util.set_material_image(tmp_save_path, mat, rel_path=False)
# Now we're ready to actually merge the objects
bpy.context.view_layer.objects.active = objs[0]
for obj in objs:
obj.select_set(True)
obj.data.materials[0] = mat
bpy.ops.object.join()
# The first object is the merged object
merged_obj = objs[0]
packed_texture = atlas.repack_texture(merged_obj,
padding=10, debug=debug)
# Save the repacked texture,
# then assign to material
tmp_save_path = '/tmp/.texture.packed.png'
packed_texture.save(tmp_save_path)
util.set_material_image(tmp_save_path, mat, rel_path=False)
# Trim whitespace and adjust the texture dimension
# so that it's a power of 2.
util.select_obj(merged_obj)
# Figure out UV map boundaries so we can
# identify excess whitespace.
bm = bmesh.from_edit_mesh(merged_obj.data)
uv_layers = bm.loops.layers.uv.verify()
min_uv_x = float('inf')
min_uv_y = float('inf')
max_uv_x = 0
max_uv_y = 0
for f in bm.faces:
for l in f.loops:
uv = l[uv_layers].uv
if uv.x < min_uv_x: min_uv_x = uv.x
if uv.y < min_uv_y: min_uv_y = uv.y
if uv.x > max_uv_x: max_uv_x = uv.x
if uv.y > max_uv_y: max_uv_y = uv.y
min_width = max_uv_x - min_uv_x
min_height = max_uv_y - min_uv_y
scale = max(min_width, min_height)
scaled_size = (
packed_texture.size[0] * scale,
packed_texture.size[1] * scale)
# Should still be a square!
assert scaled_size[0] == scaled_size[1]
# Figure out the best power-of-2
# size for the texture
side = scaled_size[0]
target = 2**MAX_2_POW
for i in range(1, MAX_2_POW):
if side - 2**i <= 0:
target = 2**i
break
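# e.g. side == 200 picks target == 256; if side exceeds 256 the loop
# never breaks and target stays at the 2**MAX_2_POW (== 512) default.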
# Remove whitespace around the texture
packed_texture = packed_texture.crop((
round(min_uv_x*packed_texture.size[0]),
round(min_uv_y*packed_texture.size[1]),
round((min_uv_x+scale)*packed_texture.size[0]),
round((min_uv_y+scale)*packed_texture.size[1]),
))
if debug:
packed_texture.save('/tmp/debug.cropped.png')
texture = Image.new(
size=(target, target),
color=(255,255,255,0),
mode='RGBA')
# Scale the texture down, if necessary
texture_scale = min(1, target/side)
if texture_scale < 1:
packed_texture = packed_texture.resize((target, target))
texture.paste(packed_texture, (0, 0), packed_texture)
# Save and assign the final texture
texture.save(tex_save_path)
util.set_material_image(tex_save_path, mat)
# Update UVs to match these last transformations
util.transform_uvs(merged_obj, [
('translate', (-min_uv_x, -min_uv_y)),
('scale', (1/scale, 1/scale)),
('scale', (
packed_texture.size[0]/texture.size[0],
packed_texture.size[1]/texture.size[1])),
], origin=(0, 0))
if debug:
d_img = texture.copy()
layout = util.export_uv_layout(merged_obj)
layout = layout.resize(d_img.size)
d_img.paste(layout, (0,0), layout)
d_img.save('/tmp/debug.final.png')
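# ---------------------------------------------------------------------------
# util module (imported above as `util`): shared Blender/PIL helpers.
# ---------------------------------------------------------------------------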
import os
import bpy
import bmesh
import cairosvg
import subprocess
from PIL import Image
from shapely import affinity
from shapely.geometry import MultiPoint
def select_obj(obj):
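"""Deselect everything, make obj the active object, and enter edit mode."""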
for o in bpy.context.selected_objects:
o.select_set(False)
bpy.context.view_layer.objects.active = obj
bpy.ops.object.mode_set(mode = 'EDIT')
def get_texture(obj):
"""Get the texture image
Assume that each material is a 'Principled BSDF' shader
with a texture image for the 'Base Color'"""
textures = []
for mat_slot in obj.material_slots:
images = []
for node in mat_slot.material.node_tree.nodes:
if isinstance(node, bpy.types.ShaderNodeTexImage):
images.append(node.image)
for img in images:
full_path = bpy.path.abspath(img.filepath, library=img.library)
textures.append(full_path)
return textures[0]
def load_texture(path: str, as_rgba=False):
# For some reason Pillow running in Blender segfaults
# when converting a JPG to RGBA (but works fine outside of Blender
# using Blender's python binary/environment).
# So shelling out to ImageMagick's `convert` is a hacky way to ensure
# that we're always working with an RGBA image.
if as_rgba:
subprocess.run(['convert', path, '/tmp/tex.rgba.png'])
return Image.open('/tmp/tex.rgba.png').convert('RGBA')
else:
return Image.open(path)
def transform_uvs(obj, transforms, origin=(0, 0)):
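"""Apply a list of ('scale'|'rotation'|'translate', data) transforms
to all of the object's UVs (in flipped-y image space) using shapely."""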
select_obj(obj)
bm = bmesh.from_edit_mesh(obj.data)
uv_layers = bm.loops.layers.uv.verify()
uvs = []
pts = []
for f in bm.faces:
for l in f.loops:
uv = l[uv_layers]
pt = (uv.uv.x, 1 - uv.uv.y)
pts.append(pt)
uvs.append(uv)
uv_shape = MultiPoint(pts)
for typ, data in transforms:
if typ == 'scale':
scale_x, scale_y = data
uv_shape = affinity.scale(uv_shape,
xfact=scale_x, yfact=scale_y,
origin=origin)
elif typ == 'rotation':
rot = data
if rot != 0:
uv_shape = affinity.rotate(uv_shape,
angle=rot,
origin=origin,
use_radians=True)
elif typ == 'translate':
trans_x, trans_y = data
uv_shape = affinity.translate(uv_shape,
xoff=trans_x, yoff=trans_y)
for uv, pt in zip(uvs, uv_shape.geoms):
uv.uv.x = pt.x
uv.uv.y = 1 - pt.y
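# A transform_uvs() usage sketch (hypothetical values): shrink an object's UVs
# into the top-left quarter of the texture, then shift them right by a quarter:
#   transform_uvs(obj, [
#       ('scale', (0.5, 0.5)),
#       ('translate', (0.25, 0.0)),
#   ], origin=(0, 0))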
def export_uv_layout(obj):
select_obj(obj)
# Export the modified UV layout.
# Have to save as SVG b/c Blender "can't use the GPU in background mode"
bpy.ops.uv.export_layout(filepath="/tmp/debug.uv.svg",
mode='SVG', size=(1024, 1024))
cairosvg.svg2png(url='/tmp/debug.uv.svg',
write_to='/tmp/debug.uv.png',
output_width=1024, output_height=1024)
# Then overlay the modified UV layout onto the final texture,
# to check that it's been adjusted properly.
return Image.open('/tmp/debug.uv.png')
def set_material_image(path, mat, rel_path=True):
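"""Load the image at path and assign it to the material's
(first) image texture node."""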
fname = os.path.basename(path)
if rel_path:
path = bpy.path.relpath(path)
bpy.data.images.load(path)
for node in mat.node_tree.nodes:
if isinstance(node, bpy.types.ShaderNodeTexImage):
node.image = bpy.data.images[fname]
break