Example scripts linking Python and Blender for neuroimaging visualization
import bpy
import math
import numpy as np
import nibabel as nib
import matplotlib.pyplot as plt
from Connectome_Spatial_Smoothing import CSS as css
from cerebro import cerebro_brain_utils as cbu
from cerebro import cerebro_brain_viewer as cbv
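
# Note (hedged): this script appears to target the legacy Blender Python API
# (Mesh.vertex_colors, the Principled BSDF "Specular" socket, and the
# 'BLENDER_EEVEE' engine name), parts of which were renamed or removed in Blender 4.x.
# An optional, commented-out guard you could enable:
# import sys
# if bpy.app.version >= (4, 0, 0):
#     print(f"Warning: script assumes Blender 3.x APIs, running {bpy.app.version_string}", file=sys.stderr)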
def generate_vertex_colors(data, colormap=plt.cm.coolwarm):
    # Normalize the data and map it to per-vertex colors on the left hemisphere
    # (relies on the module-level left_vertices_white and left_cortical_surface_model
    # that are loaded further below)
    scaled_data = (data - data.min()) / (data.max() - data.min())
    vertex_data = np.zeros(left_vertices_white.shape[0]) * np.nan
    vertex_data[np.array(left_cortical_surface_model.vertex_indices)] = scaled_data[left_cortical_surface_model.index_offset:left_cortical_surface_model.index_count]
    vertex_colors = colormap(vertex_data)
    # adjust alpha
    vertex_colors[:, 3] = 0.3
    return vertex_colors
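
# Usage sketch (hedged; assumes the globals loaded further below are in place):
# colors = generate_vertex_colors(gradients.get_fdata()[0], colormap=plt.cm.viridis)
# print(colors.shape)  # (n_vertices, 4) RGBA in [0, 1]; vertices without data get the colormap's "bad" colour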
# Function to update vertex colors on a mesh
def update_vertex_colors(mesh, vertex_colors):
    color_layer = mesh.vertex_colors.active.data
    for loop_index, loop in enumerate(mesh.loops):
        vertex_index = loop.vertex_index
        color_layer[loop_index].color = vertex_colors[vertex_index]
# Function to create (or activate) a vertex color layer on a mesh
def create_layer(mesh, layer_name):
    # Create a new vertex color layer or use an existing one
    if layer_name not in mesh.vertex_colors:
        mesh.vertex_colors.new(name=layer_name)
    vertex_color_layer = mesh.vertex_colors[layer_name]
    mesh.vertex_colors.active_index = [x[0] for x in mesh.vertex_colors.items()].index(layer_name)
    return vertex_color_layer


# Function to add vertex colors to a mesh's vertex color layer
def add_vertex_colors_to_layer(mesh, vertex_colors, layer_name):
    vertex_color_layer = create_layer(mesh, layer_name)
    update_vertex_colors(mesh, vertex_colors)
    mesh.update()
    return vertex_color_layer
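
# On newer Blender versions (3.2+) the same per-corner colours can also be
# written through the colour-attribute API; a hedged, untested sketch:
# def add_vertex_colors_via_attributes(mesh, vertex_colors, layer_name):
#     attr = mesh.color_attributes.get(layer_name) or mesh.color_attributes.new(
#         name=layer_name, type='FLOAT_COLOR', domain='CORNER')
#     for loop_index, loop in enumerate(mesh.loops):
#         attr.data[loop_index].color = vertex_colors[loop.vertex_index]
#     mesh.update()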
surface = 'white'
left_surface_file, right_surface_file = cbu.get_left_and_right_GIFTI_template_surface(surface)
left_surface_white = nib.load(left_surface_file)
left_vertices_white, left_triangles_white = cbu.load_GIFTI_surface(left_surface_file)

# note: the inflated template is loaded here but stored in the *_pial variables,
# since it serves as the second ("pial_shape") shape key further below
surface = 'inflated'
left_surface_file, right_surface_file = cbu.get_left_and_right_GIFTI_template_surface(surface)
left_surface_pial = nib.load(left_surface_file)
left_vertices_pial, left_triangles_pial = cbu.load_GIFTI_surface(left_surface_file)
dscalar = nib.load(cbu.cifti_template_file)
brain_models = [x for x in dscalar.header.get_index_map(1).brain_models]
left_cortical_surface_model, right_cortical_surface_model = brain_models[0], brain_models[1]
left_cortical_surface_model.vertex_indices
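
# Optional sanity check (hedged sketch): list the CIFTI brain models to confirm
# which structure each one covers and how many vertices it indexes.
# for bm in brain_models:
#     print(bm.brain_structure, bm.model_type, bm.index_offset, bm.index_count)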
gradients = nib.load("/mnt/local_storage/Research/Codes/fMRI/DataStore/Templates/principal_gradient/hcp.gradients.dscalar.nii")
# data = gradients.get_fdata()[0]
# scaled_data = (data - data.min()) / (data.max() - data.min())
# vertex_data = np.zeros(left_vertices.shape[0]) * np.nan
# vertex_data[np.array(left_cortical_surface_model.vertex_indices)] = scaled_data[left_cortical_surface_model.index_offset:left_cortical_surface_model.index_count]
# my_vertex_colors = plt.cm.coolwarm(vertex_data)
# Blender scripting starts here

# First clean the scene
# make sure the active object is not in Edit Mode
if bpy.context.active_object and bpy.context.active_object.mode == "EDIT":
    bpy.ops.object.editmode_toggle()

# make sure none of the objects are hidden from the viewport, selection, or disabled
for obj in bpy.data.objects:
    obj.hide_set(False)
    obj.hide_select = False
    obj.hide_viewport = False

# select all the objects and delete them (just like pressing A + X + D in the viewport)
bpy.ops.object.select_all(action="SELECT")
bpy.ops.object.delete()

# find all the collections and remove them
collection_names = [col.name for col in bpy.data.collections]
for name in collection_names:
    bpy.data.collections.remove(bpy.data.collections[name])

# in case the world shader was modified, delete and recreate the world object
world_names = [world.name for world in bpy.data.worlds]
for name in world_names:
    bpy.data.worlds.remove(bpy.data.worlds[name])

# create a new world data block
bpy.ops.world.new()
bpy.context.scene.world = bpy.data.worlds["World"]

# purge any orphaned data blocks left behind
bpy.ops.outliner.orphans_purge(do_local_ids=True, do_linked_ids=True, do_recursive=True)
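
# A blunter way to start from an empty scene (hedged alternative, not used here,
# since it also discards the current startup-file settings):
# bpy.ops.wm.read_factory_settings(use_empty=True)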
# Frame timing
# Set the start and end frames of the animation
start_frame = 1
end_frame = 400
bpy.context.scene.frame_start = start_frame
bpy.context.scene.frame_end = end_frame
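
# At Blender's default 24 fps these 400 frames give roughly a 16-17 second clip;
# a hedged, optional override if a different duration is preferred:
# bpy.context.scene.render.fps = 30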
# Now create the mesh
mesh = bpy.data.meshes.new("MyMesh")
left_cortex_obj = bpy.data.objects.new("MyObject", mesh)
bpy.context.collection.objects.link(left_cortex_obj)
mesh.from_pydata(left_vertices_white, [], left_triangles_white)
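
# Optional (hedged): validate the freshly built mesh in case the GIFTI
# triangulation contains degenerate geometry, then recompute edges.
# mesh.validate(verbose=False)
# mesh.update(calc_edges=True)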
# shape keys can be used to alternate between shapes
# Ensure the object has an active mesh and is in object mode
bpy.context.view_layer.objects.active = left_cortex_obj
bpy.ops.object.mode_set(mode='OBJECT')

# Add the Basis shape key (initial shape from the white surface)
if not left_cortex_obj.data.shape_keys:
    bpy.ops.object.shape_key_add(from_mix=False)
white_shape = left_cortex_obj.data.shape_keys.key_blocks[-1]
white_shape.name = "white_shape"

# Add the target shape key for the second set of vertices (the inflated surface)
bpy.ops.object.shape_key_add(from_mix=False)
pial_shape = left_cortex_obj.data.shape_keys.key_blocks[-1]
pial_shape.name = "pial_shape"

# Set the vertex coordinates of the target shape (from left_vertices_pial)
for i, vertex in enumerate(pial_shape.data):
    vertex.co = left_vertices_pial[i]  # Transition to the second set of vertices
# At the first frame, fully white surface
pial_shape.value = 0.0
pial_shape.keyframe_insert(data_path="value", frame=start_frame)

# At the middle frame (end_frame // 2), fully inflated ("pial") surface
pial_shape.value = 1.0
pial_shape.keyframe_insert(data_path="value", frame=end_frame // 2)

# At the last frame, back to the white surface
pial_shape.value = 0.0
pial_shape.keyframe_insert(data_path="value", frame=end_frame)
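
# The shape-key value blends the vertex positions linearly between the two
# surfaces, so intermediate frames show a mesh partway between white and
# inflated; a quick hedged check (not keyframed, left commented out):
# pial_shape.value = 0.5  # halfway between the two surfaces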
# control the animation interpolation
fcurve = left_cortex_obj.data.shape_keys.animation_data.action.fcurves[0]
# Set Bounce interpolation and ease in/out on each keyframe
for keyframe in fcurve.keyframe_points:
    keyframe.interpolation = 'BOUNCE'  # Set the interpolation type to Bounce
    keyframe.easing = 'AUTO'  # Smooth ease in and out
    keyframe.handle_left_type = 'AUTO_CLAMPED'  # Smooth handle types
    keyframe.handle_right_type = 'AUTO_CLAMPED'

# Update the scene
bpy.context.view_layer.update()
### smooth shading
# Select the left cortex object and make it active
bpy.context.view_layer.objects.active = left_cortex_obj
left_cortex_obj.select_set(True)
# Apply smooth shading
bpy.ops.object.shade_smooth()

# handling colors
vertex_color_layer_1 = add_vertex_colors_to_layer(
    mesh, generate_vertex_colors(gradients.get_fdata()[0], colormap=plt.cm.autumn),
    "VertexColor1"
)
vertex_color_layer_2 = add_vertex_colors_to_layer(
    mesh, generate_vertex_colors(gradients.get_fdata()[1], colormap=plt.cm.summer),
    "VertexColor2"
)
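
# Additional CIFTI maps could be layered the same way (hedged sketch; assumes
# the dscalar file actually contains a third map, and "VertexColor3" would also
# need its own Vertex Color shader node to show up in the material):
# add_vertex_colors_to_layer(
#     mesh, generate_vertex_colors(gradients.get_fdata()[2], colormap=plt.cm.winter),
#     "VertexColor3"
# )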
# Create a new material that uses shader nodes
material = bpy.data.materials.new("VertexColorMaterial")
material.use_nodes = True
nodes = material.node_tree.nodes
links = material.node_tree.links
bsdf = material.node_tree.nodes["Principled BSDF"]
# # Clear existing nodes
# for node in nodes:
#     nodes.remove(node)

# Create nodes: two Vertex Color nodes and a MixRGB mixer
vc_node_1 = nodes.new(type='ShaderNodeVertexColor')
vc_node_1.layer_name = "VertexColor1"
vc_node_2 = nodes.new(type='ShaderNodeVertexColor')
vc_node_2.layer_name = "VertexColor2"
mix_rgb_node = nodes.new(type='ShaderNodeMixRGB')
mix_rgb_node.blend_type = 'MIX'
# material_output = nodes.new(type='ShaderNodeOutputMaterial')
# principled_bsdf = nodes.new(type='ShaderNodeBsdfPrincipled')

# Connect vertex color layers to the MixRGB node
links.new(vc_node_1.outputs['Color'], mix_rgb_node.inputs[1])  # First vertex color to MixRGB
links.new(vc_node_2.outputs['Color'], mix_rgb_node.inputs[2])  # Second vertex color to MixRGB

# Connect MixRGB to the Principled BSDF's Base Color
# (the default BSDF is already wired to the Material Output)
links.new(mix_rgb_node.outputs['Color'], bsdf.inputs['Base Color'])
# links.new(principled_bsdf.outputs['BSDF'], material_output.inputs['Surface'])
# Add keyframes to the MixRGB 'Fac' value to animate between vertex colors
mix_rgb_node.inputs['Fac'].default_value = 0.0  # Start with the first vertex color
mix_rgb_node.inputs['Fac'].keyframe_insert(data_path="default_value", frame=start_frame)
mix_rgb_node.inputs['Fac'].default_value = 1.0  # Transition to the second vertex color
mix_rgb_node.inputs['Fac'].keyframe_insert(data_path="default_value", frame=end_frame // 2)
mix_rgb_node.inputs['Fac'].default_value = 0.0  # Transition back to the first vertex color
mix_rgb_node.inputs['Fac'].keyframe_insert(data_path="default_value", frame=end_frame)
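
# The 'Fac' keyframes use Blender's default Bezier interpolation; a hedged sketch
# to make the colour cross-fade strictly linear instead:
# for fc in material.node_tree.animation_data.action.fcurves:
#     for kp in fc.keyframe_points:
#         kp.interpolation = 'LINEAR'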
# Other material properties
bsdf.inputs["Metallic"].default_value = 0.8
bsdf.inputs["Specular"].default_value = 0.6
bsdf.inputs["Roughness"].default_value = 0.6
bsdf.inputs["Roughness"].keyframe_insert(data_path="default_value", frame=start_frame)
bsdf.inputs["Roughness"].default_value = 0.1
bsdf.inputs["Roughness"].keyframe_insert(data_path="default_value", frame=end_frame // 2)
bsdf.inputs["Roughness"].default_value = 0.6
bsdf.inputs["Roughness"].keyframe_insert(data_path="default_value", frame=end_frame)

# Assign the material to the object
if len(left_cortex_obj.data.materials) > 0:
    left_cortex_obj.data.materials[0] = material
else:
    left_cortex_obj.data.materials.append(material)
# # Add a vertex color layer
# if not mesh.vertex_colors:
#     mesh.vertex_colors.new()
# # Insert a keyframe for the starting vertex colors at frame 1
# bpy.context.scene.frame_set(1)  # Set frame to 1
# update_vertex_colors(mesh, generate_vertex_colors(gradients.get_fdata()[0]))
# mesh.update()
# # Insert a keyframe for the ending vertex colors at frame 50
# bpy.context.scene.frame_set(50)  # Set frame to 50
# update_vertex_colors(mesh, generate_vertex_colors(gradients.get_fdata()[1]))
# mesh.update()
# # Set the material to display the vertex colors
# material = bpy.data.materials.new(name="VertexColorMaterial")
# material.use_nodes = True
# bsdf = material.node_tree.nodes["Principled BSDF"]
# # Add a new node for vertex colors
# vertex_color_node = material.node_tree.nodes.new(type="ShaderNodeVertexColor")
# vertex_color_node.layer_name = mesh.vertex_colors.active.name
# # Connect the vertex color node to the base color of the BSDF shader
# material.node_tree.links.new(vertex_color_node.outputs['Color'], bsdf.inputs['Base Color'])
# # Assign the material to the object
# if left_cortex_obj.data.materials:
#     left_cortex_obj.data.materials[0] = material
# else:
#     left_cortex_obj.data.materials.append(material)
# # Set shading to render vertex colors in the viewport
# for area in bpy.context.screen.areas:
#     if area.type == 'VIEW_3D':
#         space = area.spaces.active
#         space.shading.color_type = 'VERTEX'
# Add a camera object
camera_data = bpy.data.cameras.new(name="Camera")
camera_object = bpy.data.objects.new("Camera", camera_data)
bpy.context.collection.objects.link(camera_object)
camera_object.location = (-400, 0, 0)

# Add a target (empty object) at the center of the brain for the camera to focus on
target = bpy.data.objects.new("CameraTarget", None)
target.location = left_vertices_white.mean(0)
bpy.context.collection.objects.link(target)

# Add the "Track To" constraint to the camera so it always looks at the target
constraint = camera_object.constraints.new(type='TRACK_TO')
constraint.target = target
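
# Depending on the Blender defaults, the Track To constraint may need its axes
# set explicitly so the camera lens (local -Z) points at the target (hedged, optional):
# constraint.track_axis = 'TRACK_NEGATIVE_Z'
# constraint.up_axis = 'UP_Y'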
# Add a pivot (empty object) to aid camera rotations
pivot = bpy.data.objects.new("Pivot_Rotation_Center", None)
pivot.location = left_vertices_white.mean(0)
bpy.context.collection.objects.link(pivot)
camera_object.parent = pivot
# Keyframe the rotation of the pivot empty
# pivot.rotation_euler = (0, math.radians(30), math.radians(60))  # Start rotation
# pivot.keyframe_insert(data_path="rotation_euler", frame=start_frame)
# pivot.rotation_euler = (0, math.radians(-30), math.radians(-60))  # Mid rotation
# pivot.keyframe_insert(data_path="rotation_euler", frame=end_frame // 2)
# pivot.rotation_euler = (0, math.radians(30), math.radians(60))  # Back to the start rotation
# pivot.keyframe_insert(data_path="rotation_euler", frame=end_frame)
for i in range(end_frame):
    # Keyframe a gentle oscillation of the pivot's Y and Z rotation on every frame
    pivot.rotation_euler = (
        0,
        np.cos(2 * np.pi * i / end_frame)**3 * math.radians(4),
        np.sin(4 * np.pi * i / end_frame) * math.radians(2)
    )
    pivot.keyframe_insert(data_path="rotation_euler", frame=i + 1)
# Set the camera as the active camera for the scene
bpy.context.scene.camera = camera_object

# add multiple light sources
light_count = 4
for i in range(light_count):
    # Add a new sun light to the scene
    light_data = bpy.data.lights.new(name=f"SunLight_{i}", type='SUN')
    light_object = bpy.data.objects.new(name=f"SunLight_{i}", object_data=light_data)
    bpy.context.collection.objects.link(light_object)
    # Position the light in the scene
    light_object.rotation_euler = (np.pi / 2, 0, (np.pi * 2 * i / light_count))
    # Set the strength of the light
    light_data.energy = 1.0  # You can adjust this value to make the scene brighter or dimmer
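
# Hedged extra: sun lights cast very sharp shadows by default; increasing the
# angular diameter softens them (optional sketch, would apply to the last sun created):
# light_data.angle = math.radians(5)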
# Set the world background color
# bpy.context.scene.world.use_nodes = True
# bg = bpy.context.scene.world.node_tree.nodes.get("Background")
# bg.inputs[0].default_value = (0, 0, 0, 1)  # Change to the desired RGBA, e.g. (1, 1, 1, 1) for white ambient light
# bg.inputs[1].default_value = 0.5  # Strength of the ambient light

# HDRI background
bpy.context.scene.world.use_nodes = True
node_tree = bpy.context.scene.world.node_tree

# Clear any existing nodes in the world
nodes = node_tree.nodes
nodes.clear()

# Add a Background node
background_node = nodes.new(type="ShaderNodeBackground")
# Add an Environment Texture node (for the HDRI)
env_texture_node = nodes.new(type="ShaderNodeTexEnvironment")
# Load your HDRI image here (replace with your file path)
hdr_image_path = "/mnt/local_storage/Research/Codes/fMRI/DataStore/Templates/HDRI/evening_road_01_puresky_8k.hdr"
env_texture_node.image = bpy.data.images.load(hdr_image_path)
# Add a World Output node
world_output_node = nodes.new(type="ShaderNodeOutputWorld")

# Connect the Environment Texture node to the Background node
node_tree.links.new(env_texture_node.outputs["Color"], background_node.inputs["Color"])
# Connect the Background node to the World Output node
node_tree.links.new(background_node.outputs["Background"], world_output_node.inputs["Surface"])

# Optional: Adjust the strength of the HDRI light
background_node.inputs["Strength"].default_value = 1.5  # You can change this value
# Set render resolution (optional)
bpy.context.scene.render.resolution_x = 1920  # Width in pixels
bpy.context.scene.render.resolution_y = 1080  # Height in pixels
bpy.context.scene.render.resolution_percentage = 100  # Scale

# render a single frame and store it as an image
# # Set render output file path (adjust path as needed)
# bpy.context.scene.render.filepath = "/mnt/local_storage/Research/Codes/fMRI/DataStore/tmp/blender/rendered_image.png"
# # Set render engine to 'CYCLES' or 'BLENDER_EEVEE' (depending on your needs)
# # bpy.context.scene.render.engine = 'CYCLES'  # Path-traced: slower but more realistic
# bpy.context.scene.render.engine = 'BLENDER_EEVEE'  # Real-time rasterized rendering
# # Trigger the render and save the image
# bpy.ops.render.render(write_still=True)

# render the animation and store it as a movie
# Set render output file path (adjust path as needed)
bpy.context.scene.render.filepath = "/mnt/local_storage/Research/Codes/fMRI/DataStore/tmp/blender/rendered_movie.mp4"
# Set rendering options for video output
bpy.context.scene.render.image_settings.file_format = 'FFMPEG'  # Output container/format
bpy.context.scene.render.ffmpeg.format = 'MPEG4'
bpy.context.scene.render.ffmpeg.codec = 'H264'
bpy.context.scene.render.ffmpeg.constant_rate_factor = 'HIGH'
# Set render engine to 'CYCLES' or 'BLENDER_EEVEE' (depending on your needs)
# bpy.context.scene.render.engine = 'CYCLES'  # Path-traced: slower but more realistic
bpy.context.scene.render.engine = 'BLENDER_EEVEE'  # Real-time rasterized rendering
# bpy.ops.screen.animation_play()
# Render the animation
# bpy.ops.render.render(animation=True)

# Finally save the project
# Specify the file path where you want to save the Blender project
file_path = "/mnt/local_storage/Research/Codes/fMRI/DataStore/tmp/blender/project.blend"
# Save the current Blender file
bpy.ops.wm.save_as_mainfile(filepath=file_path)
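
# The whole pipeline can also be run headless from a terminal (hedged example;
# adjust the blender binary and the script filename for your setup):
#   blender --background --python blender_neuro_visualization.py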
Check out this video: rendered_movie.mp4