@JupyterJones
Created July 2, 2025 11:21
Flask image morphing with music

🎭 Arcadian Face Morpher & Video Studio

A Flask-based multimedia tool powered by MoviePy, OpenCV, and dlib that morphs between two faces, renders the transition as a smooth video, adds mood-based background music, and concatenates video clips into longer artistic pieces.

💡 Features

  • 🌀 Face Morphing: Upload two facial images and generate a smooth morph transition (see the scripted example after this list).
  • 🎶 Musical Morphs: Select a background music track to accompany the morph animation.
  • 🖼 Video Frame Overlay: Automatically overlays a glowing border frame on the final musical morph.
  • 🎞 Video Concatenation: Combine four uploaded video clips into a single video, with optional background music.
  • 🔊 Mood-Based Music Player: Toggle between "Party" and "Sad" moods with a built-in preview player.
  • 🎥 Inline Video Preview: Instantly preview your most recently processed videos on the page.
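
For scripting the morpher outside the browser, here is a minimal sketch that drives the /morph endpoint with the requests library (already pinned in requirements.txt). It assumes the app is running locally on the default port 5100 and that face1.jpg and face2.jpg are two face images you supply; on success the route returns the finished MP4 directly, while on errors it flashes a message and redirects back to the page, which the Content-Type check below distinguishes.

    import requests

    with open("face1.jpg", "rb") as f1, open("face2.jpg", "rb") as f2:
        resp = requests.post(
            "http://localhost:5100/morph",
            files={"img1": f1, "img2": f2},
            data={"frames": 60},   # the web UI slider allows 15-150
            timeout=600,           # morphing and encoding can take a while
        )
    resp.raise_for_status()
    if resp.headers.get("Content-Type", "").startswith("video/"):
        with open("morph_output.mp4", "wb") as out:
            out.write(resp.content)
    else:
        print("The server returned a page instead of a video; check the flashed error message.")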

📁 Folder Structure

The layout below is inferred from the paths used in app.py:

  app.py
  requirements.txt
  shape_predictor_68_face_landmarks.dat    (downloaded automatically on first run)
  static/
    uploads/       uploaded images and generated videos
    music/         "party" MP3 tracks
    music_sad/     "sad" MP3 tracks
    assets/
      512x512-frame.png    glowing border overlay

📄 app.py

import cv2
import numpy as np
import dlib
import scipy.spatial
import os
import requests
from flask import Flask, request, render_template, send_from_directory, flash, redirect, url_for, send_file, render_template_string
from werkzeug.utils import secure_filename
import bz2
from moviepy.editor import VideoFileClip, concatenate_videoclips, AudioFileClip, ImageClip, CompositeVideoClip
from uuid import uuid4
import uuid
from icecream import ic
# --- Configuration ---
UPLOAD_FOLDER = 'static/uploads'
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'mp4'}
DLIB_MODEL_FILE = "shape_predictor_68_face_landmarks.dat"
DLIB_MODEL_URL = "http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2"
MUSIC_FOLDER = os.path.join('static', 'music')
MUSIC_SAD_FOLDER = os.path.join('static', 'music_sad')
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.secret_key = 'supersecretkey'
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
# --- Core Morphing Logic ---
def download_dlib_model():
    """Downloads and extracts the dlib facial landmark model if not present."""
    if os.path.exists(DLIB_MODEL_FILE):
        print("dlib model already exists.")
        return True
    print(f"Downloading {DLIB_MODEL_URL}...")
    try:
        r = requests.get(DLIB_MODEL_URL, stream=True)
        r.raise_for_status()
        decompressor = bz2.BZ2Decompressor()
        with open(DLIB_MODEL_FILE, "wb") as f:
            for chunk in r.iter_content(chunk_size=8192):
                f.write(decompressor.decompress(chunk))
        print("Model downloaded and extracted successfully.")
        return True
    except Exception as e:
        print(f"Failed to download or extract the model: {e}")
        return False
download_dlib_model()
detector = dlib.get_frontal_face_detector()
try:
    predictor = dlib.shape_predictor(DLIB_MODEL_FILE)
except RuntimeError as e:
    print(f"Error loading dlib predictor: {e}")
    predictor = None
HTML='''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0"/>
<title>✨ Arcadian Morph & Media Studio</title>
<style>
body { font-family: 'Segoe UI', sans-serif; background-color: #101820; color: #f0f0f0; margin: 0; padding: 2rem; }
@keyframes pulse { 0% { box-shadow: 0 0 20px rgba(0,255,255,0.5); } 50% { box-shadow: 0 0 40px rgba(0,255,255,0.8); } 100% { box-shadow: 0 0 20px rgba(0,255,255,0.5); } }
.pcontainer { background: #1c1f2b; padding: 2rem; margin: 2rem auto; max-width: 860px; border-radius: 10px; animation: pulse 1.5s ease-in-out infinite; }
.pcontainer h2 { color: #00ffff; text-shadow: 0 0 5px #0ff, 0 0 10px #0ff, 0 0 20px #0ff; animation: flicker 3s infinite alternate; }
@keyframes flicker { 0% { opacity: 1; } 40% { opacity: 0.8; } 41% { opacity: 1; } 42% { opacity: 0.6; } 43% { opacity: 1; } 70% { opacity: 0.9; } 100% { opacity: 1; } }
.container { background: #1c1f2b; padding: 2rem; margin: 2rem auto; max-width: 860px; border-radius: 10px; box-shadow: 0 0 20px rgba(0,255,255,0.5); }
.pcontainer:hover { box-shadow: 0 0 40px rgba(0, 255, 255, 0.9), inset 0 0 20px rgba(0, 255, 255, 0.3); transition: box-shadow 0.3s ease-in-out; }
h1, h2 { text-align: center; color: #00ffe1; margin-bottom: 1rem; }
form { margin-top: 1rem; }
.upload-section { display: flex; flex-wrap: wrap; justify-content: center; gap: 2rem; margin: 1rem 0; }
.upload-area { border: 2px dashed #888; padding: 1rem; border-radius: 5px; text-align: center; width: 220px; background: #20232e; }
input[type="file"], input[type="range"], select { margin-top: 0.5rem; width: 100%; }
.options { text-align: center; margin: 1rem 0; }
button { display: block; background-color: #00bfff; color: white; border: none; padding: 0.75rem 1.5rem; border-radius: 5px; font-size: 1rem; cursor: pointer; margin: 0 auto; transition: background-color 0.3s; }
button:hover { background-color: #0088cc; }
.status { margin-top: 1rem; text-align: center; font-style: italic; color: #aaa; }
video { display: block; margin: 1rem auto; max-width: 100%; border-radius: 10px; box-shadow: 0 0 30px rgba(0, 255, 255, 0.2); }
label { display: block; margin-top: 0.5rem; }
select { padding: 0.4rem; background: #222; color: #fff; border: 1px solid #555; border-radius: 5px; }
audio { display: none; }
/* Style for flashed messages */
.flash-message { padding: 1rem; margin: 1rem auto; background: #334; border-left: 5px solid #00bfff; color: #f0f0f0; max-width: 860px; border-radius: 5px; }
</style>
</head>
<body>
<!-- This block will show messages from Flask's flash() -->
{% with messages = get_flashed_messages() %}
{% if messages %}
{% for message in messages %}
<div class="flash-message">{{ message }}</div>
{% endfor %}
{% endif %}
{% endwith %}
<div class="pcontainer">
<form method="POST" action="/music_morph" enctype="multipart/form-data">
<input type="file" name="img1" required>
<input type="file" name="img2" required>
<label for="music">Choose Background Music 🎵</label>
<div style="margin-bottom: 1rem;"><select id="formMoodSelector" style="font-size: 1rem; padding: 0.5rem;"><option value="party">🎉 Party Vibes</option><option value="sad">💔 Broken Heart</option></select></div>
<div><select id="formTrackSelector" name="music" style="font-size: 1rem; padding: 0.5rem;"></select></div>
<button type="submit">Create Musical Morph</button>
</form>
</div>
<div class="pcontainer">
<div id="esperanza-player" style="margin-top: 3rem; text-align: center;"><h2 style="color: #333;">🎵 Esperanza Mood Player</h2>
<div style="margin-bottom: 1rem;"><select id="moodSelector" style="font-size: 1rem; padding: 0.5rem;"><option value="party">🎉 Party Vibes</option><option value="sad">💔 Broken Heart</option></select></div>
<div><select id="trackSelector" style="font-size: 1rem; padding: 0.5rem;"></select></div>
<div style="margin-top: 1rem;"><button id="playPauseBtn" style="padding: 0.5rem 1rem;">▶️ Play</button><input type="range" id="volumeSlider" min="0" max="1" step="0.01" value="0.3" style="width:150px; margin-left: 1rem;"></div>
<audio id="audioPlayer" loop></audio>
</div>
</div>
<div class="container">
<h1>🌀 Face Morpher</h1>
<form id="morph-form" action="/morph" method="post" enctype="multipart/form-data">
<div class="upload-section">
<div class="upload-area"><label for="img1">Start Image</label><input type="file" id="img1" name="img1" accept="image/*" required /></div>
<div class="upload-area"><label for="img2">End Image</label><input type="file" id="img2" name="img2" accept="image/*" required /></div>
</div>
<div class="options">
<label for="frames">Frames (speed/smoothness):</label><input type="range" id="frames" name="frames" min="15" max="150" value="150" /><span id="frames-value">150</span>
</div>
<button type="submit">✨ Generate Morph</button>
<div class="status" id="status-morph">Select two face images to morph.</div>
</form>
</div>
<script>
const moodSelector = document.getElementById('moodSelector'), trackSelector = document.getElementById('trackSelector'), playBtn = document.getElementById('playPauseBtn'), volumeSlider = document.getElementById('volumeSlider'), audio = document.getElementById('audioPlayer'), formMoodSelector = document.getElementById('formMoodSelector'), formTrackSelector = document.getElementById('formTrackSelector');
let currentMood = 'party', tracks = { party: [], sad: [] }, isPlaying = false;
fetch('/music-list').then(res => res.json()).then(data => { tracks = data; updateTrackList(currentMood); updateFormTrackList(formMoodSelector.value); });
function updateFormTrackList(mood) { formTrackSelector.innerHTML = ''; tracks[mood].forEach(file => { const opt = document.createElement('option'); opt.value = file; opt.textContent = file; formTrackSelector.appendChild(opt); }); }
formMoodSelector.addEventListener('change', () => { updateFormTrackList(formMoodSelector.value); });
function updateTrackList(mood) { trackSelector.innerHTML = ''; tracks[mood].forEach(file => { const opt = document.createElement('option'); opt.value = file; opt.textContent = file; trackSelector.appendChild(opt); }); setTrack(mood, trackSelector.value); }
function setTrack(mood, filename) { const path = mood === 'party' ? `/static/music/${filename}` : `/static/music_sad/${filename}`; audio.src = path; if (isPlaying) audio.play(); }
moodSelector.addEventListener('change', () => { currentMood = moodSelector.value; updateTrackList(currentMood); });
trackSelector.addEventListener('change', () => { setTrack(currentMood, trackSelector.value); });
playBtn.addEventListener('click', () => { if (isPlaying) { audio.pause(); playBtn.textContent = '▶️ Play'; } else { audio.play(); playBtn.textContent = '⏸️ Pause'; } isPlaying = !isPlaying; });
volumeSlider.addEventListener('input', () => { audio.volume = volumeSlider.value; });
audio.volume = volumeSlider.value;
</script>
<div class="container">
<h1>🎞 Video Concatenator</h1>
<form id="concat-form" action="/concatenate" method="post" enctype="multipart/form-data">
<div class="upload-section">
<div class="upload-area"><label for="video1">Video 1</label><input type="file" id="video1" name="video1" accept="video/*" required /><video id="preview1" controls width="150" style="display:none;"></video></div>
<div class="upload-area"><label for="video2">Video 2</label><input type="file" id="video2" name="video2" accept="video/*" required /><video id="preview2" controls width="150" style="display:none;"></video></div>
</div>
<div class="upload-section">
<div class="upload-area"><label for="video3">Video 3</label><input type="file" id="video3" name="video3" accept="video/*" required /><video id="preview3" controls width="150" style="display:none;"></video></div>
<div class="upload-area"><label for="video4">Video 4</label><input type="file" id="video4" name="video4" accept="video/*" required /><video id="preview4" controls width="150" style="display:none;"></video></div>
</div>
<button type="submit">🎬 Concatenate Videos</button>
<div class="status" id="status-concat">Combine four videos into one.</div>
</form>
</div>
<script>
function setupPreview(inputId, previewId) { const input = document.getElementById(inputId), preview = document.getElementById(previewId); input.addEventListener('change', function () { const file = this.files[0]; if (file) { const url = URL.createObjectURL(file); preview.src = url; preview.style.display = 'block'; } else { preview.src = ''; preview.style.display = 'none'; } }); }
setupPreview('video1', 'preview1'); setupPreview('video2', 'preview2'); setupPreview('video3', 'preview3'); setupPreview('video4', 'preview4');
</script>
<div class="container">
<h2>🎧 Preview Uploaded Video</h2>
<label for="videoSelect">Select video:</label>
<select id="videoSelect"><option selected disabled>-- Choose Video --</option>{% for video in videos %}<option value="{{ video }}">{{ video }}</option>{% endfor %}</select>
<video id="previewPlayer" controls loop muted style="display:none;"><source id="videoSource" src="" type="video/mp4">Your browser does not support the video tag.</video>
</div>
<div class="container">
<video controls loop muted style="display:none;"><source src="{{ final_with_frame_path }}" type="video/mp4">Your browser does not support the video tag.</video>
</div>
<script>
const framesSlider = document.getElementById('frames'), framesValue = document.getElementById('frames-value');
framesSlider.addEventListener('input', () => { framesValue.textContent = framesSlider.value; });
document.getElementById('morph-form').addEventListener('submit', () => { document.getElementById('status-morph').textContent = 'Morphing in progress...'; });
document.getElementById('concat-form').addEventListener('submit', () => { document.getElementById('status-concat').textContent = 'Concatenation in progress...'; });
document.getElementById('videoSelect').addEventListener('change', function () {
const filename = this.value, videoPath = `static/uploads/${filename}`, source = document.getElementById('videoSource'), player = document.getElementById('previewPlayer');
source.src = videoPath; player.load(); player.style.display = 'block';
});
</script>
<!-- *** NEW SCRIPT BLOCK FOR AUTO-PLAYING THE CONCATENATED VIDEO *** -->
<script>
window.addEventListener('DOMContentLoaded', () => {
// Get the new video's filename from the URL (e.g., ?new_video=concatenated_123.mp4)
const urlParams = new URLSearchParams(window.location.search);
const newVideoFile = urlParams.get('new_video');
// If a new video filename is found in the URL
if (newVideoFile) {
const videoSelect = document.getElementById('videoSelect');
const player = document.getElementById('previewPlayer');
const source = document.getElementById('videoSource');
// Set the dropdown to show the new video
videoSelect.value = newVideoFile;
// Update the player source and display it
source.src = `/static/uploads/${newVideoFile}`;
player.load();
player.style.display = 'block';
// Unmute, play, and scroll to the player
player.muted = false;
player.play();
player.scrollIntoView({ behavior: 'smooth', block: 'center' });
}
});
</script>
</body>
</html>
'''
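# How the morph works (overview of the helpers below):
#   1. get_landmarks() detects the 68 dlib facial landmark points.
#   2. Eight image-border points are appended to the landmarks before triangulation
#      (see the /morph route and face_morph()) so the Delaunay mesh covers the whole frame.
#   3. For each output frame, landmark positions are linearly interpolated:
#      morphed = (1 - alpha) * landmarks1 + alpha * landmarks2.
#   4. morph_triangle() affine-warps each triangle from both source images to the
#      interpolated triangle and cross-dissolves them with the same alpha.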
def get_landmarks(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 1)
    if len(rects) == 0: return None
    shape = predictor(gray, rects[0])
    return np.array([[p.x, p.y] for p in shape.parts()])

def apply_affine_transform(src, src_tri, dst_tri, size):
    warp_mat = cv2.getAffineTransform(np.float32(src_tri), np.float32(dst_tri))
    return cv2.warpAffine(src, warp_mat, (size[0], size[1]), None, flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT_101)
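
# morph_triangle() works in local bounding-box coordinates: each triangle is cropped
# from both source images, warped to the target triangle's shape, and cross-dissolved;
# the returned mask (a filled convex polygon) limits the write-back to pixels inside
# the triangle.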
def morph_triangle(img1, img2, t1, t2, t, alpha):
    r1, r2, r = cv2.boundingRect(np.float32([t1])), cv2.boundingRect(np.float32([t2])), cv2.boundingRect(np.float32([t]))
    t1_rect = [(t1[i][0] - r1[0], t1[i][1] - r1[1]) for i in range(3)]
    t2_rect = [(t2[i][0] - r2[0], t2[i][1] - r2[1]) for i in range(3)]
    t_rect = [(t[i][0] - r[0], t[i][1] - r[1]) for i in range(3)]
    mask = np.zeros((r[3], r[2], 3), dtype=np.float32)
    cv2.fillConvexPoly(mask, np.int32(t_rect), (1.0, 1.0, 1.0), 16, 0)
    img1_rect, img2_rect = img1[r1[1]:r1[1] + r1[3], r1[0]:r1[0] + r1[2]], img2[r2[1]:r2[1] + r2[3], r2[0]:r2[0] + r2[2]]
    size = (r[2], r[3])
    warp_img1 = apply_affine_transform(img1_rect, t1_rect, t_rect, size)
    warp_img2 = apply_affine_transform(img2_rect, t2_rect, t_rect, size)
    img_rect = (1.0 - alpha) * warp_img1 + alpha * warp_img2
    return img_rect, mask, r
def generate_morphed_frame(img1, img2, landmarks1, landmarks2, tri_indices, alpha):
    morphed_landmarks = (1 - alpha) * landmarks1 + alpha * landmarks2
    morphed_img = np.zeros(img1.shape, dtype=img1.dtype)
    for tri in tri_indices:
        t1, t2, t = [landmarks1[i] for i in tri], [landmarks2[i] for i in tri], [morphed_landmarks[i] for i in tri]
        img_rect, mask, r = morph_triangle(img1, img2, t1, t2, t, alpha)
        morphed_img[r[1]:r[1]+r[3], r[0]:r[0]+r[2]] = morphed_img[r[1]:r[1]+r[3], r[0]:r[0]+r[2]] * (1 - mask) + img_rect * mask
    return np.uint8(morphed_img)
# --- Flask Routes ---
def allowed_file(filename):
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS

@app.route('/')
def index():
    video_files = [f for f in os.listdir(UPLOAD_FOLDER) if f.lower().endswith('.mp4')]
    video_files.sort(key=lambda x: os.path.getmtime(os.path.join(UPLOAD_FOLDER, x)), reverse=True)
    return render_template_string(HTML, videos=video_files)
@app.route('/morph', methods=['POST'])
def morph():
    if 'img1' not in request.files or 'img2' not in request.files:
        flash('No file part'); return redirect(request.url)
    file1, file2 = request.files['img1'], request.files['img2']
    if file1.filename == '' or file2.filename == '':
        flash('No selected file'); return redirect(request.url)
    if not (file1 and allowed_file(file1.filename) and file2 and allowed_file(file2.filename)):
        flash('Invalid file type'); return redirect(request.url)
    if not predictor:
        flash('Dlib predictor model is not loaded. Cannot proceed.'); return redirect(url_for('index'))
    filename1, filename2 = secure_filename(file1.filename), secure_filename(file2.filename)
    path1, path2 = os.path.join(app.config['UPLOAD_FOLDER'], filename1), os.path.join(app.config['UPLOAD_FOLDER'], filename2)
    file1.save(path1); file2.save(path2)
    num_frames = int(request.form.get('frames', 60))
    unique_id = uuid4().hex[:8]
    raw_output = os.path.join(app.config['UPLOAD_FOLDER'], f"morph_raw_{unique_id}_.mp4")
    final_name = f"morph_output_{unique_id}.mp4"
    final_output = os.path.join(app.config['UPLOAD_FOLDER'], final_name)
    try:
        img1, img2 = cv2.imread(path1), cv2.imread(path2)
        if img1 is None or img2 is None:
            flash('Could not read one of the images.'); return redirect(url_for('index'))
        # Resize the second image to match the first before detecting landmarks,
        # so the landmark coordinates line up with the pixels that get morphed.
        h, w, _ = img1.shape
        img2 = cv2.resize(img2, (w, h))
        landmarks1, landmarks2 = get_landmarks(img1), get_landmarks(img2)
        if landmarks1 is None:
            flash('No face detected in the start image.'); return redirect(url_for('index'))
        if landmarks2 is None:
            flash('No face detected in the end image.'); return redirect(url_for('index'))
        boundary_pts = np.array([(0,0), (w//2,0), (w-1,0), (w-1,h//2), (w-1,h-1), (w//2,h-1), (0,h-1), (0,h//2)])
        landmarks1 = np.concatenate((landmarks1, boundary_pts)); landmarks2 = np.concatenate((landmarks2, boundary_pts))
        delaunay = scipy.spatial.Delaunay((landmarks1 + landmarks2) / 2)
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        video_out = cv2.VideoWriter(raw_output, fourcc, 30, (w, h))
        for i in range(num_frames):
            video_out.write(generate_morphed_frame(img1, img2, landmarks1, landmarks2, delaunay.simplices, i / (num_frames - 1)))
        video_out.release()
        video = VideoFileClip(raw_output)
        music_filename = request.form.get('music') or 'long_text_40min.mp3'
        music_path = os.path.join('static', 'music', music_filename)
        if os.path.exists(music_path):
            audio = AudioFileClip(music_path).subclip(0, video.duration)
            video = video.set_audio(audio)
        video.write_videofile(final_output, codec='libx264', audio_codec='aac')
        os.remove(raw_output)
        return send_from_directory(app.config['UPLOAD_FOLDER'], final_name, as_attachment=False)
    except Exception as e:
        flash(f'An error occurred: {e}'); return redirect(url_for('index'))
@app.route('/concatenate', methods=['POST'])
def concatenate():
    files = [request.files.get(f'video{i}') for i in range(1, 5)]
    if not all(files):
        flash('Missing one or more video files'); return redirect(request.url)
    for file in files:
        if file.filename == '' or not allowed_file(file.filename):
            flash('Invalid file type or missing file'); return redirect(request.url)
    paths = []
    # Use unique names for uploaded segments to avoid conflicts
    for file in files:
        filename = secure_filename(f"{uuid4().hex[:8]}_{file.filename}")
        path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        file.save(path)
        paths.append(path)
    unique_id = uuid4().hex[:8]
    raw_output = os.path.join(app.config['UPLOAD_FOLDER'], f"concat_raw_{unique_id}.mp4")
    final_name = f"concatenated_output_{unique_id}.mp4"
    final_output = os.path.join(app.config['UPLOAD_FOLDER'], final_name)
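    # concatenate_videoclips(method="compose") composites each clip onto a canvas
    # sized to the largest clip, so segments with different resolutions can still
    # be joined (smaller clips are centred over a black background).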
    try:
        clips = [VideoFileClip(p) for p in paths]
        final_clip = concatenate_videoclips(clips, method="compose")
        # This part for adding music is from your original code
        music_filename = request.form.get('music') or 'long_text_40min.mp3'
        music_path = os.path.join('static', 'music', music_filename)
        if os.path.exists(music_path):
            audio = AudioFileClip(music_path).subclip(0, final_clip.duration)
            final_clip = final_clip.set_audio(audio)
        final_clip.write_videofile(
            final_output,
            codec='libx264',
            audio_codec='aac',
            temp_audiofile='temp-audio.m4a',
            remove_temp=True
        )
        # *** THE ONLY CHANGE IS HERE ***
        flash(f'Successfully created video: {final_name}')
        # Instead of serving the file, we redirect to the homepage
        # and pass the new video's name in the URL.
        return redirect(url_for('index', new_video=final_name))
    except Exception as e:
        flash(f'An error occurred during concatenation: {e}'); return redirect(url_for('index'))
    finally:
        # Clean up the temporary uploaded video segments
        for p in paths:
            if os.path.exists(p):
                os.remove(p)
# The rest of your original code remains untouched.
@app.route('/music-list')
def music_list():
    party_dir = os.path.join('static', 'music')
    sad_dir = os.path.join('static', 'music_sad')
    party = [f for f in os.listdir(party_dir) if f.lower().endswith('.mp3')]
    sad = [f for f in os.listdir(sad_dir) if f.lower().endswith('.mp3')]
    return {'party': sorted(party), 'sad': sorted(sad)}
def face_morph(img1_path, img2_path, output_path, frames=60):
    try:
        img1, img2 = cv2.imread(img1_path), cv2.imread(img2_path)
        if img1 is None or img2 is None:
            ic("❌ Could not load one or both input images."); raise ValueError("Image load failure")
        # Resize the second image to match the first before detecting landmarks,
        # so the landmark coordinates line up with the pixels that get morphed.
        h, w, _ = img1.shape
        img2 = cv2.resize(img2, (w, h))
        landmarks1, landmarks2 = get_landmarks(img1), get_landmarks(img2)
        if landmarks1 is None:
            ic("❌ No face detected in img1."); raise ValueError("No face in img1")
        if landmarks2 is None:
            ic("❌ No face detected in img2."); raise ValueError("No face in img2")
        boundary_pts = np.array([(0, 0), (w // 2, 0), (w - 1, 0), (w - 1, h // 2), (w - 1, h - 1), (w // 2, h - 1), (0, h - 1), (0, h // 2)])
        landmarks1 = np.concatenate((landmarks1, boundary_pts)); landmarks2 = np.concatenate((landmarks2, boundary_pts))
        delaunay = scipy.spatial.Delaunay((landmarks1 + landmarks2) / 2)
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        video_out = cv2.VideoWriter(output_path, fourcc, 30, (w, h))
        for i in range(frames):
            video_out.write(generate_morphed_frame(img1, img2, landmarks1, landmarks2, delaunay.simplices, i / (frames - 1)))
        video_out.release()
        ic("✅ Morph video created:", output_path)
    except Exception as e:
        ic("❌ face_morph error:", e); raise
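
# /music_morph pipeline: face_morph() renders the raw morph video, MoviePy attaches the
# selected track (looked up in static/music, then static/music_sad), and the
# static/assets/512x512-frame.png border is layered on top with CompositeVideoClip.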
@app.route('/music_morph', methods=['POST'])
def music_morph():
    img1, img2 = request.files.get('img1'), request.files.get('img2')
    if not img1 or not img2: return "Both images are required", 400
    uid = str(uuid.uuid4())[:8]
    img1_filename = secure_filename(f"{uid}_img1.png")
    img2_filename = secure_filename(f"{uid}_img2.png")
    img1_path, img2_path = os.path.join(UPLOAD_FOLDER, img1_filename), os.path.join(UPLOAD_FOLDER, img2_filename)
    img1.save(img1_path); img2.save(img2_path)
    ic(img1_path, img2_path, request.form.get('music', ''))
    raw_video_path = os.path.join(UPLOAD_FOLDER, f"{uid}_raw_morph.mp4")
    final_video_path = os.path.join(UPLOAD_FOLDER, f"{uid}_final_morph.mp4")
    try:
        face_morph(img1_path, img2_path, raw_video_path, frames=int(request.form.get('frames', 350)))
        ic("✅ Raw morph video created:", raw_video_path)
        video_clip = VideoFileClip(raw_video_path)
        music_choice = request.form.get('music', '')
        if music_choice:
            music_path = os.path.join(MUSIC_FOLDER, music_choice)
            if not os.path.exists(music_path): music_path = os.path.join(MUSIC_SAD_FOLDER, music_choice)
            if os.path.exists(music_path):
                ic("Adding music:", music_path)
                audio_clip = AudioFileClip(music_path).set_duration(video_clip.duration)
                video_clip = video_clip.set_audio(audio_clip)
            else:
                ic("⚠️ Music file not found, proceeding without audio.")
        video_clip.write_videofile(final_video_path, codec='libx264', audio_codec='aac')
        ic("✅ Final video created:", final_video_path)
        video_clip.close()
        final_clip = VideoFileClip(final_video_path)
        frame_image = (ImageClip("static/assets/512x512-frame.png").set_duration(final_clip.duration).resize(height=final_clip.h).set_pos(("center", "center")))
        final_with_frame = CompositeVideoClip([final_clip, frame_image])
        final_with_frame_path = os.path.join(UPLOAD_FOLDER, f"{uid}_musical_morph.mp4")
        final_with_frame.write_videofile(final_with_frame_path, codec='libx264', audio_codec='aac')
        video_files = [f for f in os.listdir(UPLOAD_FOLDER) if f.lower().endswith('.mp4')]
        video_files.sort(key=lambda x: os.path.getctime(os.path.join(UPLOAD_FOLDER, x)), reverse=True)
        return render_template_string(HTML, videos=video_files, final_with_frame_path=final_with_frame_path)
    except Exception as e:
        ic("❌ Failed during video processing:", e)
        if os.path.exists(raw_video_path): return send_file(raw_video_path, as_attachment=False)
        return "Failed to process video", 500
    finally:
        if os.path.exists(raw_video_path): os.remove(raw_video_path)
if __name__ == '__main__':
    app.run(debug=True, host="0.0.0.0", port=5100)
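
▶️ Running Locally

A quick sketch of a local setup, based on the paths and port used in the code above: create the static/music, static/music_sad, and static/assets folders (with a 512x512-frame.png overlay image) alongside app.py, install the pinned dependencies with pip install -r requirements.txt, and start the server with python app.py. The app listens on port 5100, and the dlib landmark model is downloaded automatically on first run.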
📦 requirements.txt

Flask==3.0.3
Werkzeug==3.0.2
moviepy==1.0.3
numpy==1.26.4
opencv-python==4.9.0.80
scipy==1.13.1
dlib==19.24.2
requests==2.31.0
icecream==2.1.3