@tatesuke
Created April 10, 2019 08:28
Video recording in Python (with audio)
import pyaudio  # audio recording
import wave     # reading and writing WAV files
import datetime
import numpy as np
import cv2
import concurrent.futures
import ffmpeg
from pprint import pprint

def recordSound(outputFileName, recordSeconds=10):
    WAVE_OUTPUT_FILENAME = outputFileName  # file the recorded audio is written to
    iDeviceIndex = 0  # index of the recording device

    # Basic settings
    FORMAT = pyaudio.paInt16  # sample format
    CHANNELS = 1              # mono
    RATE = 44100              # sample rate
    CHUNK = 2 ** 11           # frames read per buffer

    audio = pyaudio.PyAudio()
    startTime = datetime.datetime.now()
    stream = audio.open(format=FORMAT, channels=CHANNELS,
                        rate=RATE, input=True,
                        input_device_index=iDeviceIndex,  # index of the recording device
                        frames_per_buffer=CHUNK)

    # -------------- recording starts ---------------
    print("recording...")
    frames = []
    for i in range(0, int(RATE / CHUNK * recordSeconds)):
        data = stream.read(CHUNK)
        frames.append(data)
    print("finished recording")
    # -------------- recording ends ---------------

    stream.stop_stream()
    stream.close()
    audio.terminate()

    waveFile = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
    waveFile.setnchannels(CHANNELS)
    waveFile.setsampwidth(audio.get_sample_size(FORMAT))
    waveFile.setframerate(RATE)
    waveFile.writeframes(b''.join(frames))
    waveFile.close()

    return startTime
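
# Note: iDeviceIndex above is hard-coded to 0, which is not necessarily the right
# input on every machine. The helper below is a minimal sketch (not part of the
# original gist; the name listInputDevices is made up here) that prints the
# available PyAudio input devices so the correct index can be chosen.
def listInputDevices():
    audio = pyaudio.PyAudio()
    try:
        for i in range(audio.get_device_count()):
            info = audio.get_device_info_by_index(i)
            if info.get("maxInputChannels", 0) > 0:
                print(i, info.get("name"))
    finally:
        audio.terminate()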

def recordVideo(outputFileName, recordSeconds=10):
    cap = cv2.VideoCapture(0)

    # Grab one frame up front so the output frame size is known
    ret, frame = cap.read()
    if ret == True:
        frame = cv2.flip(frame, 0)

    # Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(outputFileName, fourcc, 30.0, (frame.shape[1], frame.shape[0]))

    startDateTime = None
    startTime = datetime.datetime.now()
    previous = None
    while cap.isOpened():
        now = datetime.datetime.now()
        if (now - startTime).total_seconds() >= recordSeconds:
            break
        if previous is None:
            previous = now - datetime.timedelta(seconds=1 / 30)
        td = now - previous
        if td.total_seconds() < 1 / 30:
            print("skipped (less than one frame interval)")
            continue
        # If more than one frame interval has passed, repeat the last frame to keep 30 fps
        for i in range(int(td.total_seconds() / (1 / 30)) - 1):
            print("padding with previous frame")
            out.write(frame)
        ret, frame = cap.read()
        startDateTime = datetime.datetime.now() if startDateTime is None else startDateTime
        if ret == True:
            frame = cv2.flip(frame, 0)
            out.write(frame)
            previous = now
        else:
            raise Exception("failed to read a frame from the camera")

    # Release everything if job is finished
    cap.release()
    out.release()
    return startDateTime

def merge(soundFileName, soundStartTime, videoFileName, videoStartTime, length, outputFileName):
    # Offset between the two recordings: trim the start of the audio by dt seconds
    # so it lines up with the first video frame.
    dt = (videoStartTime - soundStartTime).total_seconds()
    # ffmpeg.input(soundFileName).output("temp.aac", ss=dt, t=length, **{"c:v": "aac"}).run()
    # ffmpeg.input(videoFileName).output("temp.mp4", ss=0, t=length, **{"c:v": "h264"}).run()
    ffmpeg.output(
        ffmpeg.input(videoFileName, ss=0)["v"],
        ffmpeg.input(soundFileName, ss=dt)["a"],
        outputFileName, t=length).run()
    # print(dt)
    # pprint(ffmpeg.probe("temp.mp4"))
    # pprint(ffmpeg.probe("temp.aac"))
    # pprint(ffmpeg.probe("output.mp4"))
if __name__ == "__main__":
# print(ffmpeg.probe("output.mp4"))
# # print(ffmpeg.probe("temp.aac"))
# # print(ffmpeg.probe("temp.mp4"))
executor = concurrent.futures.ProcessPoolExecutor(max_workers=4)
future1 = executor.submit(recordSound, "sound.wav", 60)
future2 = executor.submit(recordVideo, "video.avi", 60)
soundStartTime = future1.result()
videoStartTime = future2.result()
executor.submit(merge, "sound.wav",soundStartTime, "video.avi", videoStartTime, 59, "output2.mp4").result()