# FFMPEG example of Blocking Mode Audio I/O https://people.csail.mit.edu/hubert/pyaudio/docs/
"""PyAudio example: play an audio file decoded by FFmpeg."""

import pyaudio
import sys
import subprocess

CHUNK = 1024

if len(sys.argv) < 2:
    print("Plays an audio file.\n\nUsage: %s filename" % sys.argv[0])
    sys.exit(-1)

# decode the input file to raw 16-bit little-endian PCM on stdout
# (use "ffmpeg" instead of "ffmpeg.exe" on non-Windows systems)
song = subprocess.Popen(["ffmpeg.exe", "-i", sys.argv[1], "-loglevel", "panic",
                         "-vn", "-f", "s16le", "pipe:1"],
                        stdout=subprocess.PIPE)

# instantiate PyAudio (1)
p = pyaudio.PyAudio()

# open stream (2)
stream = p.open(format=pyaudio.paInt16,
                channels=2,   # use ffprobe to get this from the file beforehand
                rate=44100,   # use ffprobe to get this from the file beforehand
                output=True)

# read data
data = song.stdout.read(CHUNK)

# play stream (3)
while len(data) > 0:
    stream.write(data)
    data = song.stdout.read(CHUNK)

# stop stream (4)
stream.stop_stream()
stream.close()

# close PyAudio (5)
p.terminate()
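For the two values the comments above say to get from ffprobe, a small helper like the sketch below can fetch them (this is an assumption of mine, not part of the gist; it expects ffprobe on the PATH and at least one audio stream in the file). Its result could then replace the hard-coded channels=2 and rate=44100.

import json
import subprocess

def probe_audio(path):
    """Return (channels, sample_rate) of the first audio stream via ffprobe."""
    out = subprocess.run(
        ["ffprobe", "-v", "error", "-select_streams", "a:0",
         "-show_entries", "stream=channels,sample_rate",
         "-of", "json", path],
        capture_output=True, check=True)
    info = json.loads(out.stdout)["streams"][0]
    return int(info["channels"]), int(info["sample_rate"])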
Hello,
I want to do the exact opposite, process audio in PyAudio and write to an ffmpeg stream. Is that possible?
I think you should be able to just feed it through stdin. You'll need to specify the incoming codec type with the -f flag before your -i flag, and your -i flag should be -i pipe: to tell FFmpeg to read from stdin. Here's a general Stack Overflow article on that: https://stackoverflow.com/questions/45899585/pipe-input-in-to-ffmpeg-stdin I think PyAudio will probably output a raw PCM stream, but I don't know exactly what kind; s16le might be a good first try.
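A minimal sketch of that idea, assuming 16-bit stereo at 44.1 kHz from PyAudio and an output filename picked only for illustration:

import subprocess
import pyaudio

RATE, CHANNELS, CHUNK = 44100, 2, 1024

p = pyaudio.PyAudio()
mic = p.open(format=pyaudio.paInt16, channels=CHANNELS, rate=RATE,
             frames_per_buffer=CHUNK, input=True)

# -f/-ar/-ac come before -i so they describe the raw PCM arriving on stdin
proc = subprocess.Popen(["ffmpeg", "-y", "-f", "s16le", "-ar", str(RATE),
                         "-ac", str(CHANNELS), "-i", "pipe:", "out.mp3"],
                        stdin=subprocess.PIPE)

for _ in range(int(RATE / CHUNK * 5)):   # roughly five seconds of audio
    proc.stdin.write(mic.read(CHUNK))

proc.stdin.close()
proc.wait()
mic.stop_stream()
mic.close()
p.terminate()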
This should work on Windows; not tested on Unix yet.
import time
import subprocess as sp
import pyaudio

channels = 2
audiofps = 44100
chunk = 1024 * 2
sample_format = pyaudio.paInt16

audio = pyaudio.PyAudio()
stream = audio.open(format=sample_format,
                    channels=channels,
                    rate=audiofps,
                    frames_per_buffer=chunk,
                    input=True)

def audiorun():
    # Read raw s16le PCM from stdin and encode it to game.mp3.
    # The -f/-ar/-ac options come before -i so they describe the incoming stream.
    recordcommand = ["ffmpeg",
                     "-f", "s16le",
                     "-ar", str(audiofps),
                     "-ac", str(channels),
                     "-i", "pipe:0",
                     "-vn",
                     "-acodec", "libmp3lame",
                     "-f", "mp3",
                     "game.mp3"]
    B = time.time()
    proc = sp.Popen(recordcommand, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE)
    while True:
        try:
            proc.stdin.write(stream.read(chunk))
        except OSError:
            # ffmpeg exited or the pipe broke
            break
        print("Audio running")
        if time.time() - B >= 20:   # record for roughly 20 seconds
            print("Breaking")
            break
    proc.stdin.close()
    proc.stderr.close()
    proc.stdout.close()
    proc.wait()
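A usage sketch with the same names as above (game.mp3 comes from the command in the snippet; the rest is just cleanup):

audiorun()              # blocks for about 20 seconds, then ffmpeg finalizes game.mp3
stream.stop_stream()
stream.close()
audio.terminate()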
I found using a simple one-liner os.popen call to ffplay, which is bundled with ffmpeg, works well.
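The commenter doesn't show the exact call, but it presumably looks something like the sketch below (the filename and flags are my assumptions; -nodisp hides ffplay's window and -autoexit makes it quit when playback ends):

import os
# .read() blocks until ffplay exits, i.e. until playback finishes
os.popen('ffplay -nodisp -autoexit "song.mp3"').read()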
Are there any advantages of using PyAudio over just that? ffplay sure seems easier, but I'm not sure if it is the better solution.
The wave library is only able to read .wav files. By using ffmpeg, you can decode and play every audio type that ffmpeg supports.