_test_pyaudio.py
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
'''_test_pyaudio
'''

import sys, os
from matplotlib import pyplot as plt
from AudioDat import AudioDat

OUTF = 'c:/tmp/_A_.wav'

def test_pyaudio():
    notes = ( # note, velocity, ticks
        ( 60, 76, 48), ( 62, 89, 48), ( 64, 102, 48), ( 65, 114, 48),
        ( 67, 114, 96), ( 69, 127, 48), ( 65, 89, 48),
        ( 64, 64, 96), ( 62, 51, 96), ( 60, 38, 192))
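    # MIDI note numbers 60-69 span C4 (middle C) up to A4; with the default
    # TPQ of 96, 48 ticks is an eighth note, 96 a quarter and 192 a half note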
    ad = AudioDat()
    ad.play(notes, 120, .6)
    ad.close()

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ad = AudioDat()
    ad.mkwave(ad.a4n, 127, 384)
    n = int(2.5 * ad.fsample / ad.a4f) # first 2.5 cycles of A4, i.e. t from 0 to 5 pi
    ax.plot(ad.t[:n], ad.dat[:n], 'r-')
    # ax.set_xlim((-10, 20)) # -3 pi to 6 pi
    ax.set_ylim((-32768, 32767)) # -32768 to 32767 bits=16
    ad.save(OUTF)
    ad.close()
    plt.show()

if __name__ == '__main__':
    test_pyaudio()
AudioDat.py
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
'''AudioDat
a sin(2 pi f0 t)
print(pyaudio.paInt8) # 16 00010000
print(pyaudio.paInt16) # 8 00001000
print(pyaudio.paInt24) # 4 00000100
print(pyaudio.paInt32) # 2 00000010
print(pyaudio.paInt48) # no attribute
print(pyaudio.paInt64) # no attribute
samples per cycle = fsample / f = 48000 / 440 = 109.0909... (20 Hz = 2400, 2 kHz = 24, 20 kHz = 2.4)
ticks per quarter
  1/1 = 480, 1/4 = 120, 1/16 = 30, 1/32 = 15
  1/1 = 384, 1/4 = 96, 1/16 = 24, 1/32 = 12, 1/64 = 6, 1/128 = 3
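timing as implemented below: qps = 2 * tempo / tpq and sec = ticks / (tpq * qps),
so at tempo = 120 and tpq = 96, qps = 2.5 and a 96-tick quarter note lasts
96 / (96 * 2.5) = 0.4 s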
note 0 - 127 (A4 = 69)
'''

import sys, os
import numpy as np
import struct
import pyaudio
import wave

LRCH = 1
BITS = 16
FSAMPLE = 48000 # 8000
CHUNK = 4096 # 1024 # words
TPQ = 96
TEMPO = 120
VOL = 1.0
A4F = 440 # 432 # tune
A4N = 69

class AudioDat(object):
    def __init__(self, lrch=LRCH, bits=BITS, fsample=FSAMPLE,
            tpq=TPQ, a4f=A4F, a4n=A4N):
        self.t, self.dat = np.array([]), np.array([])
        self.lrch, self.bits, self.fsample = lrch, bits, fsample
        self.tpq, self.a4f, self.a4n = tpq, a4f, a4n
        self.setparam(TEMPO, VOL)
        self.pa = pyaudio.PyAudio()
        self.strm = self.pa.open(
            format=pyaudio.paInt16, # always bits=16 as pyaudio.paInt16
            channels=self.lrch, rate=self.fsample, output=True)

    def close(self):
        self.strm.close()
        self.pa.terminate()

    @staticmethod
    def w2b(buf):
        return struct.pack('h' * len(buf), *buf) # 'h' always bits=16

    def setparam(self, tempo=TEMPO, vol=VOL):
        self.tempo, self.vol = tempo, vol
        self.qps = 2 * self.tempo / self.tpq # quarters per second as derived from tempo and tpq
        self.freq = self.a4f * 2**((np.arange(128) - self.a4n) / 12)
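        # equal-temperament frequency table indexed by MIDI note number; with
        # a4f=440, freq[69] = 440.0 (A4) and freq[60] = 440 * 2**(-9/12) ≈ 261.63 (middle C)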

    def mkwave(self, note, velocity, ticks):
        fbase = self.freq[note]
        amp = velocity / 127
        sec = ticks / (self.tpq * self.qps)
        t = 2 * np.pi * fbase * np.arange(sec * self.fsample) / self.fsample
        # dat = np.sin(t)
        # dat = np.cos(3 * t) - 2 * np.cos(2 * t) + np.cos(t)
        dat = np.cos(7 * t) - np.cos(6 * t) + np.cos(2 * t) - np.cos(t)
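        # (a mix of cosine partials of the base frequency; the commented-out
        # lines above are alternative timbres, np.sin(t) being a pure tone)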
        amin, amax = np.abs(np.min(dat)), np.abs(np.max(dat)) # peak magnitudes
        gain = self.vol * amp / (amax if amax > amin else amin) # normalize to the larger peak
        self.dat = (gain * dat * 32767).astype(np.int16) # -32768 to 32767 bits=16
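        # after normalization the peak sample is vol * amp * 32767, so MIDI
        # velocity maps linearly onto output amplitude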
        self.t = t
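
    # flush() writes self.dat to the PyAudio output stream in CHUNK-sized
    # slices; the stream was opened without a stream_callback, so each write()
    # blocks until its chunk has been handed to the audio device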
    def flush(self, chunk=CHUNK):
        sp = 0
        while True:
            buf = self.dat[sp:sp+chunk]
            if len(buf) == 0: break
            self.strm.write(self.w2b(buf)) # words
            sp += chunk

    def save(self, fn):
        wf = wave.open(fn, 'w')
        wf.setnchannels(self.lrch)
        wf.setsampwidth(self.bits // 8)
        wf.setframerate(self.fsample)
        wf.writeframes(self.w2b(self.dat)) # words
        wf.close()

    def play(self, notes, tempo=TEMPO, vol=VOL):
        self.setparam(tempo=tempo, vol=vol)
        for note, velocity, ticks in notes:
            self.mkwave(note, velocity, ticks)
            self.flush()

if __name__ == '__main__':
    pass