|
|
|
@ -24,12 +24,22 @@ class Frame(object):
|
|
|
|
|
|
|
|
|
|
class ChunkBuffer(object):
    """Sliding-window buffer over a raw PCM byte stream.

    Accumulates incoming audio bytes and cuts them into overlapping
    decode windows of ``window_n`` frames, advancing ``shift_n`` frames
    per step. Byte sizes are derived from the frame/shift durations,
    the sample rate and the sample width.
    """

    def __init__(self,
                 window_n=7,
                 shift_n=4,
                 window_ms=20,
                 shift_ms=10,
                 sample_rate=16000,
                 sample_width=2):
        """audio sample data point buffer

        Args:
            window_n (int, optional): decode window frame length. Defaults to 7 frame.
            shift_n (int, optional): decode shift frame length. Defaults to 4 frame.
            window_ms (int, optional): frame length, ms. Defaults to 20 ms.
            shift_ms (int, optional): shift length, ms. Defaults to 10 ms.
            sample_rate (int, optional): audio sample rate. Defaults to 16000.
            sample_width (int, optional): sample point bytes. Defaults to 2 bytes.
        """
        self.window_n = window_n
        self.shift_n = shift_n
        self.window_ms = window_ms
        self.shift_ms = shift_ms
        self.sample_rate = sample_rate
        self.sample_width = sample_width  # int16 = 2; float32 = 4

        # Bytes left over from the previous frame_generator() call that
        # did not fill a whole window.
        self.remained_audio = b''

        # One decode window spans (window_n - 1) frame shifts plus one
        # full frame; one step advances shift_n frame shifts.
        self.window_sec = float((self.window_n - 1) * self.shift_ms +
                                self.window_ms) / 1000.0
        self.shift_sec = float(self.shift_n * self.shift_ms / 1000.0)

        # Convert durations to byte counts for slicing the PCM stream.
        self.window_bytes = int(self.window_sec * self.sample_rate *
                                self.sample_width)
        self.shift_bytes = int(self.shift_sec * self.sample_rate *
                               self.sample_width)

    def frame_generator(self, audio):
        """Generates audio frames from PCM audio data.

        Prepends any remainder kept from the previous call, then yields
        Frame objects of ``window_sec`` duration, advancing ``shift_sec``
        per frame. The tail that does not fill a whole window is stored
        back into ``self.remained_audio`` for the next call.

        NOTE(review): the lines hidden behind the diff hunk header
        (offset init / remainder splicing) were reconstructed — confirm
        against the original file.

        Args:
            audio (bytes): raw PCM byte data.

        Yields:
            Frame: one decode window of audio with its start timestamp.
        """
        audio = self.remained_audio + audio
        self.remained_audio = b''

        offset = 0
        timestamp = 0.0
        while offset + self.window_bytes <= len(audio):
            yield Frame(audio[offset:offset + self.window_bytes], timestamp,
                        self.window_sec)
            timestamp += self.shift_sec
            offset += self.shift_bytes

        # Keep the unconsumed tail for the next chunk.
        self.remained_audio += audio[offset:]