audioAnalysisRecordAlsa.py
import sys, os, alsaaudio, time, audioop, numpy, glob, scipy, subprocess, wave, cPickle, threading, shutil
import matplotlib
matplotlib.use('TkAgg')                 # select the TkAgg backend before pyplot is imported
import matplotlib.pyplot as plt
import scipy.io.wavfile as wavfile
from scipy.fftpack import rfft, fft
import audioFeatureExtraction as aF
import audioTrainTest as aT
import audioSegmentation as aS

Fs = 16000                              # sampling frequency (Hz) used for recording
def recordAudioSegments(RecordPath, BLOCKSIZE):
    # This function records audio segments (until Ctrl+C is pressed)
    # ARGUMENTS:
    # - RecordPath:    the path where the WAV segments will be stored
    # - BLOCKSIZE:     segment recording size (in seconds)
    #
    # NOTE: filenames are based on the current time.time() value
    print "Press Ctrl+C to stop recording"

    # (re)create the output directory
    RecordPath += os.sep
    d = os.path.dirname(RecordPath)
    if os.path.exists(d) and RecordPath != ".":
        shutil.rmtree(RecordPath)
    os.makedirs(RecordPath)

    # open the ALSA capture device: mono, 16 kHz, 16-bit little-endian samples
    inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NONBLOCK)
    inp.setchannels(1)
    inp.setrate(Fs)
    inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)
    inp.setperiodsize(512)

    midTermBufferSize = int(Fs * BLOCKSIZE)
    midTermBuffer = []
    curWindow = []
    elapsedTime = "%08.3f" % (time.time())

    while 1:
        l, data = inp.read()
        if l:
            # convert the raw 16-bit samples of the current block to a list of ints
            for i in range(len(data) / 2):
                curWindow.append(audioop.getsample(data, 2, i))

            if (len(curWindow) + len(midTermBuffer) > midTermBufferSize):
                samplesToCopyToMidBuffer = midTermBufferSize - len(midTermBuffer)
            else:
                samplesToCopyToMidBuffer = len(curWindow)

            midTermBuffer = midTermBuffer + curWindow[0:samplesToCopyToMidBuffer]
            del(curWindow[0:samplesToCopyToMidBuffer])

            if len(midTermBuffer) == midTermBufferSize:
                # the mid-term buffer is full: write it to a WAV file
                curWavFileName = RecordPath + str(elapsedTime) + ".wav"
                midTermBufferArray = numpy.int16(midTermBuffer)
                wavfile.write(curWavFileName, Fs, midTermBufferArray)
                print "AUDIO OUTPUT: Saved " + curWavFileName
                midTermBuffer = []
                elapsedTime = "%08.3f" % (time.time())
def recordAnalyzeAudio(duration, outputWavFile, midTermBufferSizeSec, modelName, modelType):
    '''
    recordAnalyzeAudio(duration, outputWavFile, midTermBufferSizeSec, modelName, modelType)

    This function records and analyzes audio segments on a fixed-size window basis.

    ARGUMENTS:
    - duration              total recording duration (in seconds)
    - outputWavFile         path of the output WAV file
    - midTermBufferSizeSec  (fixed) segment length in seconds
    - modelName             classification model name
    - modelType             classification model type ("svm" or "knn")
    '''

    # load the pre-trained classifier
    if modelType == 'svm':
        [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadSVModel(modelName)
    elif modelType == 'knn':
        [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadKNNModel(modelName)
    else:
        Classifier = None

    # open the ALSA capture device: mono, 16 kHz, 16-bit little-endian samples
    inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NONBLOCK)
    inp.setchannels(1)
    inp.setrate(Fs)
    inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)
    inp.setperiodsize(512)

    midTermBufferSize = int(midTermBufferSizeSec * Fs)
    allData = []
    midTermBuffer = []
    curWindow = []
    count = 0

    while len(allData) < duration * Fs:
        # read data from the capture device
        l, data = inp.read()
        if l:
            for i in range(l):
                curWindow.append(audioop.getsample(data, 2, i))

            if (len(curWindow) + len(midTermBuffer) > midTermBufferSize):
                samplesToCopyToMidBuffer = midTermBufferSize - len(midTermBuffer)
            else:
                samplesToCopyToMidBuffer = len(curWindow)

            midTermBuffer = midTermBuffer + curWindow[0:samplesToCopyToMidBuffer]
            del(curWindow[0:samplesToCopyToMidBuffer])

            if len(midTermBuffer) == midTermBufferSize:
                count += 1
                if Classifier is not None:
                    # extract mid-term features of the current segment and classify it
                    [mtFeatures, stFeatures] = aF.mtFeatureExtraction(midTermBuffer, Fs, 2.0 * Fs, 2.0 * Fs, 0.020 * Fs, 0.020 * Fs)
                    curFV = (mtFeatures[:, 0] - MEAN) / STD
                    [result, P] = aT.classifierWrapper(Classifier, modelType, curFV)
                    print classNames[int(result)]
                allData = allData + midTermBuffer

                # plot the current segment
                plt.clf()
                plt.plot(midTermBuffer)
                plt.show(block=False)
                plt.draw()

                midTermBuffer = []

    # write the entire recording to the output WAV file
    allDataArray = numpy.int16(allData)
    wavfile.write(outputWavFile, Fs, allDataArray)
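
# Example (illustrative): record 10 seconds, classify every 2-second segment with a
# kNN model and save the full recording; "knnSpeechMusic" is a hypothetical model path,
# not a file shipped with this script.
#
#   recordAnalyzeAudio(10, "output.wav", 2.0, "knnSpeechMusic", "knn")
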
def main(argv):
    if argv[1] == '-recordSegments':                # record segments (until Ctrl+C is pressed)
        if (len(argv) == 4):
            recordAudioSegments(argv[2], float(argv[3]))
        else:
            print "Error.\nSyntax: " + argv[0] + " -recordSegments <recordingPath> <segmentDuration>"

    if argv[1] == '-recordAndClassifySegments':     # recording + audio analysis
        if (len(argv) == 6):
            duration = int(argv[2])
            outputWavFile = argv[3]
            modelName = argv[4]
            modelType = argv[5]
            if modelType not in ["svm", "knn"]:
                raise Exception("ModelType has to be either svm or knn!")
            if not os.path.isfile(modelName):
                raise Exception("Input modelName not found!")
            recordAnalyzeAudio(duration, outputWavFile, 2.0, modelName, modelType)
        else:
            print "Error.\nSyntax: " + argv[0] + " -recordAndClassifySegments <duration> <outputWavFile> <modelName> <modelType>"


if __name__ == '__main__':
    main(sys.argv)
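
# Example command-line invocations (the output paths and the model name below are
# illustrative assumptions, not files provided with the script):
#
#   python audioAnalysisRecordAlsa.py -recordSegments recordedSegments 1.0
#   python audioAnalysisRecordAlsa.py -recordAndClassifySegments 20 output.wav knnSpeechMusic knn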