This article collects and summarizes typical usage examples of the Python function scipy.io.wavfile.read. If you have been wondering how exactly to use read, how to call it, or what it looks like in real code, the curated examples below should help.

Shown below are 20 code examples of the read function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
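Before the examples, here is a minimal sketch of the basic call pattern (the path sample.wav is a placeholder): wavfile.read returns the sample rate in Hz together with a NumPy array of samples, whose dtype depends on the WAV encoding (int16 for 16-bit PCM) and whose shape is (n_samples,) for mono or (n_samples, n_channels) for multi-channel files.

import numpy as np
from scipy.io import wavfile

# "sample.wav" is a placeholder path for this sketch.
rate, data = wavfile.read("sample.wav")
print(rate)         # sample rate in Hz, e.g. 44100
print(data.dtype)   # e.g. int16 for 16-bit PCM files
print(data.shape)   # (n_samples,) for mono, (n_samples, n_channels) otherwise

# 16-bit PCM samples are commonly rescaled to floats in [-1.0, 1.0],
# a pattern that recurs in several of the examples below:
if data.dtype == np.int16:
    data = data / float(np.iinfo(np.int16).max)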
Example 1: test_realFile

def test_realFile(self):
    original_file = self.auxiliary_files_url + "/nai_sample.wav"
    denoised_file = self.auxiliary_files_url + "/nai_sample_sox_denoised.wav"
    user = 'rapp'
    audio_type = 'nao_wav_1_ch'
    scale = 0.2
    result = self.sox_denoise_module.soxDenoise(
        user,
        audio_type,
        original_file,
        denoised_file,
        scale)
    # The function thinks the denoising succeeded
    self.assertEqual(result, "true")
    # Check for the denoised file
    denoised_exists = os.path.isfile(denoised_file)
    self.assertEqual(denoised_exists, True)
    # Check if the denoised energy is lower than the initial one
    samp_freq, signal_orig = wavfile.read(original_file)
    energy_orig = 0.0
    for i in signal_orig:
        energy_orig += i * 1.0 * i
    samp_freq, signal_denoised = wavfile.read(denoised_file)
    energy_denoised = 0.0
    for i in signal_denoised:
        energy_denoised += i * 1.0 * i
    self.assertGreater(energy_orig, energy_denoised)
    # Erase the denoised file
    os.remove(denoised_file)

Developer: gitter-badger, Project: rapp-platform, Lines: 31, Source: denoise_unit_tests.py
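Examples 1 and 19 accumulate the signal energy in a Python-level loop, which is correct but slow for long recordings. A minimal vectorized sketch of the same comparison (the helper name signal_energy and the file paths are illustrative only; the cast to float64 avoids overflow when squaring int16 samples):

import numpy as np
from scipy.io import wavfile

def signal_energy(path):
    # Sum of squared samples; the float64 cast avoids int16 overflow.
    _, signal = wavfile.read(path)
    return np.sum(signal.astype(np.float64) ** 2)

# The same assertion as in the test above, without the explicit loop:
# assert signal_energy("original.wav") > signal_energy("denoised.wav")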
Example 2: generate_mixture

def generate_mixture(src1, src2, fname, attn1, attn2):
    """
    Mixes 10 seconds of two sources of the same sample rate and saves them as fname.
    Args:
        src1: filename for the first source
        src2: filename for the second source
        fname: output filename to save as
        attn1: relative attenuation for the first source
        attn2: relative attenuation for the second source
    Returns:
    """
    sr1, data1 = wav.read(src1)
    if data1.dtype == np.dtype("int16"):
        data1 = data1 / float(np.iinfo(data1.dtype).max)
    sr2, data2 = wav.read(src2)
    if data2.dtype == np.dtype("int16"):
        data2 = data2 / float(np.iinfo(data2.dtype).max)
    if sr1 != sr2:
        raise ValueError("Both sources must have the same sample rate")
    attn1 = float(attn1 + 1) / 2
    attn2 = float(attn2 + 1) / 2
    sample1 = data1[0:10 * sr1]
    sample2 = data2[0:10 * sr1]
    left = attenuate(sample1, attn1) + attenuate(sample2, attn2)
    right = attenuate(sample1, 1 - attn1) + attenuate(sample2, 1 - attn2)
    signal = np.vstack((left, right))
    scipy.io.wavfile.write(fname, sr1, signal.T)

Developer: ethman, Project: prediction, Lines: 34, Source: generate_duet_signals.py
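The int16-to-float normalization at the top of generate_mixture reappears in Example 3 and in the orientation sketch above; it can be factored into a small helper. A sketch under the assumption that integer input should map into [-1.0, 1.0] (the helper name to_float is hypothetical):

import numpy as np
from scipy.io import wavfile as wav

def to_float(data):
    # Hypothetical helper: scale integer PCM samples into [-1.0, 1.0];
    # float input is passed through unchanged.
    if np.issubdtype(data.dtype, np.integer):
        return data / float(np.iinfo(data.dtype).max)
    return data

# Usage, mirroring the pattern in generate_mixture:
# sr1, data1 = wav.read(src1)
# data1 = to_float(data1)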
Example 3: generate_reverb

def generate_reverb(signal, reverb, fname, iter_range):
    """
    Adds reverb from the path reverb to the data in the path signal and saves
    it as fname. Applies reverb iteratively over iter_range.
    :param signal: the filename for the stereo input signal
    :param reverb: the filename for the stereo impulse response
    :param fname: the output filename to save as
    :param iter_range: the max number of iterations to convolve with the signal
    :return:
    """
    sr, data = wav.read(signal)
    if data.dtype == np.dtype("int16"):
        data = data / float(np.iinfo(data.dtype).max)
    sr_ir, data_ir = wav.read(reverb)
    if data_ir.dtype == np.dtype("int16"):
        data_ir = data_ir / float(np.iinfo(data_ir.dtype).max)
    if sr_ir != sr:
        raise ValueError("Impulse response must have the same sample rate as the signal")
    prev_data = data
    for i in xrange(0, iter_range + 1):
        if i > 0:
            mix = add_reverb(prev_data.T, data_ir.T)
            prev_data = np.copy(mix).T
        else:
            mix = data.T
        if not os.path.exists(os.path.splitext(fname)[0] + '-' + str(i) + '.wav'):
            scipy.io.wavfile.write(os.path.splitext(fname)[0] + '-' + str(i) + '.wav', sr, mix.T)

Developer: ethman, Project: prediction, Lines: 31, Source: generate_duet_signals.py
Example 4: load_data

def load_data(syllable, N, used_samples, snr, sample_order=None):
    """Function that goes through all N samples of a syllable and loads their wave data.
    :param syllable: complete path name of the syllable (string)
    :param N: number of samples to load
    :param used_samples: number of samples to skip at the beginning
    :param snr: the strength of the noise
    :param sample_order: if not None, should be a vector of indices of the samples to be loaded (default = None)
    :returns syllable_waves: list of N sample waves of the syllable
    """
    samples = [files for files in os.listdir(syllable)]
    syllable_waves = []
    if sample_order is None:
        for i in range(int(N)):
            rate, wave = wav.read(syllable + '/' + samples[i + used_samples])
            if snr != 0.0:
                noiseLvl = np.sqrt(np.var(wave) / snr)
            else:
                noiseLvl = 0.0
            wave = wave + noiseLvl * np.random.randn(len(wave))
            syllable_waves.append([wave, rate])
    else:
        for i in sample_order:
            rate, wave = wav.read(syllable + '/' + samples[i])
            if snr != 0.0:
                noiseLvl = np.sqrt(np.var(wave) / snr)
            else:
                noiseLvl = 0.0
            wave = wave + noiseLvl * np.random.randn(len(wave))
            syllable_waves.append([wave, rate])
    return syllable_waves

Developer: CogSciUOS, Project: Conceptors, Lines: 33, Source: preprocessing.py
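The noise scaling in load_data follows from the usual definition SNR = Var(signal) / Var(noise): solving for the noise standard deviation gives sigma_noise = sqrt(Var(wave) / snr), which is exactly the noiseLvl factor applied to the unit-variance np.random.randn samples.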
Example 5: simple_noise_filter

def simple_noise_filter(target, files, method=median_by_intensity, combination=flatten, section_length=4096):
    # Load all audio files into arrays and bin each into sections of fixed length
    feeds = [section_by_length(wavfile.read(file)[1], section_length) for file in files]
    samplerate = wavfile.read(files[0])[0]
    # Perform an FFT on each section, then filter each frequency bin
    # (by default, taking the median by intensity across the feeds)
    max_len = len(max(feeds, key=len))
    sections = []
    for i in range(max_len):
        freqs = [fft.fft(feed[i], axis=0) for feed in feeds]
        # Traverse the arrays in parallel, filtering each frequency bin
        filtered_freqs = [method(freqs, j) for j in range(len(freqs[0]))]
        sections += [real(fft.ifft(filtered_freqs, axis=0)).astype(feeds[0][0].dtype)]
    # Combine the filtered sections and write the output
    samples = combination(sections)
    wavfile.write(target, samplerate, samples)

Developer: alv53, Project: ClassCapture_Synth, Lines: 28, Source: ccsaux.py
Example 6: test_ubm_var_channel

def test_ubm_var_channel():
    ubm = GMM.load('model/ubm.mixture-32.person-20.immature.model')
    train_duration = 8.
    nr_test = 5
    test_duration = 3.
    audio_files = ['xinyu.vad.wav', 'wyx.wav']
    X_train, y_train, X_test, y_test = [], [], [], []
    for audio_file in audio_files:
        fs, signal = wavfile.read(audio_file)
        signal = monotize_signal(signal)
        train_len = int(fs * train_duration)
        test_len = int(fs * test_duration)
        X_train.append(mix_feature((fs, signal[:train_len])))
        y_train.append(audio_file)
        for i in range(nr_test):
            start = random.randint(train_len, len(signal) - test_len)
            # (slices train_len samples; the bound above suggests test_len was intended)
            X_test.append(mix_feature((fs, signal[start:start + train_len])))
            y_test.append(audio_file)
    gmmset = GMMSet(32, ubm=ubm)
    gmmset.fit(X_train, y_train)
    y_pred = gmmset.predict_with_reject(X_test)
    for i in xrange(len(y_pred)):
        print y_test[i], y_pred[i], '' if y_test[i] == y_pred[i] else 'wrong'
    for imposter_audio_file in map(
            lambda x: 'test-{}.wav'.format(x), range(5)):
        fs, signal = wavfile.read(imposter_audio_file)
        signal = monotize_signal(signal)
        imposter_x = mix_feature((fs, signal))
        print gmmset.predict_one_with_rejection(imposter_x)

Developer: Dagiopia, Project: speaker-recognition, Lines: 35, Source: test-reject.py
Example 7: estim_diff

def estim_diff(percent=256):
    sound_counter = 0
    res = np.empty(len(input_file_names))
    for i in range(res.shape[0]):
        input_rate, input_sig = wavfile.read(input_dir + 'Segments/' + input_file_names[i])
        output_rate, output_sig = wavfile.read(output_dir + 'Segments/' + output_file_names[i])
        input_sig = pcm2float(input_sig, 'float32')
        output_sig = pcm2float(output_sig, 'float32')
        min_size = np.min((input_sig[:, 0].shape[0], output_sig[:, 0].shape[0]))
        # Estimate the power spectral densities of the first channel with
        # Welch's method; nperseg is rounded down to a multiple of 10
        nperseg = int(min_size * percent) - np.mod(int(min_size * percent), 10)
        real_perc = float(float(nperseg) / int(min_size * percent))
        S_inp = signal.welch(input_sig[:min_size, 0], nperseg=nperseg)[1]
        S_out = signal.welch(output_sig[:min_size, 0], nperseg=nperseg)[1]
        res[sound_counter] = delta_estimator_3(S_out / S_inp, S_inp) - delta_estimator_3(S_inp / S_out, S_out)
        sound_counter += 1
    return real_perc, int(min_size * percent), res

Developer: najishajari, Project: SIC, Lines: 28, Source: man_echo.py
Example 8: find_offset

def find_offset(file1, file2, fs=8000, trim=60*15, correl_nframes=1000):
    tmp1 = convert_and_trim(file1, fs, trim)
    tmp2 = convert_and_trim(file2, fs, trim)
    # Removing warnings because of the 18-bit block size
    # output by ffmpeg
    # https://trac.ffmpeg.org/ticket/1843
    warnings.simplefilter("ignore", wavfile.WavFileWarning)
    a1 = wavfile.read(tmp1, mmap=True)[1] / (2.0 ** 15)
    a2 = wavfile.read(tmp2, mmap=True)[1] / (2.0 ** 15)
    # Truncate zeroes off the beginning of each signal
    # (only seems to happen in ffmpeg, not in sox)
    a1 = ensure_non_zero(a1)
    a2 = ensure_non_zero(a2)
    mfcc1 = mfcc(a1, nwin=256, nfft=512, fs=fs, nceps=13)[0]
    mfcc2 = mfcc(a2, nwin=256, nfft=512, fs=fs, nceps=13)[0]
    mfcc1 = std_mfcc(mfcc1)
    mfcc2 = std_mfcc(mfcc2)
    c = cross_correlation(mfcc1, mfcc2, nframes=correl_nframes)
    max_k_index = np.argmax(c)
    # The MFCC window hop (160 samples) is hardcoded in scikits.talkbox
    offset = max_k_index * 160.0 / float(fs)  # frames * hop / sample rate
    score = (c[max_k_index] - np.mean(c)) / np.std(c)  # standard score of the peak
    os.remove(tmp1)
    os.remove(tmp2)
    return offset, score

Developer: MaPePeR, Project: audio-offset-finder, Lines: 25, Source: audio_offset_finder.py
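A quick sanity check on the offset formula above: at fs = 8000 Hz with the 160-sample hop hardcoded in scikits.talkbox, each MFCC frame advances 160 / 8000 = 0.02 s, so a hypothetical correlation peak at max_k_index = 250 would correspond to an offset of 250 * 160 / 8000 = 5.0 seconds.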
Example 9: perf_eval

def perf_eval(param):
    # param is encoded in base 3000: the remainder is the nperseg value
    nperseg = param % 3000
    # and the quotient indexes the number of music segments
    num_of_seg_idx = (param - nperseg) // 3000
    num_of_seg = num_of_segs[num_of_seg_idx]
    input_rate, input_sig = wavfile.read(input_dir + song_name + '.wav')
    output_rate, output_sig = wavfile.read(output_dir + song_name + '.wav')
    # The +1 in the denominator excludes the last piece of music, so that
    # only pieces of the same size are considered
    input_seg_len = input_sig.shape[0] // (num_of_seg + 1)
    output_seg_len = output_sig.shape[0] // (num_of_seg + 1)
    if input_rate != output_rate:
        print("Rate Mismatch!")
        sys.exit(0)
    if np.min((input_seg_len, output_seg_len)) * 0.7 < nperseg * nperseg_step:
        print("Nothing to do!")
        sys.exit(0)
    res = estim_diff(input_sig, input_seg_len, output_sig, output_seg_len, nperseg, num_of_seg, nperseg_step)
    f = open('/agbs/cluster/naji/Linear Filters/Echo/out/Winter/Room/' + str(num_of_seg) + '/' + str(nperseg) + '.txt', 'w')
    print(nperseg, file=f)
    print(np.mean(res > 0), file=f)

Developer: najishajari, Project: SIC, Lines: 30, Source: man_echo.py
Example 10: get_offset_wav

def get_offset_wav(wav_filename1, wav_filename2, time_limit=300):
    """Return the offset in seconds between wav_filename1 and
    wav_filename2, which are recordings of the same event
    with potentially different starting times. Returns the
    number of seconds that wav_filename2 starts after wav_filename1
    (possibly negative).
    If time_limit is provided, clip the files to their first
    time_limit seconds. This can substantially speed up
    offset detection."""
    rate1, data1 = sp_wav.read(wav_filename1)
    rate2, data2 = sp_wav.read(wav_filename2)
    # The two files must have the same sampling rate
    assert rate1 == rate2
    if time_limit is not None:
        data1 = data1[0:rate1 * time_limit]
        data2 = data2[0:rate2 * time_limit]
    offset_samples = get_offset_xcorr(data1, data2)
    offset_seconds = offset_samples / float(rate1)
    return offset_seconds

Developer: pcallier, Project: livingroom, Lines: 25, Source: get_offset.py
Example 11: mix_files

def mix_files(f1, f2):
    base1 = f1.split('/')[-1].split('.wav')[0]
    base2 = f2.split('/')[-1].split('.wav')[0]
    (fs, sig) = wav.read(f1)
    s1 = sig.reshape((len(sig), 1))
    del sig
    (fs, sig) = wav.read(f2)
    s2 = sig.reshape((len(sig), 1))
    del sig
    block_length = 5 * fs
    s1_blocks = enframe(s1, block_length, block_length)
    s2_blocks = enframe(s2, block_length, block_length)
    del s1, s2
    nrg1 = 0.707 * np.sqrt(np.sum(np.power(s1_blocks, 2), axis=1))
    nrg2 = 0.707 * np.sqrt(np.sum(np.power(s2_blocks, 2), axis=1))
    for i in range(len(nrg1)):
        # Note: np.log is the natural logarithm, so db1/db2 are log-energies
        # in nats rather than true decibels
        db1 = np.log(nrg1[i])
        db2 = np.log(nrg2[i])
        if (db1 >= 9) and (db2 >= 9) and (0.1 < abs(db1 - db2) < 5):
            sir = '%.2f' % (db1 - db2)
            ovl_name = '/erasable/nxs113020/wav_ovl/' + base1 + '_' + base2 + '_sir' + sir + '_' + str(i) + '.wav'
            overlapped = s1_blocks[i, :] + s2_blocks[i, :]
            nrg_ovl = 0.707 * np.sqrt(np.sum(np.power(overlapped, 2)))
            scikits.audiolab.wavwrite(overlapped / nrg_ovl, ovl_name, fs, 'pcm16')

Developer: idnavid, Project: hmm_overlap_detection, Lines: 28, Source: mix_channels.py
Example 12: test_write_edge_values

def test_write_edge_values(self):
    # Write edge value 1.0
    samples = numpy.ones((441, 1), dtype=numpy.float32)
    dest_file = NamedTemporaryFile(delete=True)
    wfile, infos = wav.open_write_mode(dest_file.name, 44100, 1)
    wav.write_block(wfile, samples)
    wfile._file.flush()  # Force the file to be written to disk
    frame_rate, samples_written = sp_wavfile.read(dest_file.name)
    numpy.testing.assert_array_equal(samples_written, numpy.array([2**15 - 1] * 441, dtype=numpy.int16))
    dest_file.close()
    # Write value 2.0, clipped to 1.0
    samples = numpy.ones((441, 1), dtype=numpy.float32) * 2.0
    dest_file = NamedTemporaryFile(delete=True)
    wfile, infos = wav.open_write_mode(dest_file.name, 44100, 1)
    wav.write_block(wfile, samples)
    wfile._file.flush()  # Force the file to be written to disk
    frame_rate, samples_written = sp_wavfile.read(dest_file.name)
    numpy.testing.assert_array_equal(samples_written, numpy.array([2**15 - 1] * 441, dtype=numpy.int16))
    dest_file.close()
    # Write edge value -1.0
    samples = numpy.ones((441, 1), dtype=numpy.float32) * -1
    dest_file = NamedTemporaryFile(delete=True)
    wfile, infos = wav.open_write_mode(dest_file.name, 44100, 1)
    wav.write_block(wfile, samples)
    wfile._file.flush()  # Force the file to be written to disk
    frame_rate, samples_written = sp_wavfile.read(dest_file.name)
    numpy.testing.assert_array_equal(samples_written, numpy.array([-2**15] * 441, dtype=numpy.int16))
    dest_file.close()

Developer: accraze, Project: pychedelic, Lines: 33, Source: wav_tests.py
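The expected values in this test follow from the asymmetric range of 16-bit PCM: floats in [-1.0, 1.0] are mapped onto [-2**15, 2**15 - 1] = [-32768, 32767], so a full-scale 1.0 sample is written as 32767, -1.0 as -32768, and the out-of-range 2.0 input is clipped to full scale first.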
Example 13: read_sound

def read_sound(fp):
    """
    Create a normalized float array and data rate from any audio file.
    """
    if fp.endswith('mp3'):
        try:
            oname = 'temp.wav'
            # Decode the mp3 to a temporary wav with lame
            result = subprocess.call(['lame', '--decode', fp, oname])
            assert result == 0
            samplerate, data = wav.read(oname)
        except:
            print "couldn't run lame"
            try:
                import moviepy.editor as mpy
                aud_clip = mpy.AudioFileClip(fp)
                samplerate = aud_clip.fps
                data = aud_clip.to_soundarray()
            except:
                print "moviepy not installed?"
    if fp.endswith('aif'):
        oname = fp
        sf = Sndfile(fp, 'r')
        sf.seek(0)
        data = sf.read_frames(sf.nframes)
        samplerate = sf.samplerate
    if fp.endswith('wav'):
        samplerate, data = wav.read(fp)
    if len(data.shape) > 1:
        data = data[:, 0]
    data = data.astype('float64')
    data /= data.max()
    return data, samplerate

Developer: paul-bauer-rfcx, Project: sound-analysis, Lines: 34, Source: rfcx_sounds.py
Example 14: plot_from_wavfile

def plot_from_wavfile(file1, file2):
    '''
    Given two wav files, plot their frequency spectra.
    '''
    rate1, data1 = wavefile.read(file1)
    rate2, data2 = wavefile.read(file2)
    plot_from_rawdata(data1, data2, rate1)

Developer: jiang0131, Project: pr2_pretouch, Lines: 7, Source: helper.py
Example 15: processing

def processing():
    """Post-processing of MLSbuf and recBuf, using the matched filter functions"""
    # -- start recording and playback in async. mode
    play_while_recording()
    global SAMPLE_RATE
    # -- latency for input and output devices, obtained using the portaudio pa_devs script
    inputLatency = 0.0087
    outputLatency = 0.0087
    # -- convert latencies to a number of samples
    latencySamples = math.ceil((inputLatency + outputLatency) * SAMPLE_RATE)
    # -- calibration samples
    calSamp = 52
    # -- load the recording buffer into a numpy array
    recData = read("recBuf.wav")
    recBuf = np.array(recData[1], dtype=float)
    # -- index of internal delays & calibration samples to subtract
    interDelaySamp = np.s_[0:(latencySamples + calSamp)]
    recBuf = np.delete(recBuf, interDelaySamp)
    # -- remove excess samples from the recording buffer
    removeExcessSamples = np.s_[6000:]
    recBuf = np.delete(recBuf, removeExcessSamples)
    # -- load the playback buffer
    MLSdata = read("MLS.wav")
    MLSbuf = np.array(MLSdata[1], dtype=float)
    # -- compute the delay using matched filters & normalize
    xcorr = matched_filter(MLSbuf, recBuf) / 50000000000.0
    # -- get the gain
    gain = get_gain(MLSbuf, recBuf)
    # -- peak detector
    prop_delay = peak_detector(xcorr)
    # -- plot the recorded seq. and Tx MLS seq. (uncomment for debugging)
    plt.figure(1)
    plt.plot(MLSbuf)
    plt.title("MLS sequence")
    plt.xlabel("samples")
    plt.grid(True)
    plt.figure(2)
    plt.plot(recBuf)
    plt.title("Recorded MLS sequence")
    plt.xlabel("samples")
    plt.ylabel("Amplitude")
    plt.grid(True)
    plt.figure(3)
    plt.plot(abs(xcorr))
    plt.title("Matched Filter Output")
    plt.xlabel("delay (samples)")
    plt.ylabel("Rxy")
    plt.grid(True)
    plt.show()

Developer: abnercv, Project: Acoustic-Ruler-Project-Firmware, Lines: 60, Source: debugging_ver.py
Example 16: __init__

def __init__(self, snd, fps=None, bitrate=3000):
    Clip.__init__(self)
    if isinstance(snd, str):
        if not snd.endswith('.wav'):
            temp = 'temp.wav'
            ffmpeg.extract_sound(snd, temp, fps, bitrate)
            fps, arr = wavfile.read(temp)
            # os.remove(temp)
        else:
            fps, arr = wavfile.read(snd)
        self.array = arr
        self.fps = fps
    else:
        self.array = snd
        self.fps = fps
    self.duration = 1.0 * len(self.array) / self.fps

    def gf(t):
        i = int(self.fps * t)
        if i < 0 or i >= len(self.array):
            return 0
        else:
            return self.array[i]

    self.get_frame = gf

Developer: muteokie, Project: moviepy, Lines: 29, Source: AudioClip.py
Example 17: remove_silence

def remove_silence(filename):
    (rate, sig) = wav.read(filename)
    framelength = int(round(FRAMELENGTH * rate))
    frameamount = int(math.ceil(len(sig) / framelength))
    newsig = np.array([])
    for i in xrange(0, frameamount + 1):
        start = i * framelength
        end = start + framelength
        print end
        if end > len(sig):
            end = len(sig)
        if start >= len(sig):
            start = len(sig) - 1
        length = end - start
        energy = 0.0
        for j in xrange(start, end):
            energy = energy + pow(float(sig[j]), 2)
        energy = energy / length
        if energy >= TRESHOLD:
            newsig = np.concatenate((newsig, sig[start:end]))
    newsig = newsig.astype(sig.dtype)
    print "silence removed, saving: " + filename + ".sr"
    wav.write(filename + ".sr", rate, newsig)
    (rate, sig) = wav.read(filename + ".sr")

Developer: matmater, Project: sop, Lines: 30, Source: silence_remover.py
Example 18: output

def output(partIdx):
    """Uses the student code to compute the output for test cases."""
    outputString = ''
    if partIdx == 0:  # This is ScaledFFTdB
        from assignment1 import scaled_fft_db
        r, x = wavfile.read('data/a1_submissionInput.wav')
        X = scaled_fft_db(x)
        for val in X:
            outputString += '%.5f ' % (val)
    elif partIdx == 1:  # This is PrototypeFilter
        from assignment2 import prototype_filter
        h = prototype_filter()
        # test signal
        s = np.loadtxt('data/a2_submissionInput.txt')
        r = np.convolve(h, s)[4*512:5*512] / 2
        for val in r:
            outputString += '%.5f ' % val
    elif partIdx == 2:  # This is SubbandFiltering
        from assignment3 import subband_filtering
        r, x = wavfile.read('data/a3_submissionInput.wav')
        h = np.hanning(512)
        X = subband_filtering(x, h)
        for val in X:
            outputString += '%.5f ' % (val)
    elif partIdx == 3:  # This is Quantization
        from assignment4 import quantization
        from parameters import EncoderParameters
        params = EncoderParameters(44100, 2, 64)
        val_in = np.loadtxt('data/a4_submissionInput.txt')
        for r, row in enumerate(val_in):
            val = row[0]
            scf = row[1]
            ba = int(row[2])
            QCa = params.table.qca[ba - 2]
            QCb = params.table.qcb[ba - 2]
            val = quantization(val, scf, ba, QCa, QCb)
            outputString += '%d ' % (val)
    return outputString.strip()

Developer: eaz120, Project: coursera_dsp_program_submission, Lines: 60, Source: submit.py
Example 19: test_energy_denois

def test_energy_denois(self):
    original_file = self.auxiliary_files_url + "/nai_sample.wav"
    denoised_file = self.auxiliary_files_url + "/nai_sample_energy_denoised.wav"
    result = self.energy_denoise_module.energyDenoise(
        original_file,
        0.2,
        denoised_file,
        False)
    # The function thinks the denoising succeeded
    self.assertEqual(result, True)
    # Check for the denoised file
    denoised_exists = os.path.isfile(denoised_file)
    self.assertEqual(denoised_exists, True)
    # Check if the denoised energy is lower than the initial one
    samp_freq, signal_orig = wavfile.read(original_file)
    energy_orig = 0.0
    for i in signal_orig:
        energy_orig += i * 1.0 * i
    samp_freq, signal_denoised = wavfile.read(denoised_file)
    energy_denoised = 0.0
    for i in signal_denoised:
        energy_denoised += i * 1.0 * i
    self.assertGreater(energy_orig, energy_denoised)
    # Erase the denoised file
    os.remove(denoised_file)

Developer: gitter-badger, Project: rapp-platform, Lines: 27, Source: energy_denoise_unit_tests.py
Example 20: cut_video

def cut_video(recording_path, datapack_dir):
    # Read the start/end pattern
    sr1, pattern_wav = wav.read('pattern.wav')
    workingdir = tempfile.mkdtemp()
    # Open the video file
    clip = VideoFileClip(recording_path)
    # Save its audio track temporarily on disk
    clip.audio.write_audiofile(os.path.join(workingdir, "temp_audio.wav"))
    # Read the audio samples, mix down to mono (if necessary), and delete the temporary audio track
    sr2, recording_wav = wav.read(os.path.join(workingdir, "temp_audio.wav"))
    if recording_wav.shape[1] > 1:
        recording_wav = numpy.mean(recording_wav, 1)
    shutil.rmtree(workingdir)
    # Detect the start and end audio pattern
    start, end = detect_start_end_times(pattern_wav, recording_wav, sr2, 4)
    # Cut the video and write it into two separate video and audio files
    clip.subclip(start + 0.4, end).write_videofile(os.path.join(datapack_dir, 'video.mp4'), codec='libx264')
    clip.subclip(start + 0.4, end).audio.write_audiofile(os.path.join(datapack_dir, 'audio.wav'))

Developer: chaosct, Project: repoVizzRecorder, Lines: 25, Source: repoVizzRecorder.py
Note: The scipy.io.wavfile.read examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.