This article collects typical usage examples of Python's soundfile.write function. If you have been wondering what soundfile.write does, how to call it, or what real-world uses look like, the curated code examples below should help.
Twenty code examples of the write function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
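Before the project examples, here is a minimal self-contained sketch of the basic call. The file name and the demo sine tone are made up purely for illustration:

import numpy as np
import soundfile as sf

# one second of a 440 Hz sine tone at 44.1 kHz (demo data)
samplerate = 44100
t = np.linspace(0, 1, samplerate, endpoint=False)
tone = 0.5 * np.sin(2 * np.pi * 440 * t)

# soundfile.write(file, data, samplerate, subtype=None, format=None, ...)
# The subtype (e.g. 'PCM_16', 'FLOAT') is optional; the container format is
# normally inferred from the file extension.
sf.write('tone.wav', tone, samplerate, subtype='PCM_16')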
Example 1: _maybe_convert_wav
def _maybe_convert_wav(data_dir, original_data, converted_data):
    source_dir = os.path.join(data_dir, original_data)
    target_dir = os.path.join(data_dir, converted_data)
    # Conditionally convert sph files to wav files
    if os.path.exists(target_dir):
        print("skipping maybe_convert_wav")
        return
    # Create target_dir
    os.makedirs(target_dir)
    # Loop over sph files in source_dir and convert each to 16-bit PCM wav
    for root, dirnames, filenames in os.walk(source_dir):
        for filename in fnmatch.filter(filenames, "*.sph"):
            for channel in ['1', '2']:
                sph_file = os.path.join(root, filename)
                wav_filename = os.path.splitext(os.path.basename(sph_file))[0] + "-" + channel + ".wav"
                wav_file = os.path.join(target_dir, wav_filename)
                temp_wav_filename = os.path.splitext(os.path.basename(sph_file))[0] + "-" + channel + "-temp.wav"
                temp_wav_file = os.path.join(target_dir, temp_wav_filename)
                print("converting {} to {}".format(sph_file, temp_wav_file))
                subprocess.check_call(["sph2pipe", "-c", channel, "-p", "-f", "rif", sph_file, temp_wav_file])
                print("upsampling {} to {}".format(temp_wav_file, wav_file))
                audioData, frameRate = librosa.load(temp_wav_file, sr=16000, mono=True)
                soundfile.write(wav_file, audioData, frameRate, "PCM_16")
                os.remove(temp_wav_file)
Author: gulshan-mittal | Project: DeepSpeech | Lines: 27 | Source file: import_swb.py
Example 2: saveTo
def saveTo(self, file):
    with ZipFile(file, 'w') as zip:
        song_file = configparser.ConfigParser()
        song_file['DEFAULT'] = {'volume': self.volume,
                                'bpm': self.bpm,
                                'beat_per_bar': self.beat_per_bar,
                                'width': self.width,
                                'height': self.height}
        for clip in self.clips:
            clip_file = {'name': clip.name,
                         'volume': str(clip.volume),
                         'frame_offset': str(clip.frame_offset),
                         'beat_offset': str(clip.beat_offset),
                         'beat_diviser': str(clip.beat_diviser),
                         'audio_file': basename(clip.audio_file)}
            if clip_file['audio_file'] is None:
                clip_file['audio_file'] = 'no-sound'
            song_file["%s/%s" % (clip.x, clip.y)] = clip_file
        buffer = StringIO()
        song_file.write(buffer)
        zip.writestr('metadata.ini', buffer.getvalue())
        for member in self.data:
            buffer = BytesIO()
            # soundfile.write expects (file, data, samplerate)
            sf.write(buffer, self.data[member],
                     self.samplerate[member],
                     subtype=sf.default_subtype('WAV'),
                     format='WAV')
            zip.writestr(member, buffer.getvalue())
        self.file_name = file
Author: piloudsda | Project: superboucle | Lines: 33 | Source file: clip.py
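Example 2 shows a useful pattern: the WAV data is written into an in-memory buffer instead of a file on disk, so the resulting bytes can go straight into the zip archive. A minimal standalone sketch of that pattern, with a made-up placeholder signal:

import io
import numpy as np
import soundfile as sf

data = np.zeros(44100)  # placeholder: one second of silence at 44.1 kHz
buffer = io.BytesIO()
# A file-like object has no extension to infer the container from,
# so format= must be passed explicitly.
sf.write(buffer, data, 44100, subtype=sf.default_subtype('WAV'), format='WAV')
wav_bytes = buffer.getvalue()  # raw WAV bytes, ready for e.g. ZipFile.writestr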
Example 3: play_message
def play_message(in_msg_fn):
    """
    This method opens a decrypted in_msg and converts the data to an audio
    stream. Then, it simply reads in the frames of the audio file and writes
    the data to an output stream. In other words, it plays the message for you.
    """
    try:
        in_msg = open(in_msg_fn, 'rb')
        data = pickle.load(in_msg)
        in_msg.close()
        print('Data pickled')
    except IOError:
        print("ERROR: Failed to open message file.")
        return
    sf.write(DECR_OUTPUT_FILENAME, data, samplerate=RATE)
    ##########################################################################
    # For now, I just want to make sure the WAV file is written successfully.#
    # Until then, this playback stuff will be on the backlog.#################
    ##########################################################################
    # wf = wave.open(DECR_OUTPUT_FILENAME, 'rb')
    # p = pyaudio.PyAudio()
    # stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
    #                 channels=wf.getnchannels(),
    #                 rate=wf.getframerate(),
    #                 output=True)
    # data = wf.readframes(CHUNK)
    # while data != '':
    #     stream.write(data)
    #     data = wf.readframes(CHUNK)
    # stream.stop_stream()
    # stream.close()
    # p.terminate()
    return DECR_OUTPUT_FILENAME
Author: SacredData | Project: Lary | Lines: 33 | Source file: receive_laryngitis.py
Example 4: test_write_int_data_to_float_file
def test_write_int_data_to_float_file(file_inmemory):
    """This is a very uncommon use case."""
    sf.write(file_inmemory, data_mono, 44100, format='WAV', subtype='FLOAT')
    file_inmemory.seek(0)
    read, fs = sf.read(file_inmemory, always_2d=False, dtype='float32')
    assert np.all(read == data_mono)
    assert fs == 44100
Author: dewabayu | Project: PySoundFile | Lines: 7 | Source file: test_pysoundfile.py
Example 5: save
def save(filename_audio, filename_jam, jam, strict=True, **kwargs):
    '''Save a muda jam to disk

    Parameters
    ----------
    filename_audio: str
        The path to store the audio file

    filename_jam: str
        The path to store the jams object

    strict: bool
        Strict safety checking for jams output

    kwargs
        Additional parameters to `soundfile.write`
    '''
    y = jam.sandbox.muda._audio['y']
    sr = jam.sandbox.muda._audio['sr']
    # First, dump the audio file
    psf.write(filename_audio, y, sr, **kwargs)
    # Then dump the jam
    jam.save(filename_jam, strict=strict)
Author: jfsantos | Project: muda | Lines: 27 | Source file: core.py
Example 6: __rubberband
def __rubberband(y, sr, **kwargs):
    '''Execute rubberband

    Parameters
    ----------
    y : np.ndarray [shape=(n,) or (n, c)]
        Audio time series, either single or multichannel

    sr : int > 0
        sampling rate of y

    **kwargs
        keyword arguments to rubberband

    Returns
    -------
    y_mod : np.ndarray [shape=(n,) or (n, c)]
        `y` after rubberband transformation
    '''
    assert sr > 0

    # Get the input and output tempfile
    fd, infile = tempfile.mkstemp(suffix='.wav')
    os.close(fd)
    fd, outfile = tempfile.mkstemp(suffix='.wav')
    os.close(fd)

    # dump the audio
    sf.write(infile, y, sr)

    try:
        # Execute rubberband
        arguments = ['rubberband', '-q']
        for key, value in six.iteritems(kwargs):
            arguments.append(str(key))
            arguments.append(str(value))
        arguments.extend([infile, outfile])
        subprocess.check_call(arguments)

        # Load the processed audio.
        y_out, _ = sf.read(outfile, always_2d=True)

        # make sure that output dimensions matches input
        if y.ndim == 1:
            y_out = np.squeeze(y_out)
    finally:
        # Remove temp files
        os.unlink(infile)
        os.unlink(outfile)

    return y_out
Author: faroit | Project: pyrubberband | Lines: 58 | Source file: pyrb.py
Example 7: play
def play(self):
    log.debug('Play %r', self)
    # FIXME change adhoc play to universal
    fragment_filename = '/tmp/fragment.wav'
    sub.check_call(['rm', '-rf', fragment_filename])
    # soundfile.write expects (file, data, samplerate)
    sf.write(fragment_filename, self.samples, self.samplerate)
    sub.check_call(['play', fragment_filename])
Author: ivanovwaltz | Project: wavelet_sound_microscope | Lines: 9 | Source file: sound.py
Example 8: compute_combination
def compute_combination(args):
    snr, signal, noise, target_rate, new_name, storage_name = args
    noisy_signal = signal * snrdb2ratio(snr, signal, noise) + noise
    noisy_signal = noisy_signal / peak(noisy_signal)
    soundfile.write(storage_name, noisy_signal, target_rate)
    shutil.copyfile(storage_name, new_name)
    # soundfile = al.Sndfile(new_name, 'w', al.Format('flac'), 1, target_rate)
    # soundfile.write_frames(noisy_signal)
    # soundfile.sync()
    print("Wrote", new_name)
Author: jlep | Project: vad | Lines: 10 | Source file: speech_processing.py
Example 9: _save_estimates
def _save_estimates(self, user_estimates, track, estimates_dir):
    track_estimate_dir = op.join(
        estimates_dir, track.subset, track.filename
    )
    if not os.path.exists(track_estimate_dir):
        os.makedirs(track_estimate_dir)

    # write out tracks to disk
    for target, estimate in list(user_estimates.items()):
        target_path = op.join(track_estimate_dir, target + '.wav')
        sf.write(target_path, estimate, track.rate)
Author: faroit | Project: dsdtools | Lines: 12 | Source file: __init__.py
Example 10: onExportClip
def onExportClip(self):
    if self.last_clip and self.last_clip.audio_file:
        audio_file = self.last_clip.audio_file
        file_name, a = self.getSaveFileName(
            'Export Clip : %s' % self.last_clip.name, 'WAVE (*.wav)')
        if file_name:
            file_name = verify_ext(file_name, 'wav')
            # soundfile.write expects (file, data, samplerate)
            sf.write(file_name, self.song.data[audio_file],
                     self.song.samplerate[audio_file],
                     subtype=sf.default_subtype('WAV'),
                     format='WAV')
Author: sonejostudios | Project: superboucle | Lines: 12 | Source file: gui.py
Example 11: main
def main():
    logdir, ckpt = os.path.split(args.checkpoint)
    arch = tf.gfile.Glob(os.path.join(logdir, 'architecture*.json'))[0]  # should only be 1 file
    with open(arch) as fp:
        arch = json.load(fp)

    normalizer = Tanhize(
        xmax=np.fromfile('./etc/xmax.npf'),
        xmin=np.fromfile('./etc/xmin.npf'),
    )

    features = read_whole_features(args.file_pattern.format(args.src))

    x = normalizer.forward_process(features['sp'])
    x = nh_to_nchw(x)
    y_s = features['speaker']

    y_t_id = tf.placeholder(dtype=tf.int64, shape=[1,])
    y_t = y_t_id * tf.ones(shape=[tf.shape(x)[0],], dtype=tf.int64)

    machine = MODEL(arch)

    z = machine.encode(x)
    x_t = machine.decode(z, y_t)  # NOTE: the API yields NHWC format
    x_t = tf.squeeze(x_t)
    x_t = normalizer.backward_process(x_t)

    # For sanity check (validation)
    x_s = machine.decode(z, y_s)
    x_s = tf.squeeze(x_s)
    x_s = normalizer.backward_process(x_s)

    f0_s = features['f0']
    f0_t = convert_f0(f0_s, args.src, args.trg)

    output_dir = get_default_output(args.output_dir)

    saver = tf.train.Saver()
    sv = tf.train.Supervisor(logdir=output_dir)
    with sv.managed_session() as sess:
        load(saver, sess, logdir, ckpt=ckpt)
        while True:
            try:
                feat, f0, sp = sess.run(
                    [features, f0_t, x_t],
                    feed_dict={y_t_id: np.asarray([SPEAKERS.index(args.trg)])}
                )
                feat.update({'sp': sp, 'f0': f0})
                y = pw2wav(feat)
                oFilename = make_output_wav_name(output_dir, feat['filename'])
                sf.write(oFilename, y, FS)
            except:
                break
Author: QianQQ | Project: Voice-Conversion | Lines: 51 | Source file: convert.py
Example 12: write_audio_file
def write_audio_file(filepath, v_signal, fs, norm=0.98):
    '''
    norm: If None, no normalisation is applied. If it is a float number,
    it is the target value (absolute) for the normalisation.
    '''
    # Normalisation:
    if norm is not None:
        v_signal = norm * v_signal / np.max(np.abs(v_signal))  # default

    # Write:
    sf.write(filepath, v_signal, fs)

    return
Author: suldier | Project: magphase | Lines: 14 | Source file: libaudio.py
Example 13: main
def main():
    args = get_args()

    if args.lin:
        cFreq = makeLinearCFs(args.band, args.space, args.low, args.high)
    else:
        cFreq = makeErbCFs(args.band, args.space, args.low, args.high)

    compTone = genComplex(cFreq, args.rate, args.time)
    ampTone = ampModulate(compTone, args.mod, args.rate)

    # -1 : balance to not go above '1'.
    # > 0 : balance to the specified value.
    if args.rms <= 0.0:
        ampTone *= (1 / np.max(np.abs(ampTone)))
    else:
        ampTone *= (args.rms / rms(ampTone))

    sf.write(args.save, ampTone, args.rate)
Author: TataLab | Project: iCubAudioAttention | Lines: 16 | Source file: generateComplexAudio.py
Example 14: output
def output(self, filename, format=None):
    """
    Write the samples out to the given filename.

    Parameters
    ----------
    filename : str
        The path to write the audio on disk.
        This can be any format supported by `pysoundfile`, including
        `WAV`, `FLAC`, or `OGG` (but not `mp3`).

    format : str
        If provided, explicitly set the output encoding format.
        See `soundfile.available_formats`.
    """
    sf.write(filename, self.raw_samples.T, int(self.sample_rate), format=format)
Author: Asudano | Project: amen | Lines: 16 | Source file: audio.py
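The docstring in Example 14 notes that the output can be any container supported by pysoundfile (WAV, FLAC, OGG, but not mp3) and points to soundfile.available_formats. Here is a short sketch of listing and forcing the output format; the file names and the random demo signal are made up for illustration:

import numpy as np
import soundfile as sf

samples = np.random.uniform(-0.5, 0.5, 22050)  # hypothetical demo data

# The container is normally inferred from the extension ...
sf.write('out.flac', samples, 22050)
# ... but it can be forced with format=, as Example 14 does
sf.write('out_audio', samples, 22050, format='FLAC')

# The supported containers and subtypes can be listed at runtime
print(sf.available_formats())
print(sf.available_subtypes('FLAC'))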
Example 15: test_write_float_data_to_pcm_file
def test_write_float_data_to_pcm_file(file_inmemory):
    float_to_clipped_int16 = [
        (-1.0 - 2**-15, -2**15),
        (-1.0,          -2**15),
        (-1.0 + 2**-15, -2**15 + 1),
        (0.0,           0),
        (1.0 - 2**-14,  2**15 - 2),
        (1.0 - 2**-15,  2**15 - 1),
        (1.0,           2**15 - 1),
    ]
    written, expected = zip(*float_to_clipped_int16)
    sf.write(file_inmemory, written, 44100, format='WAV', subtype='PCM_16')
    file_inmemory.seek(0)
    read, fs = sf.read(file_inmemory, dtype='int16')
    assert np.all(read == expected)
    assert fs == 44100
Author: dewabayu | Project: PySoundFile | Lines: 16 | Source file: test_pysoundfile.py
Example 16: sliceAudio
def sliceAudio(iFilename, names, times, verbose_en):
    # open audio
    data, fs = sf.read(iFilename)
    # append the end of the file (in seconds) as the final cut point
    times.append(len(data) / fs)
    # calculate time laps
    for i in range(len(times) - 1):
        startPoint = int(times[i] * fs)
        endPoint = int(times[i + 1] * fs)
        # write slice audio file
        sf.write(names[i] + '.wav', data[startPoint:endPoint], fs)
        if verbose_en:
            print(names[i] + '.wav')
Author: agualdron | Project: splitaudio | Lines: 16 | Source file: splitaudio.py
Example 17: save
def save(f, s, fs, subtype=None):
    '''Write signal `s` to file `f` at sample rate `fs`.

    Note: `soundfile.write` returns None.
    '''
    from soundfile import write
    return write(f, s, fs, subtype=subtype)
Author: trungnt13 | Project: blocks | Lines: 8 | Source file: speech.py
Example 18: export
def export(input, input_file, output_path, samplerate):
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    basepath = os.path.join(
        output_path, os.path.splitext(os.path.basename(input_file))[0]
    )

    # Write out all components
    for i in range(input.shape[0]):
        sf.write(
            basepath + "_cpnt-" + str(i) + ".wav",
            input[i],
            samplerate
        )

    out_sum = np.sum(input, axis=0)
    sf.write(basepath + '_reconstruction.wav', out_sum, samplerate)
Author: aliutkus | Project: commonfate | Lines: 18 | Source file: cfm_decompose.py
Example 19: stereo_to_mono_and_extreme_silence_cropping
def stereo_to_mono_and_extreme_silence_cropping(source, target, subtype=None, print_progress=False):
    if os.path.isdir(source) and os.path.isdir(target):
        from glob import iglob
        if source[-1] != '/':
            source += '/'
        for i, filepath in enumerate(iglob(source + '*.wav')):
            filename = os.path.basename(filepath)
            if print_progress:
                printProgress("{}: {}".format(i, filename))
            stereo_to_mono_and_extreme_silence_cropping(
                filepath,
                os.path.join(target, filename)
            )
    else:
        wf, sr = wf_and_sr(source)
        wf = ensure_mono(wf)
        wf = crop_head_and_tail_silence(wf)
        sf.write(data=wf, file=target, samplerate=sr, subtype=subtype)
Author: yz- | Project: ut | Lines: 18 | Source file: util.py
Example 20: unify_signals
def unify_signals(self, obj):
    for signal_name in self.signal_names:
        if signal_name in obj._save_names:
            continue
        signal = getattr(obj, signal_name)
        if signal != []:
            name = hashlib.md5(signal).hexdigest()
            if name in self.signals:
                setattr(obj, signal_name, self.signals[name])
                # signal = self.signals[name]
            else:
                self.signals.update({name: signal})
                # signal = self.signals[name]
                setattr(obj, signal_name, self.signals[name])
                sf.write(os.path.join(self.temp_path, name + '.wav'),
                         self.signals[name],
                         samplerate=obj.sample_rate)
            obj._save_names[signal_name] = name
Author: TGM-Oldenburg | Project: earyx | Lines: 18 | Source file: saveload.py
Note: The soundfile.write examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Do not reproduce without permission.