Python pydub.AudioSegment Class Code Examples


This article collects typical usage examples of the pydub.AudioSegment class in Python. If you are wondering what the AudioSegment class is for, how to use it, or what real code that uses it looks like, the curated class examples below should help.



Twenty code examples of the AudioSegment class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the site recommend better Python code examples.
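
Before turning to the collected examples, here is a minimal sketch of the AudioSegment operations most of them rely on: loading audio, adjusting gain, slicing, overlaying, concatenating, and exporting. The file names are placeholders, and pydub needs ffmpeg or avconv on the PATH to read or write anything other than plain WAV.

from pydub import AudioSegment

# Load audio; the format argument can be omitted when the file extension is unambiguous.
voice = AudioSegment.from_file("voice.wav", format="wav")
music = AudioSegment.from_mp3("music.mp3")

# Slices are expressed in milliseconds; +/- on a segment adjusts gain in dB.
intro = music[:5000]     # first 5 seconds
quieter = music - 8      # 8 dB quieter

# Overlay the voice onto the quieter music starting 1 second in,
# then concatenate the intro in front of the result.
mixed = quieter.overlay(voice, position=1000)
result = intro + mixed

# Exporting to mp3 requires ffmpeg/avconv.
result.export("result.mp3", format="mp3")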

Example 1: render

def render(mix_id, *args):
    minimum = sys.maxint
    for arg in args:
        if arg[0] < minimum:
            minimum = arg[0]
    for arg in args:
        arg[0] -= minimum
    prio_queue = Queue.PriorityQueue()
    for arg in args:
        prio_queue.put(arg)
    base = prio_queue.get(0)
    base_track = AudioSegment.from_file(base[1], "m4a")
    gain = base[2]
    base_track = base_track.apply_gain(gain)
    while not prio_queue.empty():
        overlay = prio_queue.get(0)
        overlay_track = AudioSegment.from_file(overlay[1], "m4a")
        gain = overlay[2]
        if gain != 0:
            overlay_track = overlay_track.apply_gain(gain)
        base_track = base_track.overlay(overlay_track, position=overlay[0])
    base_track.export('mix.wav', format='wav')
    command = 'ffmpeg -b 66k -y -f wav -i ./mix.wav ./mix.aac'
    subprocess.call(command, shell=True)
    os.remove('mix.wav')

# concaac(mix_id, [0, "test1.m4a", 0], [5000, "test2.m4a", -10], [10000, "test3.m4a", 5])
Developer: spencerhitch, Project: radiotape, Lines of code: 27, Source: render.py


Example 2: main

def main():
    converted_files = []
    if not os.path.exists(OUTPUT_DIR):
        try:
            os.makedirs(OUTPUT_DIR)
        except Exception as e:
            now = datetime.now().strftime('%Y.%m.%d %H:%M')
            logger.error(
                "{} Ошибка при создании директории, текст: {}".format(now, e)
            )
            sys.exit(1)
    while True:
        files = [f for f in os.listdir(DIRECTORY) if os.path.isfile(
            os.path.join(DIRECTORY, f))]
        for f in files:
            if f.split('.')[1] == EXTENSION and f not in converted_files:
                new_name = f.split('.')[0] + '.mp3'
                now = datetime.now().strftime('%Y.%m.%d %H:%M')
                try:
                    AudioSegment.from_wav(os.path.join(DIRECTORY, f)).export(
                        os.path.join(OUTPUT_DIR, new_name), format="mp3")
                    converted_files.append(f)
                    logger.debug(
                        "{} Успешно переконвертировали файл {} ".format(now, f)
                    )
                except Exception as e:
                    logger.error(
                        "{} Ошибка при конвертации файла {}, текст: {}".
                        format(now, f, e)
                    )
                    sys.exit(1)
Developer: BronzeCrab, Project: sound_service_task, Lines of code: 31, Source: daemon.py


Example 3: start

	def start(self):
		# Server runs until killed
		while True:
			# If we have a request, play it
			if len(self.request_list) != 0:
				self.current_song = AudioSegment.from_mp3("../songs/" + self.request_list.popleft())
			# Otherwise, play a random song
			else:
				self.current_song = AudioSegment.from_mp3("../songs/" + random.choice(self.songlist))
			self.new_song()
			# Stream the entire song
			for chunk in self.current_song:
				# Simply skip the time for the client
				if not self.has_client:
					sleep(0.001)
				else:
					# Stream chunk to first client
					client, address = self.clients[0]
					try:
						chunk = chunk.raw_data
						chunk = chunk[:self.chunk_size].ljust(self.chunk_size)
						chunk_length = str(len(chunk))
						client.sendto(bytes("SC" + chunk_length + (4-len(chunk_length))*" ", "UTF-8"), address)
						client.sendto(chunk, address)
					# Disconnects will be handled, just maybe not on time to avoid
					#	this error a few times. We just ignore the error
					except BrokenPipeError:
						pass
Developer: kahns729, Project: comp112-final-project, Lines of code: 28, Source: stream.py


Example 4: export_wav

    def export_wav(self, filename):
        n = self.song.num_tracks
        self.song.export_song("temp/song.abc")

        sounds = ["--syn_a", "--syn_b", "--syn_s", "--syn_e"]

        for i in range(n):
            os.system(
                "python read_abc.py temp/song.abc "
                + str(i + 1)
                + " temp/out_"
                + str(i + 1)
                + ".wav "
                + random.choice(sounds)
            )
        os.remove("temp/song.abc")

        combined = AudioSegment.from_file("temp/out_1.wav")
        if n >= 2:
            for i in range(1, n):
                sound = AudioSegment.from_file("temp/out_" + str(i + 1) + ".wav")
                combined = combined.overlay(sound)

        combined.export(filename, format="wav")

        for i in range(n):
            os.remove("temp/out_" + str(i + 1) + ".wav")
Developer: maxrosssp, Project: sculpture-final, Lines of code: 27, Source: staff.py


Example 5: get_data

def get_data(path):
    """
    Gets the data associated with an audio file, converting to wav when necessary.
    :param path: path to audio file
    :return: sample rate, audio data
    """
    if path.endswith(".wav"):
        bee_rate, bee_data = read(path)
    else:
        temp = tempfile.NamedTemporaryFile(suffix=".wav")
        temp.close()
        if path.endswith(".flac"):
            sound = AudioSegment.from_file(path, "flac")
            sound.export(temp.name, format="wav")
        elif path.endswith(".mp3"):
            sound = AudioSegment.from_file(path, "mp3")
            sound.export(temp.name, format="wav")
        bee_rate, bee_data = read(temp.name)
        os.remove(temp.name)
    data_type = np.iinfo(bee_data.dtype)
    dmin = data_type.min
    dmax = data_type.max
    bee_data = bee_data.astype(np.float64)
    bee_data = 2.0 * ((bee_data - dmin) / (dmax - dmin)) - 1.0
    bee_data = bee_data.astype(np.float32)
    return bee_rate, bee_data
Developer: jlstack, Project: BeeVisualization, Lines of code: 26, Source: audio_to_numpy.py


Example 6: audiodata_getter

def audiodata_getter(path, date, filedate, filename, index):
    #Check to see if it's a wav file. If not, convert in a temp file.
    splitname = os.path.splitext(filename)[0]
    if os.path.splitext(filename)[1] != ".wav":
        temp = tempfile.NamedTemporaryFile(suffix=".wav")
        if os.path.splitext(filename)[1] == ".mp3":
            if "mp3" in path and date is None:
                sound = AudioSegment.from_file(path + filedate[index] + "/" + filename, "mp3")
            else:
                sound = AudioSegment.from_file(path + filename, "mp3")
            sound.export(temp.name, format = "wav")
        if os.path.splitext(filename)[1] == ".flac":
            if "mp3" in path and date is None:
                sound = AudioSegment.from_file(path + filedate[index] + "/" + filename, "flac")
            else:
                sound = AudioSegment.from_file(path + filename, "flac")
            sound.export(temp.name, format = "flac")
        try:
            wav = wave.open(temp, 'r')
            return wav
        except:
            print(filename + " corrupted or not audio file.")
    else:
        try:
            #Open the .wav file and get the vital information
            wav = wave.open(path + "/audio/" + filename, 'r')
            return wav
        except:
            print(filename + " corrupted or not audio file.")
Developer: jlstack, Project: BeeVisualization, Lines of code: 29, Source: KMeansSpec.py


Example 7: test_direct_instantiation_with_bytes

    def test_direct_instantiation_with_bytes(self):
        seg = AudioSegment(
            b'RIFF\x28\x00\x00\x00WAVEfmt \x10\x00\x00\x00\x01\x00\x02\x00\x00}\x00\x00\x00\xf4\x01\x00\x04\x00\x10\x00data\x04\x00\x00\x00\x00\x00\x00\x00')
        self.assertEqual(seg.frame_count(), 1)
        self.assertEqual(seg.channels, 2)
        self.assertEqual(seg.sample_width, 2)
        self.assertEqual(seg.frame_rate, 32000)
Developer: jiaaro, Project: pydub, Lines of code: 7, Source: test.py


Example 8: talklang

def talklang(phrase,lang='FR'):
    try:
        language_dict = {"FR" : 'fr-FR',
            "US" : 'en-US',
           "GB" : 'en-GB',
           "DE" : 'de-DE',
           "ES" : 'es-ES',
           "IT" : 'it-IT'
        }
        language=language_dict[lang]
        phrase=phrase.encode('utf-8')
        cachepath=os.path.dirname(os.path.dirname(__file__))
        file = 'tts'
        filename=os.path.join(cachepath,file+'.wav')
        filenamemp3=os.path.join(cachepath,file+'.mp3')
        os.system('pico2wave -l '+language+' -w  '+filename+ ' "' +phrase+ '"')
        song = AudioSegment.from_wav(filename)
        songmodified=song
        songmodified.export(filenamemp3, format="mp3", bitrate="128k", tags={'albumartist': 'Talkie', 'title': 'TTS', 'artist':'Talkie'}, parameters=["-ar", "44100","-vol", "200"])
        song = AudioSegment.from_mp3(filenamemp3)
        cmd = ['mplayer']
        cmd.append(filenamemp3)
        if GPIO.input(17) != 0 :
            print 'GPIO 17 en cours d\'utilisation'
            while GPIO.input(17) != 0 :
                time.sleep(0.5)
        print 'GPIO 17 libre'
        GPIO.output(18, 1)
        print 'GPIO 18 ON et synthese du message'
        with open(os.devnull, 'wb') as nul:
            subprocess.call(cmd, stdout=nul, stderr=subprocess.STDOUT)
        GPIO.output(18, 0)
        print 'Synthese finie GPIO 18 OFF'
    except Exception, e:
        return str(e)
Developer: sarakha63, Project: TEST, Lines of code: 35, Source: alerte_tts.py


Example 9: generateFile

	def generateFile(self):
		wav = default_storage.open('songs/' + str(self.pk) + '.wav', 'wb')

		final = None

		pitches = map(int, self.pitches.split(','))
		durations = map(int, self.durations.split(','))
		for pitch, duration in zip(pitches, durations):
			fn = 'pitches/' + pitchTable[pitch] + '.wav'
			pf = default_storage.open(fn)
			if final is None:
				final = AudioSegment(pf)[0:durationTable[duration]]
			else:
				final += AudioSegment(pf)[0:durationTable[duration]]

		# Copied from AudioSegment source...
		# I should have changed AudioSegment (getWaveFileContents() or something) and submitted a pull request but I have a deadline

		# Possibly optimize to just have a string packed with data then use ContentFile instead of File below
		wave_data = wave.open(wav, 'wb')
		wave_data.setnchannels(final.channels)
		wave_data.setsampwidth(final.sample_width)
		wave_data.setframerate(final.frame_rate)
		wave_data.setnframes(int(final.frame_count()))
		wave_data.writeframesraw(final._data)
		wave_data.close()
		wav.close() # ?

		wav_rb = default_storage.open('songs/' + str(self.pk) + '.wav', 'rb')
		self.wav.save('songs/' + str(self.pk) + '.wav', File(wav_rb))
		wav_rb.close()
Developer: masonsbro, Project: musicgen, Lines of code: 31, Source: models.py


Example 10: overdub

def overdub(_files, _returnPath):
	s1, s2 = AudioSegment.from_wav(_files[0]), AudioSegment.from_wav(_files[1])
	_dubbed = s1.overlay(s2)
	_dubbed.export(_returnPath, format='wav')
	os.remove(_files[0])
	os.remove(_files[1])
	return True
Developer: showjackyang, Project: dubsrv, Lines of code: 7, Source: srv.py


Example 11: outputTrack

def outputTrack(playList):
	au_file(name='master.au', freq=0, dur=playList[len(playList)-1][0][1], vol=0.2)
	masterSong = AudioSegment.from_file("master.au", "au")
	for item in playList:
		# get the segment length
		longitudDelSegmento = int(item[0][1]) - int(item[0][0])
		# get whether this section loops
		loops = item[2]
		# create the sounds for this section
		sonidoNum = 1  # counter for the sounds
		# create a temporary sound that will hold this whole section
		au_file(name="instrumento.au", freq=0, dur=longitudDelSegmento, vol=1)
		
		for itemSonido in item[1]:
			nombre = 'sound' + str(sonidoNum) +".au"
			#print(nombre,itemSonido[2],itemSonido[1], float(itemSonido[0]))
			au_file(name=nombre, freq=int(itemSonido[2]), dur=int(itemSonido[1]), vol=float(itemSonido[0]))
			sonidoNum += 1
		instrumento = AudioSegment.from_file("instrumento.au", "au")
		
		
		
		for i in range(1, sonidoNum):
			nombre = 'sound' + str(i) +".au"
			# open the sound file
			temp = AudioSegment.from_file(nombre, "au")
			# insert it into the instrument
			instrumento =  instrumento.overlay(temp, position=0, loop=loops)
		# trim the instrument to the segment length
		instrumento = instrumento[:longitudDelSegmento]
		
		# overlay this section's sounds onto the master track
		masterSong = masterSong.overlay(instrumento, position=int(item[0][0]))
	#final = masterSong*2
	masterSong.export("testingSong.emepetres", format="mp3")
Developer: OblivionWielder, Project: acoustatic, Lines of code: 35, Source: soundGenerator.py


Example 12: main

def main():
    wav_pat = re.compile(r'\.wav$')
    #print('End wav path:', end_path)
    #sound_end = AudioSegment.from_wav(end_path).set_frame_rate(16000)
    for pair in wav_folders:
        folder_name = pair[0]
        input_folder = input_folder_prefix + '/' + folder_name + '/' + input_folder_suffix
        output_folder = output_folder_prefix + '/' + folder_name + '/' + output_folder_suffix
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)
        # find all files with wav suffix
        files = list(filter(lambda x: wav_pat.search(x),os.listdir(input_folder)))
        num_file = len(files)
        last_wav_pat = re.compile(str(num_file) + r'\.wav$')
        for filename in files:
            #run_single(input_folder + '/' + filename, output_folder + '/' + filename)
            print('------')
            print('Processing %s...' % (input_folder + '/' + filename))
            sound_input = AudioSegment.from_wav(input_folder + '/' + filename)
            if last_wav_pat.search(filename):
                end_filename = random_pick(end_wavs2)
            else:
                end_filename = random_pick(end_wavs1)
            print('End tone filename:%s' % (end_filename))
            sound_end = AudioSegment.from_wav(end_filename).set_frame_rate(16000)
            sound_combined = sound_input + sound_end
            sound_combined.export(output_folder + '/' + filename, format="wav")
Developer: sonicmisora, Project: audio_equalizer, Lines of code: 27, Source: audio_appending.py


Example 13: GetVoice

def GetVoice(word):  # https://tts.voicetech.yandex.net/generate?text=text&key=3f874a4e-723d-48cd-a791-7401169035a0&format=mp3&speaker=zahar&emotion=good
    req =('https://tts.voicetech.yandex.net/generate?ie=UTF-8&text='+word+'&key='+API_KEY_VOICE+'&format=mp3&speaker=ermil&emotion=neutral')
    response = requests.get(req, stream=True) 
    with open("yasound.mp3", "wb") as handle:
       for data in tqdm(response.iter_content()):
          handle.write(data)
    AudioSegment.from_file('yasound.mp3').export("yasound.ogg", format="ogg")
Developer: chefdoeuvre, Project: EngCardBot, Lines of code: 7, Source: Utils.py


Example 14: run

    def run(self):
        current_files = []
        while True:
            for file in os.listdir(self.scan_directory):
                if file.endswith('.wav') and file not in current_files:
                    AudioSegment.from_wav(self.scan_directory+file).export(self.mp3_directory + file[:-3] + 'mp3', format='mp3')
                    current_files.append(file)
Developer: difficultlogin, Project: soundservice, Lines of code: 7, Source: my_daemon.py


Example 15: main

def main():
	global background
	global prettyGirls
	global oyeahs
	global marsOyeah
	global girlsPretty

	createBackground(song)
	prettyGirls(song)

	# we just so pretty
	soPretty = song[19990:21250]
	soPretty.export('soPretty.wav', 'wav')
	soPretty = wave.open('soPretty.wav', 'r')
	soPrettySlow = wave.open('soPrettySlow.wav', 'w')
	soPrettySlow.setparams(soPretty.getparams())
	writeFrames = soPretty.readframes(soPretty.getnframes())
	soPrettySlow.setframerate(soPretty.getframerate() / 2)
	soPrettySlow.writeframes(writeFrames)
	soPrettySlow.close()
	soPrettySlow = AudioSegment.from_wav('soPrettySlow.wav')

	#combine last two
	silent5 = AudioSegment.silent(duration=22000)
	smallSilent = AudioSegment.silent(90)
	girlsPretty = prettyGirls.append(smallSilent).append(soPrettySlow).append(silent5)

	ohYeah(song)
	mars(song)
	drums(song)
	delete()
Developer: brijshah, Project: Python-Media, Lines of code: 31, Source: audio.py


Example 16: MangleLibrary

def MangleLibrary(src, dst, audio, splice=None):
	if os.path.splitext(audio)[1] != ".mp3":
		raise "Prank audio is not an mp3"
	prank = AudioSegment.from_mp3(audio)
	# Walk src
	for root, dirs, files in os.walk(src):
		# Loop through files in this dir
		for fn in files:
			# If file is an mp3
			if os.path.splitext(fn)[1] == ".mp3":
				# Import song
				fullsong = AudioSegment.from_mp3(root+"/"+fn)
				# Pick a random splice point between 15 and 60 seconds into the song
				start = random.randint(15,60)
				print("Spliced {} after {} seconds".format(root+"/"+fn,start))
				# Splice in prank song
				if splice != None:
					r = random.randint(0,len(splice)-1)
					final = fullsong[:start*1000] + prank[splice[r][0]:splice[r][1]] + fullsong[start*1000:]
					# final = fullsong[:start*1000] + prank + fullsong[start*1000:]
				else:
					final = fullsong[:start*1000] + prank
				# Recreate directory structure in dst
				if not os.path.exists(dst+"/"+root):
					os.makedirs(dst+"/"+root)
				# Export song with tags
				final.export(dst+"/"+root+"/"+fn, format="mp3", tags=mediainfo(root+"/"+fn).get('TAG', {}))
Developer: buchanan, Project: LM, Lines of code: 27, Source: __init__.py


Example 17: morsesound

def morsesound(sentence, freq=1000, length=100, path ='output\\'):
    """Turns a sentence into a morse soundfile"""
    mor = morse(sentence)
    from pydub.generators import Sine
    from pydub import AudioSegment
    import re

    dot = Sine(freq).to_audio_segment(length)
    dash =  Sine(freq).to_audio_segment(length*3)
    sil1 = AudioSegment.silent(length)
    sil3 = AudioSegment.silent(length*3)

    result = AudioSegment.silent(length)
    for a in mor:
        if a == ".":
            result += dot
        elif a == "-":
            result += dash
        elif a == "/":
            result += sil1
        else:
            result += sil3
        result += sil1

    filename = path + re.sub(r'[/\?!:*|",.]','',sentence) + '.mp3'
    result.export(filename,format="mp3")
    return filename
Developer: KarlKastor, Project: TheMachine, Lines of code: 27, Source: machinetools.py


Example 18: mixer

def mixer(first_path, second_path, mix_path, tag=None, f_format='wav'):
    '''
    ffmpeg or avconv are required for MP3 format mixing.
    WAV format must be 8, 16, or 32 bit (24 bit is not supported by pydub)
    '''
    with open(first_path, 'rb') as f:
        first = AudioSegment.from_file(f, format=f_format)
    with open(second_path, 'rb') as f:
        second = AudioSegment.from_file(f, format=f_format)

    if len(first) > len(second):
        mix = first.overlay(second)
    else:
        mix = second.overlay(first)
    mix.export(mix_path, format=f_format)

    metadata = {
        'tag': tag,
        'first_file': {
            'path': first_path,
            'length': first.duration_seconds
        },
        'second_file': {
            'path': second_path,
            'length': second.duration_seconds
        },
        'mix': {
            'path': mix_path,
            'length': mix.duration_seconds
        }
    }

    return metadata
Developer: eringee, Project: survivor_music, Lines of code: 33, Source: mixer.py
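
The docstring above points out that pydub cannot read 24-bit WAV input. A minimal workaround sketch, assuming ffmpeg is installed and using placeholder file names, is to have ffmpeg rewrite the file as 16-bit PCM before handing it to mixer():

import subprocess

# Rewrite a 24-bit WAV as 16-bit PCM so that pydub (and mixer() above) can read it.
# '-y' overwrites the output if it already exists; both paths are placeholders.
subprocess.call(['ffmpeg', '-y', '-i', 'input_24bit.wav',
                 '-acodec', 'pcm_s16le', 'input_16bit.wav'])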


Example 19: responseToAnAudioCachonism

	def responseToAnAudioCachonism(self, bot, update):
		message= update["message"]
		user= message.from_user["username"]
		if user=="":
			user= message.from_user["first_name"]

		responseText= "Hey %s. Soy TeofiBot. Mira lo que hago con tu nota de voz..." % user
		
		if user not in ["TeoGol29"]:		
			downAudio= bot.getFile(message.voice.file_id)
			urllib.urlretrieve (downAudio.file_path, downAudio.file_id)
			
			sound1 = AudioSegment.from_file(downAudio.file_id)
			source_path= self.get_resource_path("sounds", "mi_creador.mp3")
			sound2 = AudioSegment.from_file(source_path)

			sound1 = sound1 + 1
			sound2 = sound2 - 8
			combined = sound1.overlay(sound2)
			audio_mix_filename="mix_"+downAudio.file_id
			combined.export(audio_mix_filename , format='mp3')
			bot.sendMessage(chat_id=update.message.chat_id, text=responseText)
			bot.sendAudio(chat_id=update.message.chat_id, audio=open(audio_mix_filename, 'rb'), caption='TeofiBot saboteandote con sabor')
			os.remove(downAudio.file_id)
			os.remove(audio_mix_filename)
Developer: jtuburon, Project: teofibot, Lines of code: 25, Source: teofibot.py


Example 20: setUp

    def setUp(self):
        global test1, test2, test3, testparty, testdcoffset
        if not test1:
            a = os.path.join(data_dir, 'test1.mp3')
            test1 = AudioSegment.from_mp3(os.path.join(data_dir, 'test1.mp3'))
            test2 = AudioSegment.from_mp3(os.path.join(data_dir, 'test2.mp3'))
            test3 = AudioSegment.from_mp3(os.path.join(data_dir, 'test3.mp3'))
            testdcoffset = AudioSegment.from_mp3(
                os.path.join(data_dir, 'test-dc_offset.wav'))
            testparty = AudioSegment.from_mp3(
                os.path.join(data_dir, 'party.mp3'))

        self.seg1 = test1
        self.seg2 = test2
        self.seg3 = test3
        self.mp3_seg_party = testparty
        self.seg_dc_offset = testdcoffset

        self.ogg_file_path = os.path.join(data_dir, 'bach.ogg')
        self.mp4_file_path = os.path.join(data_dir, 'creative_common.mp4')
        self.mp3_file_path = os.path.join(data_dir, 'party.mp3')
        self.webm_file_path = os.path.join(data_dir, 'test5.webm')

        self.jpg_cover_path = os.path.join(data_dir, 'cover.jpg')
        self.png_cover_path = os.path.join(data_dir, 'cover.png')
Developer: jiaaro, Project: pydub, Lines of code: 25, Source: test.py



Note: The pydub.AudioSegment class examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by various developers, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.

