• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python uiObjects.UIAssistantUtteranceView类代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中siriObjects.uiObjects.UIAssistantUtteranceView的典型用法代码示例。如果您正苦于以下问题:Python UIAssistantUtteranceView类的具体用法?Python UIAssistantUtteranceView怎么用?Python UIAssistantUtteranceView使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。



在下文中一共展示了UIAssistantUtteranceView类的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: play

 def play(self, results, language):
     """Queue the given media results for playback and show a summary view.

     Builds an MPTitleCollection from *results*, treating every missing
     metadata attribute as the empty string, queues it, sends Play /
     shuffle-off commands, and answers with the localized "play" utterance.

     Args:
         results: iterable of media-result objects; metadata attributes
             (title, artist, album, ...) are all optional.
         language: key used to look up the localized reply in ``res``.
     """
     # Metadata attributes that may be absent on a result object.
     _FIELDS = ("genre", "trackNumber", "artist", "title", "sortTitle",
                "playCount", "rating", "album", "identifier")
     collection = MPTitleCollection()
     collection.items = []
     # Default so an empty `results` iterable no longer raises NameError
     # (the original read the loop variable after the loop).
     collection.identifier = ""
     for result in results:
         song = MPSong()
         for field in _FIELDS:
             # Copy each field, substituting "" when the attribute is missing
             # (replaces the original hasattr/assign chain without mutating
             # the caller's result objects).
             setattr(song, field, getattr(result, field, ""))
         collection.items.append(song)
         # NOTE(review): title and sortTitle look swapped here
         # (collection.title <- result.sortTitle); kept as-is to preserve
         # behavior -- confirm against the original intent.
         collection.sortTitle = getattr(result, "title", "")
         collection.title = getattr(result, "sortTitle", "")
         collection.identifier = getattr(result, "identifier", "")
     complete = MPSetQueue(self.refId)
     complete.mediaItems = collection
     self.getResponseForRequest(complete)
     # Playback commands: start playing with shuffle disabled.
     commands = MPSetState(self.refId)
     commands.state = "Playing"
     commands2 = MPEnableShuffle(self.refId)
     commands2.enable = False
     code = 0
     # Summary view with the spoken/displayed confirmation text.
     root = UIAddViews(self.refId)
     root.dialogPhase = "Summary"
     assistant = UIAssistantUtteranceView()
     assistant.dialogIdentifier = "PlayMedia#nowPlayingMediaItemByTitle"
     assistant.speakableText = assistant.text = res["play"][language]
     root.views = [assistant]
     root.callbacks = [ResultCallback([commands, commands2], code)]
     callback = [ResultCallback([root], code)]
     self.send_object(RequestCompleted(self.refId, callback))
     self.complete_request()
开发者ID:sm0ker,项目名称:SiriServerCore,代码行数:54,代码来源:__init__.py


示例2: process_recognized_speech

 def process_recognized_speech(self, googleJson, requestId, dictation):
     """Handle a Google speech-recognition result for one request.

     Picks the best hypothesis, echoes it back as a SpeechRecognized
     object, then either dispatches it to a plugin, feeds it to a plugin
     waiting for a response, or (when nothing matches) shows a
     "not recognized" view with a web-search fallback button.

     Args:
         googleJson: parsed Google recognizer response; expected to carry
             a 'hypotheses' list with 'utterance'/'confidence' entries.
         requestId: refId of the client request being answered.
         dictation: True when this is plain dictation (no plugin dispatch).
     """
     possible_matches = googleJson['hypotheses']
     if possible_matches:
         best_match = possible_matches[0]['utterance']
         # Capitalize only the first character, preserving the rest
         # (str.capitalize would lowercase the remainder).
         if len(best_match) == 1:
             best_match = best_match.upper()
         else:
             best_match = best_match[0].upper() + best_match[1:]
         best_match_confidence = possible_matches[0]['confidence']
         self.logger.info(u"Best matching result: \"{0}\" with a confidence of {1}%".format(best_match, round(float(best_match_confidence) * 100, 2)))
         # construct a SpeechRecognized
         token = Token(best_match, 0, 0, 1000.0, True, True)
         interpretation = Interpretation([token])
         phrase = Phrase(lowConfidence=False, interpretations=[interpretation])
         recognition = Recognition([phrase])
         recognized = SpeechRecognized(requestId, recognition)
         
         if not dictation:
             if self.current_running_plugin is None:
                 plugin = PluginManager.getPluginForImmediateExecution(self.assistant.assistantId, best_match, self.assistant.language, (self.send_object, self.send_plist, self.assistant, self.current_location))
                 if plugin is not None:
                     # Hand the utterance off to the matched plugin.
                     plugin.refId = requestId
                     plugin.connection = self
                     self.current_running_plugin = plugin
                     self.send_object(recognized)
                     self.current_running_plugin.start()
                 else:
                     # No plugin matched: show the localized "not recognized"
                     # text plus a button offering a web search.
                     self.send_object(recognized)
                     view = UIAddViews(requestId)
                     errorText = SiriProtocolHandler.__not_recognized[self.assistant.language] if self.assistant.language in SiriProtocolHandler.__not_recognized else SiriProtocolHandler.__not_recognized["en-US"]
                     errorView = UIAssistantUtteranceView()
                     errorView.text = errorText.format(best_match)
                     errorView.speakableText = errorText.format(best_match)
                     view.views = [errorView]
                     websearchText = SiriProtocolHandler.__websearch[self.assistant.language] if self.assistant.language in SiriProtocolHandler.__websearch else SiriProtocolHandler.__websearch["en-US"]
                     button = UIButton()
                     button.text = websearchText
                     cmd = SendCommands()
                     cmd.commands = [StartRequest(utterance=u"^webSearchQuery^=^{0}^^webSearchConfirmation^=^Yes^".format(best_match))]
                     button.commands = [cmd]
                     view.views.append(button)
                     self.send_object(view)
                     self.send_object(RequestCompleted(requestId))
             elif self.current_running_plugin.waitForResponse is not None:
                 # A plugin is blocked waiting for the user's answer: deliver
                 # the utterance and wake it up.
                 # do we need to send a speech recognized here? i.d.k
                 self.current_running_plugin.response = best_match
                 self.current_running_plugin.refId = requestId
                 self.current_running_plugin.waitForResponse.set()
             else:
                 # A plugin is running but not waiting: just acknowledge.
                 self.send_object(recognized)
                 self.send_object(RequestCompleted(requestId))
         else:
             # Dictation mode: echo the recognition and finish.
             self.send_object(recognized)
             self.send_object(RequestCompleted(requestId))
开发者ID:greenZebraQuest,项目名称:SiriServerCore,代码行数:54,代码来源:SiriProtocolHandler.py


示例3: pause

 def pause(self, language):
     """Pause playback and confirm with the localized 'pause' utterance."""
     pause_cmd = MPSetState(self.refId)
     pause_cmd.state = "Paused"
     utterance = UIAssistantUtteranceView()
     utterance.dialogIdentifier = "PlayMedia#Paused"
     utterance.text = res["pause"][language]
     utterance.speakableText = utterance.text
     view_root = UIAddViews(self.refId)
     view_root.dialogPhase = "Summary"
     view_root.views = [utterance]
     view_root.callbacks = [ResultCallback([pause_cmd], 0)]
     completion = RequestCompleted(self.refId, [ResultCallback([view_root], 0)])
     self.send_object(completion)
     self.complete_request()
开发者ID:sm0ker,项目名称:SiriServerCore,代码行数:14,代码来源:__init__.py


示例4: beginning

 def beginning(self, language):
     """Restart the current track from the beginning and confirm verbally."""
     play_cmd = MPSetState(self.refId)
     play_cmd.state = "Playing"
     seek_cmd = MPSetPlaybackPosition(self.refId)
     seek_cmd.position = "Beginning"
     utterance = UIAssistantUtteranceView()
     utterance.dialogIdentifier = "PlayMedia#SkipToBeginning"
     utterance.text = res["beginning"][language]
     utterance.speakableText = utterance.text
     view_root = UIAddViews(self.refId)
     view_root.dialogPhase = "Summary"
     view_root.views = [utterance]
     view_root.callbacks = [ResultCallback([play_cmd, seek_cmd], 0)]
     completion = RequestCompleted(self.refId, [ResultCallback([view_root], 0)])
     self.send_object(completion)
     self.complete_request()
开发者ID:sm0ker,项目名称:SiriServerCore,代码行数:16,代码来源:__init__.py



注:本文中的siriObjects.uiObjects.UIAssistantUtteranceView类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python interpreter.Interpreter类代码示例发布时间:2022-05-27
下一篇:
Python uiObjects.UIAddViews类代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap