本文整理汇总了Python中xgoogle.search.GoogleSearch类的典型用法代码示例。如果您正苦于以下问题:Python GoogleSearch类的具体用法?Python GoogleSearch怎么用?Python GoogleSearch使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了GoogleSearch类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: getGooogleResults
def getGooogleResults(query, exclude):
try:
print "Searching for {0} but excluding these {1}".format(query, exclude)
page = 1
gs = GoogleSearch(query)
gs.results_per_page = 100
results = gs.get_results()
开发者ID:adamstein,项目名称:researcher.py,代码行数:7,代码来源:researcher.py
示例2: getUrls2
def getUrls2( page_num ):
gs = GoogleSearch('shareholder letter')
gs.results_per_page = 50
gs.page = page_num
results = gs.get_results()
for item in results:
print item.url.encode("utf8")
开发者ID:ammathew,项目名称:Letters,代码行数:7,代码来源:aa.py
示例3: google
def google(termtosearch, action):
#action = spam or phis
try:
gs = GoogleSearch(termtosearch)
gs.results_per_page = 100
results = []
while True:
tmp = gs.get_results()
if not tmp:
break
results.extend(tmp)
#TODO switch in this code block
if action == 'mal':
for res in results:
checkAgainstGoogle(res.url.encode('utf8'))
else:
if action == 'spam':
for res in results:
print '\033[1;34mLooking for SPAM in ......%s\033[1;m' % (res.url.encode('utf8'))
spam_detect(res.url.encode('utf8'))
elif action == 'phis':
for res in results:
print '\033[1;34mLooking for PHISHING in ......%s\033[1;m' % (res.url.encode('utf8'))
phishing_detect(res.url.encode('utf8'))
else:
for res in results:
print res.url.encode('utf8')
except SearchError, e:
print "Search failed: %s" % e
开发者ID:momolas,项目名称:urldigger,代码行数:35,代码来源:urldigger.py
示例4: google
def google(data): #In this fuction we will do the phrasing of the subject line ourselfs.
print "Called google"
tore = ''
search_string = ''
if data[1] == 'search':
for i in range(2,len(data)):
search_string = search_string + data[i] + ' '
try:
tore = "Here are the first 25 results from google when \'"+search_string+"\' is queried\n\n"
gs = GoogleSearch(search_string)
gs.results_per_page = 25
results = gs.get_results()
for res in results:
#print res.title.encode('utf8')
tore = tore+res.title.encode('utf8')+"\n"
#print res.desc.encode('utf8')
tore = tore+res.desc.encode('utf8')+"\n"
#print res.url.encode('utf8')
tore = tore+res.url.encode('utf8')+"\n\n--------------------------------------\n"
print
except:
print "Search failed: %s" % e
tore = "Search failed: %s" % e
return tore
开发者ID:jony123,项目名称:PyMail,代码行数:28,代码来源:functs.py
示例5: google
def google(self,text):
try:
print "Trying to search for "+text
g1 = GoogleSearch(text)
g1.results_per_page = 25
results = g1.get_results()
if len(results)==0:
print "No search result!!"
else:
print "Results FOund!!"
print type(results)
print len(results)
for res in results[:2]:
time.sleep(1)
url = res.url.encode("utf8")
response = self.search(url)
if response == "Kgpian":
self.close()
break
except SearchError, e:
print "Failed Once"
开发者ID:light94,项目名称:Ultimate-Search,代码行数:27,代码来源:alumnisearch.py
示例6: getGoogleResults
def getGoogleResults(self,pluginname,latest,cve):
try:
gs = GoogleSearch("inurl:'wp-content/plugins/" + pluginname + "'", random_agent=True)
gs.results_per_page = 100
numberOfprocessed = 0
self.all_run = []
for i in range(int(limitForSearch)):
results = gs.get_results()
if not results:
break
# Semaphore for write in order to screen
self.checkSimultaneus = threading.Semaphore(int(NumThreats))
# Semaphore for write to file
self.writeFile = threading.Semaphore(int(NumThreats)-1)
for res in results:
self.checkSimultaneus.acquire()
host_name = urlparse(res.url.encode()).hostname
# Create thread
t = threading.Thread(target=self.__getGoogleResults, args=(host_name,latest,pluginname,cve))
self.all_run.append(t)
# run thread
self.all_run[len(self.all_run)-1].start()
except SearchError, e:
print "Search failed: %s" % e
开发者ID:1EDTHEMAN1,项目名称:raspberry_pwn,代码行数:33,代码来源:plecost-0.2.2-9-beta.py
示例7: collect
def collect(self):
    """Harvest every Google-indexed URL under self.target into self.urls.

    Stops when a results page comes back with fewer than 10 entries,
    i.e. the last page."""
    engine = GoogleSearch("site:" + self.target)
    while True:
        batch = engine.get_results()
        for hit in batch:
            self.urls.append(hit.url)
        if len(batch) < 10:
            break
开发者ID:file-citas,项目名称:pyhtoncrawler,代码行数:8,代码来源:google_collect.py
示例8: go
def go(self, query, pages):
    """Scrape the first *pages* Google result pages for *query*,
    handing every result to self.scrape()."""
    engine = GoogleSearch(query)
    engine.results_per_page = 10
    for page_no in range(pages):
        engine.page = page_no
        for hit in engine.get_results():
            self.scrape(hit)
开发者ID:mhenes,项目名称:Google-EmailScraper,代码行数:9,代码来源:main.py
示例9: update_from_web
def update_from_web( model, film, year ):
search = "kinopoisk.ru " + year + " " + film
print "Search: %s" % search
browser=Browser(debug=True)
gs = GoogleSearch(search)
gs.results_per_page = 1
results = gs.get_results()
try:
for res in results:
pageurl = res.url.encode('utf8')
page = browser.get_page( pageurl )
soup = BeautifulStoneSoup( page[ page.find("<html"):], convertEntities=BeautifulStoneSoup.HTML_ENTITIES, fromEncoding="windows-1251" )
print "URL: %s" % pageurl
rating = soup.find('a',attrs={'class':'continue'})
if rating:
r = strip(rating).split(' ')
try:
model.rating = float( r[1] )
print "Rating: %s" % r[1]
except Exception, ex:
model.rating = 0.0
print "Can't parse rating"
title = soup.find('h1','moviename-big')
if title:
print "Title: %s" % strip(title)
model.title = strip(title)
info = soup.find('span','_reachbanner_')
if info:
print "Info: %s" % strip(info)
model.description = strip( info )
img = soup.find('img', attrs={"width" : "120"})
if img:
print "Image: %s" % img['src']
model.image = "http://www.kinopoisk.ru%s" % img['src']
#getTrailer("t26538","397494/kinopoisk.ru-District-9-36971.mp4","397494/1_36971.jpg","480","270","tr","","");
import re
m = re.search("getTrailer\((.*)\)",str(soup))
if not m:
pass
else:
parts = m.group(1).split('"')
url = "http://tr.kinopoisk.ru/%s" % parts[3]
model.trailer = url
image = "http://tr.kinopoisk.ru/%s" % parts[5]
model.trailer_image = image
print "Trailer: %s" % url
print "TrailerImage: %s" % image
break
except Exception,e:
print "WARNING: %s" % e
开发者ID:ATOM49,项目名称:django-voip,代码行数:57,代码来源:grabber.py
示例10: startSearch
def startSearch(self, domain="", target_keywords=[]):
gs = GoogleSearch(target_keyword)
gs.results_per_page = 100
results = gs.get_results()
for idx, res in enumerate(results):
parsed = urlparse(res.url)
domain = self.__makeUrl(parsed.netloc)
if domain == target_domain:
print "Ranking position %d for keyword %s on domain %s" % (idx + 1, target_keyword, target_domain)
开发者ID:xbox,项目名称:seoSuite,代码行数:9,代码来源:keywordTrackingPosition.py
示例11: searchPage
def searchPage(textToSearch, page):
    """Return the list of result URLs (utf-8 bytes) for one Google
    result page of *textToSearch*."""
    engine = GoogleSearch(textToSearch)
    engine.results_per_page = 100
    engine.page = page
    return [hit.url.encode('utf8') for hit in engine.get_results()]
开发者ID:JhetoX,项目名称:CardingLocator,代码行数:10,代码来源:Locator.py
示例12: get_number_of_results
def get_number_of_results(term, ajax=False, verbose=True):
    """Return Google's estimated hit count for *term*.

    With ajax=True (or when HTML scraping finds no count) the AJAX API
    estimate is used; otherwise the count is scraped from the results
    page with the module-level `reg` pattern."""
    if not ajax:
        engine = GoogleSearch(term)
        html = str(engine._get_results_page())
        m = reg.search(html)
        if m:
            count = m.groups()[0]
            if verbose:
                print(term, count)
            return int(count.replace(',', ''))
        # pause for inspection before falling back to the AJAX estimate
        raw_input((term, html))
    return int(search(term)['responseData']['cursor']['estimatedResultCount'])
开发者ID:JasonGross,项目名称:characters,代码行数:11,代码来源:google.py
示例13: main
def main():
    """Crawl every result URL for the hardcoded query and pipe links
    into SQLite via Crow."""
    #the hardcoded search query:
    gs = GoogleSearch("computer")
    # BUG FIX: was `gs.result_per_page = 10` (typo) -- that silently set
    # an unused attribute instead of the pager's `results_per_page`.
    gs.results_per_page = 10
    results = gs.get_results()
    for r in results:
        Crow(r.url).select("a").to(SqlitePipeline()).async_start(50)
    Crow.run()
    # NOTE(review): `f` is not defined in this function -- presumably a
    # module-level file handle; confirm it exists in the full module.
    f.close()
开发者ID:Rafe,项目名称:Crow,代码行数:11,代码来源:crow.py
示例14: get_results
def get_results(query):
gs = GoogleSearch(query);
gs.results_per_page = 9001;
results = gs.get_results();
ret = [];
for idx, res in enumerate(results):
domain = mk_nice_domain(res.url);
domain = domain.replace("pastebin.com/", "pastebin.com/raw.php?i=");
print 'Found codes at %s' % domain;
ret.append(domain);
return ret;
开发者ID:Yoshi-,项目名称:AuthLeech,代码行数:11,代码来源:AuthLeech.py
示例15: search_google
def search_google(term, domain):
    """Return the top-10 Google hits for *term* on the given TLD as Url
    objects, or None when the search fails."""
    try:
        log.debug('Performing Google search for "{}"'.format(term))
        engine = GoogleSearch(term, tld=domain)
        engine.results_per_page = 10
        hits = engine.get_results()
        log.debug('Got {} results'.format(len(hits)))
        top = []
        for hit in hits[:10]:
            top.append(Url(hit.url))
        return top
    except SearchError as exc:
        log.exception(exc)
        return None
开发者ID:monkeylearn,项目名称:monkeylearn-seo-demo,代码行数:11,代码来源:server.py
示例16: search
def search(md5hash):
    """Return all Google result URLs (utf-8 bytes) for an md5 hash."""
    engine = GoogleSearch(md5hash)
    engine.results_per_page = 100
    return [hit.url.encode('utf8') for hit in engine.get_results()]
开发者ID:assalw,项目名称:PythonScripts,代码行数:11,代码来源:hackmd5.py
示例17: scrape
def scrape(self, keyword, pages=2):
try:
gs = GoogleSearch(keyword)
gs.results_per_page = 10
gs.page = 0
results = gs.get_results()
for res in results:
url = res.url.encode('utf8')
Title = res.title
self.urls.append((url, Title))
except SearchError, e:
print "Search failed: %s" % e
开发者ID:javaongsan,项目名称:backlink_checker,代码行数:12,代码来源:google_url_scrapper.py
示例18: get_hits
def get_hits(term):
    """Return the Google search results for *term* (up to 100).

    BUG FIX: the original passed the undefined global `key` to
    GoogleSearch instead of the `term` parameter, raising NameError on
    every call. The dead, commented-out hnsearch implementation has been
    removed.
    """
    gs = GoogleSearch(term)
    gs.results_per_page = 100
    return gs.get_results()
开发者ID:LawDataProject,项目名称:Social_Media_Mining,代码行数:13,代码来源:GSPyLib.py
示例19: search_by_filename
def search_by_filename(args):
args_e=args.encode('utf8')
try:
gs = GoogleSearch('"' + args_e + '"')
gs.results_per_page = 50
results = gs.get_results()
for res in results:
if re_math_sites(allow_sites,res.url.encode('utf8')):
if re_math_sites(args_e,res.desc.encode('utf8')):
return clean_result(res.title.encode('utf8'))
except SearchError, e:
print "Search failed: %s" % e
开发者ID:s6d,项目名称:mv_renamer,代码行数:13,代码来源:movie_renamer.py
示例20: searchInSeretil
def searchInSeretil():
search_entered =''
keyboard = xbmc.Keyboard(search_entered, 'הכנס מילות חיפוש כאן')
keyboard.doModal()
if keyboard.isConfirmed():
search_entered = keyboard.getText()
if search_entered !='' :
try:
gs = GoogleSearch("site:seretil.me "+ search_entered)
gs.results_per_page = 100
results = gs.get_results()
for res in results:
title=res.title.encode('utf8')
url= res.url.encode('utf8')
title=title.replace('SERETIL.ME','')
title=title.replace('לצפייה ישירה','')
title=title.replace('וסדרות','')
title=title.replace('תרגום מובנה','')
title=title.replace('|','')
title=title.replace('.','')
title=title.replace('סרטים','')
title=title.replace('עם','')
title=title.replace('לצפיה','')
if 'עונה' in title :
if not 'page' in url and not 'tag' in url and not '?s' in url and not 'search' in url :
addDir(title,url,211,'')
else:
if not 'page' in url and not 'tag' in url and not '?s' in url and not 'search' in url:
image=''
req = urllib2.Request(url)
req.add_header('User-Agent', ' Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
response = urllib2.urlopen(req)
link3=response.read()
response.close()
block= re.compile('<div class="post-wrap post-wrap-single">(.*?)linkwithin_hook',re.M+re.I+re.S).findall(link3)
image=''
images= re.compile('src="http(.*?).?jpg').findall(block[0])
if images:
image='http'+images[0]+'.jpg'
addDir(title,url,5,image)
except SearchError, e:
print "Search failed: %s" % e
xbmcplugin.setContent(int(sys.argv[1]), 'tvshows')
开发者ID:AMIR27,项目名称:xbmc-israel,代码行数:51,代码来源:seretil.py
注:本文中的xgoogle.search.GoogleSearch类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论