
Python http.get_soup Function Code Examples


This article collects typical usage examples of the Python function util.http.get_soup. If you have been struggling with questions like how exactly get_soup is used, how to call it, or what real-world get_soup code looks like, the curated examples below should help.



Twenty code examples of the get_soup function are presented below, ordered by popularity.
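
All twenty examples rely on a shared helper module, util.http, from the CloudBot family of IRC bots (CloudBot, uguubot, Taigabot and similar projects appear below). For orientation, here is a rough sketch of what get_soup presumably does; this is an assumption for illustration, not the projects' actual implementation:

# A minimal sketch of a get_soup helper (Python 2, to match the examples).
# The real util.http in these projects may differ in details.
import urllib
import urllib2
from bs4 import BeautifulSoup

def get_soup(url, **kwargs):
    # Keyword arguments become query-string parameters,
    # e.g. get_soup('http://www.google.com/search', q='term').
    if kwargs:
        url += '?' + urllib.urlencode(kwargs)
    request = urllib2.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    return BeautifulSoup(urllib2.urlopen(request).read(), 'lxml')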

Example 1: fourchanthread_url

def fourchanthread_url(match):
    soup = http.get_soup(match)
    title = soup.title.renderContents().strip()
    post = soup.find('div', {'class': 'opContainer'})
    comment = post.find('blockquote', {'class': 'postMessage'}).renderContents().strip()
    author = post.find_all('span', {'class': 'nameBlock'})[1]
    # trimlength is presumably a module-level constant defined elsewhere in the source file
    return http.process_text("\x02{}\x02 - posted by \x02{}\x02: {}".format(title, author, comment[:trimlength]))
Developer ID: Anonymike, Project: pasta-bot, Lines of code: 7, Source file: urls.py


Example 2: animetake

def animetake(inp):
    "animetake <list> | <get [query]> - searches animetake for the latest updates"
    error = u"not so lucky today.."
    command = query = None
    try:
        inp_array = inp.split(" ")
        command = inp_array[0]
        query = inp_array[1]
    except IndexError:
        # "list" needs no query; a missing query is handled below
        pass

    url = "http://www.animetake.com/"  #% (urllib.quote_plus(query))
    anime_updates = []
    response = ""

    soup = http.get_soup(url)
    page = soup.find("div", id="mainContent").ul

    for li in page.findAll("li"):
        anime_link = li.find("div", "updateinfo").h4.a
        anime_updates.append("%s : %s" % (anime_link["title"], anime_link["href"]))

    if command == "list":
        count = 1
        response = "Latest Anime Updates: "
        for anime_title in anime_updates:
            response += "%s | " % (anime_title.split(" : ")[0])
            count += 1
            if count == 11:
                break
    elif command == "get":
        indices = [i for i, x in enumerate(anime_updates) if query in x]
        for index in indices:
            response += "%s " % (anime_updates[index])
    return response
Developer ID: FrozenPigs, Project: uguubot, Lines of code: 34, Source file: anime.py


Example 3: horoscope

def horoscope(inp, db=None, notice=None, nick=None):
    """horoscope <sign> [save] -- Get your horoscope."""
    save = False
    database.init(db)
    
    if '@' in inp:
        nick = inp.split('@')[1].strip()
        sign = database.get(db, 'users', 'horoscope', 'nick', nick)
        if not sign:
            return "No horoscope sign stored for {}.".format(nick)
    else:
        sign = database.get(db, 'users', 'horoscope', 'nick', nick)
        if not inp:
            if not sign:
                notice(horoscope.__doc__)
                return
        else:
            if not sign:
                save = True
            if " save" in inp:
                save = True
            sign = inp.split()[0]

    url = "http://my.horoscope.com/astrology/free-daily-horoscope-%s.html" % sign
    try:
        result = http.get_soup(url)
        title = result.find_all('h1', {'class': 'h1b'})[1].text
        horoscopetxt = result.find('div', {'id': 'textline'}).text
    except (http.HTTPError, http.URLError, IndexError, AttributeError):
        return "Could not get the horoscope for {}.".format(sign.encode('utf8'))

    if sign and save:
        database.set(db, 'users', 'horoscope', sign, 'nick', nick)
    
    return u"\x02{}\x02 {}".format(title, horoscopetxt)
Developer ID: Noclip21, Project: uguubot, Lines of code: 30, Source file: core_user.py
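
Taken together, the branches above give the command three invocation forms (summarized from the code; the "." trigger prefix is an assumption about the bot's configuration):

    .horoscope aries        -- look up Aries; on first use the sign is saved for your nick
    .horoscope aries save   -- look up Aries and explicitly (re)save it
    .horoscope @somenick    -- use the sign already saved for another user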


Example 4: animetake

def animetake(inp):
    "animetake <list> | <get [query]> - searches animetake for the latest updates"
    error = u'not so lucky today..'
    command = query = None
    try:
        inp_array = inp.split(' ')
        command = inp_array[0]
        query = inp_array[1]
    except IndexError:
        pass

    url = "http://www.animetake.com/"  #% (urllib.quote_plus(query))
    anime_updates = []
    response = ""

    soup = http.get_soup(url)
    page = soup.find('div', id='mainContent').ul

    for li in page.findAll('li'):
        anime_link = li.find('div', 'updateinfo').h4.a
        anime_updates.append('%s : %s' % (anime_link['title'], anime_link['href']))

    if command == 'list':
        count = 1
        response = "Latest Anime Updates: "
        for anime_title in anime_updates:
            response += "%s | " % (anime_title.split(' : ')[0])
            count += 1
            if count == 11:
                break
    elif command == 'get' and query:
        indices = [i for i, x in enumerate(anime_updates) if query in x]
        for index in indices:
            response += "%s " % (anime_updates[index])
    return response
Developer ID: Kiniamaro, Project: uguubot, Lines of code: 34, Source file: anime.py


Example 5: fourchan_url

def fourchan_url(match):
    soup = http.get_soup(match)
    title = soup.title.renderContents().strip()
    post = soup.find('div', {'class': 'opContainer'})
    comment = post.find('blockquote', {'class': 'postMessage'})
    author = post.find_all('span', {'class': 'nameBlock'})[1]
    return http.process_text('\x02%s\x02 - posted by \x02%s\x02: %s' % (title, author, comment))
Developer ID: Juboo, Project: UguuBot, Lines of code: 7, Source file: parsers.py


Example 6: get_woots

def get_woots(inp):
    woots = {}
    for k, v in inp.items():
        try:
            w = {}
            soup = http.get_soup(api + v)

            w['product'] = soup.find('woot:product').text
            w['wootoff'] = soup.find('woot:wootoff').text
            w['price'] = soup.find('woot:price').text
            w['pricerange'] = soup.find('woot:pricerange').text
            w['shipping'] = soup.find('woot:shipping').text
            w['url'] = "http://{}".format(v)
            w['soldout'] = soup.find('woot:soldout').text
            w['soldoutpercent'] = soup.find('woot:soldoutpercentage').text

            category = text.capitalize_first(k if k == 'woot' else "%s woot" % k)
            if w['wootoff'] != "false":
                category += "off!"

            woots[category] = w
        except (http.HTTPError, AttributeError):
            # skip sites that are down or missing the expected woot:* tags
            continue

    return woots
Developer ID: Cameri, Project: Gary, Lines of code: 25, Source file: woot.py


Example 7: fact

def fact():
    """fact -- Gets a random fact from OMGFACTS."""

    attempts = 0

    # all of this is because omgfacts is fail
    while True:
        try:
            soup = http.get_soup("http://www.omg-facts.com/random")
        except (http.HTTPError, http.URLError):
            if attempts > 2:
                return "Could not find a fact!"
            else:
                attempts += 1
                continue

        response = soup.find("a", {"class": "surprise"})
        link = response["href"]
        fact_data = "".join(response.find(text=True))

        if fact_data:
            fact_data = fact_data.strip()
            break
        else:
            if attempts > 2:
                return "Could not find a fact!"
            else:
                attempts += 1
                continue

    url = web.try_isgd(link)

    return "{} - {}".format(fact_data, url)
Developer ID: nasonfish, Project: CloudBot, Lines of code: 33, Source file: fact.py
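
The retry-and-bail logic above is a reusable pattern. A sketch of the same idea factored into a helper (illustrative, not part of the plugin):

def get_soup_with_retries(url, attempts=3):
    """Fetch and parse url, retrying transient failures before giving up."""
    # Assumes the same "from util import http" used by the plugins above.
    for _ in range(attempts):
        try:
            return http.get_soup(url)
        except (http.HTTPError, http.URLError):
            continue
    return None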


Example 8: fact

def fact(inp, say=False, nick=False):
    """fact -- Gets a random fact from OMGFACTS."""

    attempts = 0

    # all of this is because omgfacts is fail
    while True:
        try:
            soup = http.get_soup('http://www.omg-facts.com/random')
        except (http.HTTPError, http.URLError):
            if attempts > 2:
                return "Could not find a fact!"
            else:
                attempts += 1
                continue

        response = soup.find('a', {'class': 'surprise'})
        link = response['href']
        fact = ''.join(response.find(text=True))

        if fact:
            fact = fact.strip()
            break
        else:
            if attempts > 2:
                return "Could not find a fact!"
            else:
                attempts += 1
                continue

    url = web.try_isgd(link)

    return "{} - {}".format(fact, url)
Developer ID: thejordan95, Project: Groovebot2, Lines of code: 33, Source file: fact.py


Example 9: refresh_cache

def refresh_cache():
    """ gets a page of random FMLs and puts them into a dictionary """
    soup = http.get_soup('http://www.fmylife.com/random/')

    for e in soup.find_all('p', attrs={'class': 'block'}):
        id = int(e.find_all('a', href=True)[0]['href'].split('_')[1].split('.')[0])
        text = e.find_all('a')[0].text.strip()
        fml_cache.append((id, text))
Developer ID: inexist3nce, Project: Taigabot, Lines of code: 8, Source file: fmylife.py


Example 10: refresh_cache

def refresh_cache():
    """ gets a page of random FMLs and puts them into a dictionary """
    soup = http.get_soup('http://www.fmylife.com/random/')

    for e in soup.find_all('div', {'class': 'post article'}):
        fml_id = int(e['id'])
        text = ''.join(e.find('p').find_all(text=True))
        fml_cache.append((fml_id, text))
Developer ID: FurCode, Project: RoboCop2, Lines of code: 8, Source file: fmylife.py
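
Neither refresh_cache variant shows the consumer side; presumably the plugin serves random entries out of fml_cache roughly like this (a sketch; the fml command and its wiring are assumptions):

import random

fml_cache = []

def fml():
    """Return a random cached FML entry, refilling the cache if empty."""
    if not fml_cache:
        refresh_cache()
    fml_id, text = random.choice(fml_cache)
    return '(#{}) {}'.format(fml_id, text)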


Example 11: get_bash_quote

def get_bash_quote(inp):
    try:
        soup = http.get_soup('http://bash.org/?%s' % inp)
        quote_info = soup.find('p', {'class': 'quote'}).text
        quote = soup.find('p', {'class': 'qt'}).text
        quote_number = quote_info.split()[0].replace('#', '')
        quote_score = quote_info.split()[1].split('(')[1].split(')')[0].strip()
        quote_text = quote.replace('\n', ' ').replace('\r', ' |')
        return '\x02#{}\x02 ({}): {}'.format(quote_number, quote_score, quote_text)
    except (http.HTTPError, AttributeError):
        return "No quote found."
Developer ID: bytebit-ch, Project: uguubot, Lines of code: 8, Source file: bash.py


Example 12: get_yandere_tags

def get_yandere_tags(inp):
    url = 'https://yande.re/post?tags=%s' % inp.replace(' ','_')
    soup = http.get_soup(url)
    imagelist = soup.find('ul', {'id': 'post-list-posts'}).findAll('li')
    image = imagelist[random.randint(0,len(imagelist)-1)]
    imageid = image["id"].replace('p','')
    title = image.find('img')['title']
    src = image.find('a', {'class': 'directlink'})["href"]
    return u"\x034NSFW\x03: \x02({})\x02 {}: {}".format(imageid, title, web.isgd(http.unquote(src)))
Developer ID: Boltovnya, Project: uguubot, Lines of code: 9, Source file: yandere.py


Example 13: refresh_cache

def refresh_cache():
    "gets a page of random MLIAs and puts them into a dictionary "
    url = 'http://mylifeisaverage.com/%s' % random.randint(1,11000)
    soup = http.get_soup(url)
    
    for story in soup.find_all('div', {'class': 'story '}):
        mlia_id = story.find('span', {'class': 'left'}).a.text
        mlia_text = story.find('div', {'class': 'sc'}).text.strip()
        mlia_cache.append((mlia_id, mlia_text))
Developer ID: Boltovnya, Project: uguubot, Lines of code: 9, Source file: mylifeisaverage.py


Example 14: get_yandere_tags

def get_yandere_tags(inp):
    url = "https://yande.re/post?tags=%s" % inp.replace(" ", "_")
    soup = http.get_soup(url)
    imagelist = soup.find("ul", {"id": "post-list-posts"}).findAll("li")
    image = imagelist[random.randint(0, len(imagelist) - 1)]
    imageid = image["id"].replace("p", "")
    title = image.find("img")["title"]
    src = image.find("a", {"class": "directlink"})["href"]
    return "\x034NSFW\x03: \x02({})\x02 {}: {}".format(imageid, title, web.isgd(http.unquote(src)))
Developer ID: bytebit-ch, Project: uguubot, Lines of code: 9, Source file: yandere.py


Example 15: steam

def steam(inp):
    """.steam [search] - Search for specified game/trailer/DLC."""
    soup = http.get_soup("http://store.steampowered.com/search/?term={}".format(inp))
    result = soup.find('a', {'class': 'search_result_row'})
    try:
        # get_steam_info is presumably a helper defined elsewhere in steam.py
        return (get_steam_info(result['href']) +
                " - " + web.try_googl(result['href']))
    except Exception as e:
        print "Steam search error: {}".format(e)
        return "Steam API error, please try again later."
Developer ID: Cameri, Project: Gary, Lines of code: 10, Source file: steam.py


Example 16: refresh_cache

def refresh_cache():
    "gets a page of random yande.re posts and puts them into a dictionary "
    url = "https://yande.re/post?page=%s" % random.randint(1, 11000)
    soup = http.get_soup(url)

    for result in soup.findAll("li"):
        title = result.find("img", {"class": re.compile(r"\bpreview\b")})  # ['title']
        img = result.find("a", {"class": re.compile(r"\bdirectlink\b")})  # ['href']
        if img and title:
            yandere_cache.append((result["id"].replace("p", ""), title["title"].split(" User")[0], img["href"]))
Developer ID: bytebit-ch, Project: uguubot, Lines of code: 10, Source file: yandere.py


Example 17: refresh_cache

def refresh_cache():
    "gets a page of random yande.re posts and puts them into a dictionary "
    url = 'https://yande.re/post?page=%s' % random.randint(1,11000)
    soup = http.get_soup(url)

    for result in soup.findAll('li'):
        title = result.find('img', {'class': re.compile(r'\bpreview\b')}) #['title']
        img = result.find('a', {'class': re.compile(r'\bdirectlink\b')}) #['href']
        if img and title:
            yandere_cache.append((result['id'].replace('p','') ,title['title'].split(' User')[0], img['href']))
Developer ID: Boltovnya, Project: uguubot, Lines of code: 10, Source file: yandere.py


Example 18: calc

def calc(inp):
    "calc <term> -- Calculate <term> with Google Calc."

    soup = http.get_soup("http://www.google.com/search", q=inp)

    result = soup.find("h2", {"class": "r"})
    if not result:
        return "Could not calculate '%s'" % inp

    return result.contents[0]
Developer ID: Juboo, Project: UguuBot, Lines of code: 10, Source file: gcalc.py


Example 19: gcalc

def gcalc(inp):
    "gcalc <term> -- Calculate <term> with Google Calc."
    soup = http.get_soup('http://www.google.com/search', q=inp)

    result = soup.find('span', {'class': 'cwcot'})
    formula = soup.find('span', {'class': 'cwclet'})
    if not result:
        return "Could not calculate '{}'".format(inp)

    return u"{} {}".format(formula.contents[0].strip(),result.contents[0].strip())
Developer ID: ewhal, Project: uguubot, Lines of code: 10, Source file: google.py
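
Both calculator examples pass the search term as a keyword argument (q=inp), which get_soup presumably URL-encodes into the query string. A sketch of the assumed equivalence:

soup = http.get_soup('http://www.google.com/search', q='2+2')
# assumed to behave like fetching:
# http://www.google.com/search?q=2%2B2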


Example 20: get_title

def get_title(url):
    soup = http.get_soup(url)

    if "#" in url:
        postid = url.split("#")[1]
        post = soup.find("div", {"id": postid})
    else:
        post = soup.find("div", {"class": "opContainer"})

    comment = http.process_text(post.find("blockquote", {"class": "postMessage"}).renderContents().strip())
    return "{} - {}".format(url, comment)  #
Developer ID: bytebit-ch, Project: uguubot, Lines of code: 11, Source file: 4chan.py
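
A hypothetical invocation of get_title (the thread URL and post IDs are illustrative, not real):

# With a #fragment, the linked post is quoted instead of the thread OP.
print get_title('http://boards.4chan.org/g/thread/12345678#p12345679')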



Note: The util.http.get_soup examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by various developers; copyright remains with the original authors, and distribution or use should follow each project's license. Do not reproduce without permission.

