
Python snudown.markdown Function Code Examples


This article collects typical usage examples of Python's snudown.markdown function. If you have been wondering exactly how the markdown function works, how to call it, or what real uses of it look like, the curated examples below should help.



Twenty code examples of the markdown function are shown below, sorted by popularity by default.
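All twenty excerpts ultimately call the same entry point, snudown.markdown (snudown is reddit's C Markdown renderer). As a quick orientation, here is a minimal sketch of the two call patterns the excerpts rely on. This is a Python 2-era API that takes and returns UTF-8 encoded byte strings; the renderer and enable_toc keywords appear in the examples themselves, while the rest is inferred and should be checked against your installed snudown version.

import snudown

# Plain user-text rendering, as in the safemarkdown examples below.
html = snudown.markdown('Hello **reddit**!'.encode('utf-8'))
# html == '<p>Hello <strong>reddit</strong>!</p>\n'

# Wiki rendering with a table of contents, as in the wikimarkdown examples.
# The second and third arguments mirror the excerpts' (nofollow, target) pair.
wiki_html = snudown.markdown('# Title\n\nSome body text.'.encode('utf-8'),
                             True,    # nofollow: add rel="nofollow" to links
                             None,    # target: value for <a target="...">
                             renderer=snudown.RENDERER_WIKI,
                             enable_toc=True)

The reddit wrappers shown below (safemarkdown, wikimarkdown, emailmarkdown) layer policy on top of this call: forcing the input to UTF-8, choosing a link target, and post-processing the returned HTML with BeautifulSoup.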

Example 1: wikimarkdown

def wikimarkdown(text, include_toc=True, target=None):
    from r2.lib.cssfilter import legacy_s3_url
    
    def img_swap(tag):
        name = tag.get('src')
        name = custom_img_url.search(name)
        name = name and name.group(1)
        if name and c.site.images.has_key(name):
            url = c.site.images[name]
            url = legacy_s3_url(url, c.site)
            tag['src'] = url
        else:
            tag.extract()
    
    nofollow = True
    
    text = snudown.markdown(_force_utf8(text), nofollow, target, g.domain,
                            renderer=snudown.RENDERER_WIKI)
    
    # TODO: We should test how much of a load this adds to the app
    soup = BeautifulSoup(text.decode('utf-8'))
    images = soup.findAll('img')
    
    if images:
        [img_swap(image) for image in images]
    
    if include_toc:
        tocdiv = generate_table_of_contents(soup, prefix="wiki")
        if tocdiv:
            soup.insert(0, tocdiv)
    
    text = str(soup)
    
    return SC_OFF + WIKI_MD_START + text + WIKI_MD_END + SC_ON
Developer: caseypatrickdriscoll, Project: reddit, Lines: 34, Source: filters.py


Example 2: wikimarkdown

def wikimarkdown(text):
    from r2.lib.cssfilter import legacy_s3_url
    
    def img_swap(tag):
        name = tag.get('src')
        name = custom_img_url.search(name)
        name = name and name.group(1)
        if name and c.site.images.has_key(name):
            url = c.site.images[name]
            url = legacy_s3_url(url, c.site)
            tag['src'] = url
        else:
            tag.extract()
    
    nofollow = True
    target = None
    
    text = snudown.markdown(_force_utf8(text), nofollow, target,
                            renderer=snudown.RENDERER_WIKI, enable_toc=True)
    
    # TODO: We should test how much of a load this adds to the app
    soup = BeautifulSoup(text)
    images = soup.findAll('img')
    
    if images:
        [img_swap(image) for image in images]
        text = str(soup)
    
    return SC_OFF + WIKI_MD_START + text + WIKI_MD_END + SC_ON
Developer: Anenome, Project: reddit, Lines: 29, Source: filters.py


Example 3: normalize_markdown_text

def normalize_markdown_text(parser, source):
    rendered  = markdown(unicode(source).encode('utf-8'))
    html_body = ' '.join(rendered.splitlines())
    soup      = BeautifulSoup(html_body)
    text      = ' '.join(soup.findAll(text=True))
    text      = parser.unescape(text)
    return unicode(' '.join(text.splitlines()).replace(',', ' ')).encode('utf-8')
Developer: TomDunn, Project: RedditGraph, Lines: 7, Source: export_comments.py


Example 4: safemarkdown

def safemarkdown(text, nofollow=False, target=None, lang=None, wrap=True):
    from r2.lib.c_markdown import c_markdown
    from r2.lib.py_markdown import py_markdown

    if c.user.pref_no_profanity:
        text = profanity_filter(text)

    if not text:
        return None

    if c.cname and not target:
        target = "_top"

    if lang is None:
        lang = g.markdown_backend

    if lang == "snudown":
        text = snudown.markdown(_force_utf8(text), nofollow, target)
    elif lang == "c":
        text = c_markdown(text, nofollow, target)
    elif lang == "py":
        text = py_markdown(text, nofollow, target)
    else:
        raise ValueError("weird lang [%s]" % lang)

    if wrap:
        return SC_OFF + MD_START + text + MD_END + SC_ON
    else:
        return text
Developer: jiayanju, Project: reddit, Lines: 29, Source: filters.py


Example 5: safemarkdown

def safemarkdown(text, nofollow=False, wrap=True, **kwargs):
    from r2.lib.utils import generate_affiliate_link, domain
    if not text:
        return None

    target = kwargs.get("target", None)
    text = snudown.markdown(_force_utf8(text), nofollow, target)
    to_affiliate = kwargs.get("affiliate", False)
    if to_affiliate:
        soup = BeautifulSoup(text.decode('utf-8'))
        links = soup.findAll('a')
        update_text = False

        def detect_affiliate(markdown_link):
            return domain(markdown_link.get('href'))\
                    in g.merchant_affiliate_domains

        for link in filter(detect_affiliate, links):
            update_text = True
            link['class'] = 'affiliate'
            link['data-href-url'] = link.get('href')
            link['data-affiliate-url'] = generate_affiliate_link(
                                            link.get('href')
                                         )

        if update_text:
            text = str(soup)

    if wrap:
        return SC_OFF + MD_START + text + MD_END + SC_ON
    else:
        return SC_OFF + text + SC_ON
Developer: zeantsoi, Project: reddit, Lines: 32, Source: filters.py


Example 6: hello

def hello():
    messages = rds.zrevrangebyscore('goygoy', '+inf', '-inf')
    msgs = []
    for i in messages:
        msg = json.loads(i)
        msgs.append(dict(
            msg = _force_unicode(snudown.markdown(_force_utf8(msg['msg']))),
            username='anonim'
        ))
    return render_template('index.html', messages=msgs)
Developer: ybrs, Project: goygoyvarminoktacom, Lines: 10, Source: app.py


Example 7: strip_markdown

def strip_markdown(text):
    """Extract text from a markdown string.
    """
    html = markdown(text.encode('utf-8'))
    soup = BeautifulSoup(
        html,
        "html.parser",
        from_encoding='utf8'
        )
    return "".join(soup.findAll(text=True))
Developer: PsyBorgs, Project: redditanalyser, Lines: 10, Source: analyser.py


Example 8: extract_urls_from_markdown

def extract_urls_from_markdown(md):
    "Extract URLs that will be hot links from a piece of raw Markdown."

    html = snudown.markdown(_force_utf8(md))
    links = SoupStrainer("a")

    for link in BeautifulSoup(html, parseOnlyThese=links):
        url = link.get('href')
        if url:
            yield url
Developer: HerculesCE, Project: reddit, Lines: 10, Source: utils.py


Example 9: safemarkdown

def safemarkdown(text, nofollow=False, wrap=True, **kwargs):
    if not text:
        return None

    target = kwargs.get("target", None)
    text = snudown.markdown(_force_utf8(text), nofollow, target)

    if wrap:
        return SC_OFF + MD_START + text + MD_END + SC_ON
    else:
        return SC_OFF + text + SC_ON
Developer: pra85, Project: reddit, Lines: 11, Source: filters.py


Example 10: runTest

    def runTest(self):
        output = snudown.markdown(self.input)

        for i, (a, b) in enumerate(zip(repr(self.expected_output), repr(output))):
            if a != b:
                io = StringIO.StringIO()
                print >> io, "TEST FAILED:"
                print >> io, "       input: %s" % repr(self.input)
                print >> io, "    expected: %s" % repr(self.expected_output)
                print >> io, "      actual: %s" % repr(output)
                print >> io, "              %s" % (" " * i + "^")
                self.fail(io.getvalue())
Developer: new-day-international, Project: snudown, Lines: 12, Source: test_snudown.py


Example 11: safemarkdown

def safemarkdown(text, nofollow=False, target=None, wrap=True):
    if not text:
        return None

    if c.cname and not target:
        target = "_top"

    text = snudown.markdown(_force_utf8(text), nofollow, target)

    if wrap:
        return SC_OFF + MD_START + text + MD_END + SC_ON
    else:
        return text
Developer: kang3700, Project: donelist, Lines: 13, Source: filters.py


Example 12: wikimarkdown

def wikimarkdown(text, include_toc=True, target=None):
    from r2.lib.template_helpers import make_url_protocol_relative

    # this hard codes the stylesheet page for now, but should be parameterized
    # in the future to allow per-page images.
    from r2.models.wiki import ImagesByWikiPage
    from r2.lib.utils import UrlParser
    from r2.lib.template_helpers import add_sr
    page_images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")
    
    def img_swap(tag):
        name = tag.get('src')
        name = custom_img_url.search(name)
        name = name and name.group(1)
        if name and name in page_images:
            url = page_images[name]
            url = make_url_protocol_relative(url)
            tag['src'] = url
        else:
            tag.extract()
    
    nofollow = True
    
    text = snudown.markdown(_force_utf8(text), nofollow, target,
                            renderer=snudown.RENDERER_WIKI)
    
    # TODO: We should test how much of a load this adds to the app
    soup = BeautifulSoup(text.decode('utf-8'))
    images = soup.findAll('img')
    
    if images:
        [img_swap(image) for image in images]

    def add_ext_to_link(link):
        url = UrlParser(link.get('href'))
        if url.is_reddit_url():
            link['href'] = add_sr(link.get('href'), sr_path=False)

    if c.render_style == 'compact':
        links = soup.findAll('a')
        [add_ext_to_link(a) for a in links]

    if include_toc:
        tocdiv = generate_table_of_contents(soup, prefix="wiki")
        if tocdiv:
            soup.insert(0, tocdiv)
    
    text = str(soup)
    
    return SC_OFF + WIKI_MD_START + text + WIKI_MD_END + SC_ON
Developer: pra85, Project: reddit, Lines: 50, Source: filters.py


Example 13: safemarkdown

def safemarkdown(text, nofollow=False, wrap=True, **kwargs):
    if not text:
        return None

    # this lets us skip the c.cname lookup (which is apparently quite
    # slow) if target was explicitly passed to this function.
    target = kwargs.get("target", None)
    if "target" not in kwargs and c.cname:
        target = "_top"

    text = snudown.markdown(_force_utf8(text), nofollow, target)

    if wrap:
        return SC_OFF + MD_START + text + MD_END + SC_ON
    else:
        return SC_OFF + text + SC_ON
Developer: ArslanRafique, Project: reddit, Lines: 16, Source: filters.py


Example 14: runTest

    def runTest(self):
        output = snudown.markdown(self.input)

        for i, (a, b) in enumerate(zip(repr(self.expected_output),
                                       repr(output))):
            if a != b:
                # Python 2/3 compatibility: StringIO.StringIO exists under
                # Python 2; fall back to the bare StringIO name otherwise.
                try:
                    io = StringIO.StringIO()
                except NameError:
                    io = StringIO()
                print("TEST FAILED:", file=io)
                print("       input: %s" % repr(self.input), file=io)
                print("    expected: %s" % repr(self.expected_output), file=io)
                print("      actual: %s" % repr(output), file=io)
                print("              %s" % (' ' * i + '^'), file=io)
                self.fail(io.getvalue())
Developer: chid, Project: snudown, Lines: 16, Source: test_snudown.py


Example 15: wikimarkdown

def wikimarkdown(text, include_toc=True, target=None):
    from r2.lib.cssfilter import legacy_s3_url
    
    nofollow = True
    
    text = snudown.markdown(_force_utf8(text), nofollow, target, g.domain)
    
    # TODO: We should test how much of a load this adds to the app
    soup = BeautifulSoup(text.decode('utf-8'))

    if include_toc:
        tocdiv = generate_table_of_contents(soup, prefix="wiki")
        if tocdiv:
            soup.insert(0, tocdiv)
    
    text = str(soup)
    
    return SC_OFF + WIKI_MD_START + text + WIKI_MD_END + SC_ON
Developer: new-day-international, Project: reddit, Lines: 18, Source: filters.py


Example 16: process_self

    def process_self(self, submission):
        html = snudown.markdown(submission.selftext.encode('UTF-8'))
        soup = BeautifulSoup(html)
        refs = {}

        # Iterate through all links, get xkcd json
        for link in soup.find_all('a'):
            href = link.get('href')
            if not href:
                continue
            j = self.xkcd_fetcher.get_json(href)
            if not j:
                logger.warn('Data could not be fetched for {url}'.format(url=href))
                continue
            refs[int(j.get('num', -1))] = {
                'data': j,
                'href': href
            }

        return self.process_references(submission, refs)
Developer: elisspace, Project: redditbot-1, Lines: 20, Source: bot.py


Example 17: wikimarkdown

def wikimarkdown(text, include_toc=True, target=None):
    from r2.lib.template_helpers import media_https_if_secure

    # this hard codes the stylesheet page for now, but should be parameterized
    # in the future to allow per-page images.
    from r2.models.wiki import ImagesByWikiPage
    page_images = ImagesByWikiPage.get_images(c.site, "config/stylesheet")
    
    def img_swap(tag):
        name = tag.get('src')
        name = custom_img_url.search(name)
        name = name and name.group(1)
        if name and name in page_images:
            url = page_images[name]
            url = media_https_if_secure(url)
            tag['src'] = url
        else:
            tag.extract()
    
    nofollow = True
    
    text = snudown.markdown(_force_utf8(text), nofollow, target,
                            renderer=snudown.RENDERER_WIKI)
    
    # TODO: We should test how much of a load this adds to the app
    soup = BeautifulSoup(text.decode('utf-8'))
    images = soup.findAll('img')
    
    if images:
        [img_swap(image) for image in images]
    
    if include_toc:
        tocdiv = generate_table_of_contents(soup, prefix="wiki")
        if tocdiv:
            soup.insert(0, tocdiv)
    
    text = str(soup)
    
    return SC_OFF + WIKI_MD_START + text + WIKI_MD_END + SC_ON
Developer: Acceto, Project: reddit, Lines: 39, Source: filters.py


Example 18: parseComment

def parseComment(redditComment, postAuthorName, postAuthorExists, isRoot=True):
    commentAuthorName = ''
    commentAuthorExists = 0
    try:
        commentAuthorName = fixUnicode(redditComment.author.name)
        commentAuthorExists = 1
    except AttributeError:
        commentAuthorExists = 0
    if isRoot:
        htmlFile.write('<div id="' + str(redditComment.id))
        htmlFile.write('" class="comment">\n')
    else:
        htmlFile.write('<div id="' + str(redditComment.id)) 
        htmlFile.write('" class="comment" style="margin-bottom:10px;margin-left:0px;">\n')
    htmlFile.write('<div class="commentinfo">\n')
    if commentAuthorExists:
        if postAuthorExists and postAuthorName == commentAuthorName:
            htmlFile.write('<a href="' + redditComment.author._url)
            htmlFile.write('" class="postOP-comment">' + commentAuthorName + '</a> <em>')
        else:
            htmlFile.write('<a href="' + redditComment.author._url)
            htmlFile.write('">' + commentAuthorName + '</a> <em>')
    else:
        htmlFile.write('<strong>[Deleted]</strong> <em>')
    htmlFile.write(str(redditComment.ups - redditComment.downs))
    htmlFile.write(' Points </em><em>')
    htmlFile.write('Posted at ')
    postDate = time.gmtime(redditComment.created_utc)
    htmlFile.write(str(postDate.tm_hour) + ':')
    htmlFile.write(str(postDate.tm_min) + ' UTC on ')
    htmlFile.write(monthsList[postDate.tm_mon-1] + ' ')
    htmlFile.write(str(postDate.tm_mday) + ', ' + str(postDate.tm_year))
    htmlFile.write('</em></div>\n')
    htmlFile.write(snudown.markdown(fixMarkdown(redditComment.body)))
    for reply in redditComment._replies:
        parseComment(reply, postAuthorName, postAuthorExists, False)
    htmlFile.write('</div>\n')
Developer: nilseckert, Project: website-mirrors, Lines: 37, Source: archiver.py


Example 19: emailmarkdown

def emailmarkdown(text, wrap=True):
    if not text:
        return None

    text = snudown.markdown(_force_utf8(text))

    soup = BeautifulSoup(text.decode('utf-8'))
    links = soup.findAll('a')
    update_text = False
    base = g.https_endpoint or g.origin

    for link in links:
        # if link is relative
        if link['href'].startswith('/'):
            update_text = True
            link['href'] = urljoin(base, link['href'])

    if update_text:
        text = str(soup)

    if wrap:
        return SC_OFF + MD_START + text + MD_END + SC_ON
    else:
        return SC_OFF + text + SC_ON
Developer: zeantsoi, Project: reddit, Lines: 24, Source: filters.py


Example 20: markdown

def markdown(value):
    return snudown.markdown(value)
Developer: durka, Project: balningau, Lines: 2, Source: markdown.py



Note: the snudown.markdown examples in this article were compiled by 纯净天空 from source code hosted on platforms such as GitHub and MSDocs. The snippets are selected from open-source projects, and copyright remains with their original authors; consult each project's license before redistributing or reusing the code, and do not reproduce this compilation without permission.

