
Python utils.domain Function Code Examples


This article collects typical usage examples of the Python r2.lib.utils.domain function. If you have been wondering what exactly the domain function does, how to call it, or what real-world code that uses it looks like, the hand-picked examples below should help.



Twenty code examples of the domain function are shown below, sorted by popularity by default.
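Before the examples, a quick orientation: judging from how it is used below, r2.lib.utils.domain takes a URL and returns its lowercased hostname, minus any port and any leading "www.". The real implementation lives in reddit's r2/lib/utils; the sketch here is an assumption inferred from the examples, not the verbatim source.

from urlparse import urlparse  # Python 2, matching the examples below

def domain_sketch(url):
    """Hedged re-implementation of r2.lib.utils.domain."""
    host = urlparse(url).netloc.lower()
    host = host.split(':')[0]       # "example.com:8080" -> "example.com"
    if host.startswith('www.'):
        host = host[4:]             # "www.example.com" -> "example.com"
    return host

print domain_sketch("http://www.Example.com:8080/a?b=c")  # example.com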

Example 1: get_context_data

    def get_context_data(self, request, context):
        """Extract common data from the current request and context

        This is generally done explicitly in `__init__`, but is done by hand for
        votes before the request context is lost by the queuing.

        request, context: Should be pylons.request & pylons.c respectively
        """
        data = {}

        if context.user_is_loggedin:
            data["user_id"] = context.user._id
            data["user_name"] = context.user.name
        else:
            loid = request.cookies.get("loid", None)
            if loid:
                data["loid"] = loid

        oauth2_client = getattr(context, "oauth2_client", None)
        if oauth2_client:
            data["oauth2_client_id"] = oauth2_client._id

        data["geoip_country"] = get_request_location(request, context)
        data["domain"] = request.host
        data["user_agent"] = request.user_agent

        http_referrer = request.headers.get("Referer", None)
        if http_referrer:
            data["referrer_url"] = http_referrer
            data["referrer_domain"] = domain(http_referrer)

        return data
Developer: RobRoseKnows, Project: reddit, Lines: 32, Source: eventcollector.py


Example 2: for_url

    def for_url(cls, embedly_services, url):
        url_domain = domain(url)
        domain_embedly_regex = embedly_services.get(url_domain, None)

        if domain_embedly_regex and re.match(domain_embedly_regex, url):
            return _EmbedlyScraper(url)
        return _ThumbnailOnlyScraper(url)
Developer: AlbertoPeon, Project: reddit, Lines: 7, Source: media.py
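For context, a hedged usage sketch: embedly_services is presumably a dict mapping a bare domain (as returned by domain()) to a regex of URLs that Embedly can handle on that domain. The mapping entry and URL below are illustrative assumptions:

import re

embedly_services = {"youtube.com": r"https?://(www\.)?youtube\.com/watch\?.*"}
url = "http://www.youtube.com/watch?v=dQw4w9WgXcQ"
url_domain = "youtube.com"  # what domain(url) would return
pattern = embedly_services.get(url_domain)
print bool(pattern and re.match(pattern, url))  # True -> _EmbedlyScraper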


Example 3: valid_url

def valid_url(prop,value,report):
    """
    checks url(...) arguments in CSS, ensuring that the contents are
    officially sanctioned.  Sanctioned urls include:
     * anything in /static/
     * image labels %%..%% for images uploaded on /about/stylesheet
     * urls with domains in g.allowed_css_linked_domains
    """
    url = value.getStringValue()
    # local urls are allowed
    if local_urls.match(url):
        pass
    # custom urls are allowed, but need to be transformed into a real path
    elif custom_img_urls.match(url):
        name = custom_img_urls.match(url).group(1)
        # the label -> image number lookup is stored on the subreddit
        if c.site.images.has_key(name):
            num = c.site.images[name]
            value._setCssText("url(http:/%s%s_%d.png?v=%s)"
                              % (g.s3_thumb_bucket, c.site._fullname, num,
                                 randstr(36)))
        else:
            # unknown image label -> error
            report.append(ValidationError(msgs['broken_url']
                                          % dict(brokenurl = value.cssText),
                                          value))
    # allowed domains are ok
    elif domain(url) in g.allowed_css_linked_domains:
        pass
    else:
        report.append(ValidationError(msgs['broken_url']
                                      % dict(brokenurl = value.cssText),
                                      value))
Developer: vin, Project: reddit, Lines: 33, Source: cssfilter.py


Example 4: submit_rss_links

def submit_rss_links(srname, rss, user, titlefield='title', linkfield='link'):
    # Skip the API; submit links the way we would if we were really doing
    # it by hand. This avoids screwing around with cookies and so forth...
    feed = fetch_feed(rss)
    if feed is None:
        return
    ac = Account._byID(user)
    sr = Subsciteit._by_name(srname)
    ip = '0.0.0.0'
    niceify = False
    if domain(rss) == "arxiv.org":
        niceify = dict(find=r"\(arXiv:.*?\)", replace="")
    # Let's randomize the order, why not...
    random.shuffle(feed.entries)
    for article in feed.entries:
        # This can take all night if it has to; we don't want to hammer
        # the server into oblivion.
        sleep(1)
        kw = fetch_article(article, titlefield=titlefield,
                           linkfield=linkfield, niceify=niceify)
        if kw is None:
            continue
        l = Link._submit(kw['title'], kw['link'], ac, sr, ip, spam=False)
        l._commit()
        l.set_url_cache()
        # We don't really need auto-submitted links to be voted on...
        queries.queue_vote(ac, l, True, ip, cheater=False)
        queries.new_link(l)
        changed(l)
        print "Submitted %s" % article[titlefield]
        sleep(.1)
    return
Developer: constantAmateur, Project: sciteit, Lines: 29, Source: sr_rss.py


Example 5: _make_media_object

    def _make_media_object(self, oembed):
        if oembed.get("type") in ("video", "rich"):
            return {
                "type": domain(self.url),
                "oembed": oembed,
            }
        return None
Developer: ArtBears, Project: reddit, Lines: 7, Source: media.py


Example 6: scrape

    def scrape(self):
        params = urllib.urlencode({
            "url": self.url,
            "format": "json",
            "maxwidth": 600,
            "key": g.embedly_api_key,
        })
        content = requests.get(self.EMBEDLY_API_URL + "?" + params).content
        oembed = json.loads(content, object_hook=self._utf8_encode)

        if not oembed:
            return None, None

        if oembed.get("type") == "photo":
            thumbnail_url = oembed.get("url")
        else:
            thumbnail_url = oembed.get("thumbnail_url")
        thumbnail = _make_thumbnail_from_url(thumbnail_url, referer=self.url)

        embed = {}
        if oembed.get("type") in ("video", "rich"):
            embed = {
                "type": domain(self.url),
                "oembed": oembed,
            }

        return thumbnail, embed
Developer: AlbertoPeon, Project: reddit, Lines: 27, Source: media.py


Example 7: add_props

    def add_props(cls, user, wrapped):
        from r2.lib.count import incr_counts
        from r2.lib.media import thumbnail_url
        from r2.lib.utils import timeago

        saved = Link._saved(user, wrapped) if user else {}
        hidden = Link._hidden(user, wrapped) if user else {}
        #clicked = Link._clicked(user, wrapped) if user else {}
        clicked = {}

        for item in wrapped:
            show_media = (c.user.pref_media == 'on' or
                          (item.promoted and item.has_thumbnail
                           and c.user.pref_media != 'off') or
                          (c.user.pref_media == 'subreddit' and
                           item.subreddit.show_media))

            if not show_media:
                item.thumbnail = ""
            elif item.has_thumbnail:
                item.thumbnail = thumbnail_url(item)
            else:
                item.thumbnail = g.default_thumb
            
            item.score = max(0, item.score)

            item.domain = (domain(item.url) if not item.is_self
                          else 'self.' + item.subreddit.name)
            if not hasattr(item,'top_link'):
                item.top_link = False
            item.urlprefix = ''
            item.saved = bool(saved.get((user, item, 'save')))
            item.hidden = bool(hidden.get((user, item, 'hide')))
            item.clicked = bool(clicked.get((user, item, 'click')))
            item.num = None
            item.score_fmt = Score.number_only
            item.permalink = item.make_permalink(item.subreddit)
            if item.is_self:
                item.url = item.make_permalink(item.subreddit, force_domain = True)

            if c.user_is_admin:
                item.hide_score = False
            elif item.promoted:
                item.hide_score = True
            elif c.user == item.author:
                item.hide_score = False
            elif item._date > timeago("2 hours"):
                item.hide_score = True
            else:
                item.hide_score = False

            if c.user_is_loggedin and item.author._id == c.user._id:
                item.nofollow = False
            elif item.score <= 1 or item._spam or item.author._spam:
                item.nofollow = True
            else:
                item.nofollow = False
        if c.user_is_loggedin:
            incr_counts(wrapped)
Developer: vin, Project: reddit, Lines: 59, Source: link.py


Example 8: _key_from_url

    def _key_from_url(cls, url):
        if utils.domain(url) not in g.case_sensitive_domains:
            keyurl = _force_utf8(UrlParser.base_url(url.lower()))
        else:
            # Convert only the hostname to lowercase
            up = UrlParser(url)
            up.hostname = up.hostname.lower()
            keyurl = _force_utf8(UrlParser.base_url(up.unparse()))
        return keyurl
Developer: phektus, Project: dabuzz, Lines: 9, Source: link.py
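To make the branch above concrete: for ordinary domains the whole URL is lowercased before keying, while for domains in g.case_sensitive_domains (short-link hosts whose path is a case-sensitive ID, for instance) only the hostname is lowercased. A hedged sketch of the difference, using plain urlsplit instead of reddit's UrlParser and an illustrative URL:

from urlparse import urlsplit, urlunsplit

def key_sketch(url, case_sensitive):
    """Hedged illustration of the two branches in _key_from_url."""
    if not case_sensitive:
        return url.lower()  # whole URL lowercased
    parts = urlsplit(url)
    # Lowercase only the hostname; leave the path's case intact.
    return urlunsplit((parts.scheme, parts.netloc.lower()) + tuple(parts)[2:])

print key_sketch("http://YouTu.be/DpXGJJS0oWc", False)  # path ID gets mangled
print key_sketch("http://YouTu.be/DpXGJJS0oWc", True)   # path ID preserved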


Example 9: validate_link

def validate_link(url, whitelist=False):
    if url:
        url = sanitize_url(url)
        if url:
            if whitelist and domain(url) not in DOMAIN_WHITELIST:
                print "Domain %s not in whitelist." % domain(url)
                return False
            try:
                lbu = LinksByUrl._byID(LinksByUrl._key_from_url(url))
            except tdb_cassandra.NotFound:
                return url
            link_id36s = lbu._values()
            links = Link._byID36(link_id36s, data=True, return_dict=False)
            links = [l for l in links if not l._deleted]
            if len(links) == 0:
                return url
            print "Link %s exists..." % url
    return False
Developer: constantAmateur, Project: sciteit, Lines: 18, Source: sr_rss.py


Example 10: GET_s

    def GET_s(self, rest):
        """/s/http://..., show a given URL with the toolbar. if it's
           submitted, redirect to /tb/$id36"""
        force_html()
        path = demangle_url(request.fullpath)

        if not path:
            # it was malformed
            self.abort404()

        # if the domain is shame-banned, bail out.
        if is_shamed_domain(path)[0]:
            self.abort404()

        listing = hot_links_by_url_listing(path, sr=c.site, num=1)
        link = listing.things[0] if listing.things else None

        if c.cname and not c.authorized_cname:
            # In this case, we make some bad guesses caused by the
            # cname frame on unauthorised cnames. 
            # 1. User types http://foo.com/http://myurl?cheese=brie
            #    (where foo.com is an unauthorised cname)
            # 2. We generate a frame that points to
            #    http://www.reddit.com/r/foo/http://myurl?cnameframe=0.12345&cheese=brie
            # 3. Because we accept everything after the /r/foo/, and
            #    we've now parsed, modified, and reconstituted that
            #    URL to add cnameframe, we really can't make any good
            #    assumptions about what we've done to a potentially
            #    already broken URL, and we can't assume that we've
            #    rebuilt it in the way that it was originally
            #    submitted (if it was)
            # We could try to work around this with more guesses (by
            # having demangle_url try to remove that param, hoping
            # that it's not already a malformed URL, and that we
            # haven't re-ordered the GET params, removed
            # double-slashes, etc), but for now, we'll just refuse to
            # do this operation
            return self.abort404()

        if link:
            # we were able to find it, let's send them to the
            # link-id-based URL so that their URL is reusable
            return self.redirect(add_sr("/tb/" + link._id36))

        title = utils.domain(path)
        res = Frame(
            title=title,
            url=match_current_reddit_subdomain(path),
        )

        # we don't want clients to think that this URL is actually a
        # valid URL for search-indexing or the like
        request.environ['usable_error_content'] = spaceCompress(res.render())
        abort(404)
Developer: ChooseGoose, Project: reddit, Lines: 54, Source: toolbar.py


Example 11: quarantine_event

    def quarantine_event(self, event_type, subreddit,
            request=None, context=None):
        """Create a 'quarantine' event for event-collector.

        event_type: quarantine_interstitial_view, quarantine_opt_in,
            quarantine_opt_out, quarantine_interstitial_dismiss
        subreddit: The quarantined subreddit
        request, context: Should be pylons.request & pylons.c respectively;
            used to build the base Event

        """
        event = EventV2(
            topic="quarantine",
            event_type=event_type,
            request=request,
            context=context,
        )

        if context:
            if context.user_is_loggedin:
                event.add("verified_email", context.user.email_verified)
            else:
                event.add("verified_email", False)

        event.add("sr_id", subreddit._id)
        event.add("sr_name", subreddit.name)

        # Due to the redirect, the request object being sent isn't the 
        # original, so referrer and action data is missing for certain events
        if request and (event_type == "quarantine_interstitial_view" or
                 event_type == "quarantine_opt_out"):
            request_vars = request.environ["pylons.routes_dict"]
            event.add("sr_action", request_vars.get("action", None))

            # The thing_id the user is trying to view is a comment
            if request.environ["pylons.routes_dict"].get("comment", None):
                thing_id36 = request_vars.get("comment", None)
            # The thing_id is a link
            else:
                thing_id36 = request_vars.get("article", None)

            if thing_id36:
                event.add("thing_id", int(thing_id36, 36))

            referrer_url = request.headers.get('Referer', None)
            if referrer_url:
                event.add("referrer_url", referrer_url)
                event.add("referrer_domain", domain(referrer_url))

        self.save_event(event)
Developer: joannekoong, Project: reddit, Lines: 50, Source: eventcollector.py


Example 12: make_scraper

def make_scraper(url):
    domain = utils.domain(url)
    scraper = Scraper
    for suffix, cls in scrapers.iteritems():
        if domain.endswith(suffix):
            scraper = cls
            break
    
    #sometimes youtube scrapers masquerade as google scrapers
    if scraper == GootubeScraper:
        youtube_url = youtube_in_google(url)
        if youtube_url:
            return make_scraper(youtube_url)
    return scraper(url)
Developer: kevinrose, Project: diggit, Lines: 14, Source: scraper.py
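A hedged, self-contained sketch of that suffix dispatch, with stand-in classes (the real scrapers mapping and scraper classes live elsewhere in scraper.py):

class Scraper(object):
    def __init__(self, url):
        self.url = url

class YoutubeScraper(Scraper):
    pass

# Illustrative mapping from domain suffix to scraper class.
scrapers = {"youtube.com": YoutubeScraper}

def pick_scraper_class(url_domain):
    # First matching suffix wins; otherwise fall back to the generic Scraper.
    for suffix, cls in scrapers.iteritems():
        if url_domain.endswith(suffix):
            return cls
    return Scraper

print pick_scraper_class("youtube.com")  # YoutubeScraper
print pick_scraper_class("example.com")  # Scraper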


Example 13: on_crawlable_domain

    def on_crawlable_domain(self):
        # This ensures we don't have the port included.
        requested_domain = utils.domain(request.host)

        # If someone CNAMEs myspammysite.com to reddit.com or something, we
        # don't want search engines to index that.
        if not utils.is_subdomain(requested_domain, g.domain):
            return False

        # Only allow the canonical desktop site and mobile subdomains, since
        # we have canonicalization set up appropriately for them.
        # Note: in development, DomainMiddleware needs to be temporarily
        # modified to not skip assignment of reddit-domain-extension on
        # localhost for this to work properly.
        return (requested_domain == g.domain or
                request.environ.get('reddit-domain-extension') in
                    ('mobile', 'compact'))
Developer: APerson241, Project: reddit, Lines: 17, Source: robots.py
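The subdomain check is what keeps CNAMEd spam sites out of the index. A hedged sketch of what utils.is_subdomain presumably does (a domain counts as a subdomain of itself):

def is_subdomain_sketch(subdomain, base):
    """Hedged guess at utils.is_subdomain."""
    return subdomain == base or subdomain.endswith("." + base)

print is_subdomain_sketch("m.reddit.com", "reddit.com")      # True
print is_subdomain_sketch("myspammysite.com", "reddit.com")  # False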


Example 14: get_context_data

    def get_context_data(self, request, context):
        """Extract common data from the current request and context

        This is generally done explicitly in `__init__`, but is done by hand for
        votes before the request context is lost by the queuing.

        request, context: Should be pylons.request & pylons.c respectively
        """
        data = {}

        if context.user_is_loggedin:
            data["user_id"] = context.user._id
            data["user_name"] = context.user.name
        else:
            if context.loid:
                data.update(context.loid.to_dict())

        oauth2_client = getattr(context, "oauth2_client", None)
        if oauth2_client:
            data["oauth2_client_id"] = oauth2_client._id
            data["oauth2_client_name"] = oauth2_client.name
            data["oauth2_client_app_type"] = oauth2_client.app_type

        data["geoip_country"] = get_request_location(request, context)
        data["domain"] = request.host
        data["user_agent"] = request.user_agent
        data["user_agent_parsed"] = parse_agent(request.user_agent)

        http_referrer = request.headers.get("Referer", None)
        if http_referrer:
            data["referrer_url"] = http_referrer
            data["referrer_domain"] = domain(http_referrer)

        hooks.get_hook("eventcollector.context_data").call(
            data=data,
            user=context.user,
            request=request,
            context=context,
        )

        return data
Developer: Frankie-666, Project: reddit, Lines: 41, Source: eventcollector.py


Example 15: add_props

    def add_props(cls, user, wrapped):
        from r2.lib.count import incr_counts
        saved = Link._saved(user, wrapped) if user else {}
        hidden = Link._hidden(user, wrapped) if user else {}
        #clicked = Link._clicked(user, wrapped) if user else {}
        clicked = {}

        for item in wrapped:

            item.score = max(0, item.score)

            item.domain = (domain(item.url) if not item.is_self
                          else 'self.' + item.subreddit.name)
            item.top_link = False
            item.urlprefix = ''
            item.saved = bool(saved.get((user, item, 'save')))
            item.hidden = bool(hidden.get((user, item, 'hide')))
            item.clicked = bool(clicked.get((user, item, 'click')))
            item.num = None
            item.score_fmt = Score.number_only
                
        if c.user_is_loggedin:
            incr_counts(wrapped)
Developer: cmak, Project: reddit, Lines: 23, Source: link.py


Example 16: get_context_data

    def get_context_data(self, request, context):
        data = {}

        if context.user_is_loggedin:
            data["user_id"] = context.user._id
            data["user_name"] = context.user.name
        else:
            loid = request.cookies.get("loid", None)
            if loid:
                data["loid"] = loid

        oauth2_client = getattr(context, "oauth2_client", None)
        if oauth2_client:
            data["oauth2_client_id"] = oauth2_client._id

        data["domain"] = request.host
        data["user_agent"] = request.user_agent

        http_referrer = request.headers.get("Referer", None)
        if http_referrer:
            data["referrer_url"] = http_referrer
            data["referrer_domain"] = domain(http_referrer)

        return data
Developer: scott71, Project: reddit, Lines: 24, Source: eventcollector.py


Example 17: add_props

    def add_props(cls, user, wrapped):
        from r2.lib.pages import make_link_child
        from r2.lib.count import incr_counts
        from r2.lib.media import thumbnail_url
        from r2.lib.utils import timeago
        from r2.lib.template_helpers import get_domain
        from r2.models.subreddit import FakeSubreddit
        from r2.lib.wrapped import CachedVariable

        # referencing c's getattr is cheap, but not as cheap when it
        # is in a loop that calls it 30 times on 25-200 things.
        user_is_admin = c.user_is_admin
        user_is_loggedin = c.user_is_loggedin
        pref_media = user.pref_media
        pref_frame = user.pref_frame
        pref_newwindow = user.pref_newwindow
        cname = c.cname
        site = c.site

        if user_is_loggedin:
            saved_lu = []
            for item in wrapped:
                if not SaveHide._can_skip_lookup(user, item):
                    saved_lu.append(item._id36)

            saved = CassandraSave._fast_query(user._id36, saved_lu)
            hidden = CassandraHide._fast_query(user._id36, saved_lu)

            clicked = {}
        else:
            saved = hidden = clicked = {}

        trials = trial_info(wrapped)

        for item in wrapped:
            show_media = False
            if not hasattr(item, "score_fmt"):
                item.score_fmt = Score.number_only
            if c.render_style == 'compact':
                item.score_fmt = Score.points
            item.pref_compress = user.pref_compress
            if user.pref_compress and item.promoted is None:
                item.render_css_class = "compressed link"
                item.score_fmt = Score.points
            elif pref_media == 'on' and not user.pref_compress:
                show_media = True
            elif pref_media == 'subreddit' and item.subreddit.show_media:
                show_media = True
            elif item.promoted and item.has_thumbnail:
                if user_is_loggedin and item.author_id == user._id:
                    show_media = True
                elif pref_media != 'off' and not user.pref_compress:
                    show_media = True

            item.over_18 = bool(
                item.over_18 or item.subreddit.over_18
                or item._nsfw.findall(item.title))
            item.nsfw = item.over_18 and user.pref_label_nsfw

            item.is_author = (user == item.author)

            # always show a promo author their own thumbnail
            if item.promoted and (user_is_admin
                                  or item.is_author) and item.has_thumbnail:
                item.thumbnail = thumbnail_url(item)
            elif user.pref_no_profanity and item.over_18 and not c.site.over_18:
                if show_media:
                    item.thumbnail = "/static/nsfw2.png"
                else:
                    item.thumbnail = ""
            elif not show_media:
                item.thumbnail = ""
            elif item.has_thumbnail:
                item.thumbnail = thumbnail_url(item)
            elif item.is_self:
                item.thumbnail = g.self_thumb
            else:
                item.thumbnail = g.default_thumb

            item.score = max(0, item.score)

            if getattr(item, "domain_override", None):
                item.domain = item.domain_override
            else:
                item.domain = (domain(item.url) if not item.is_self else
                               'self.' + item.subreddit.name)
            item.urlprefix = ''

            if user_is_loggedin:
                item.saved = (user._id36, item._id36) in saved
                item.hidden = (user._id36, item._id36) in hidden

                item.clicked = bool(clicked.get((user, item, 'click')))
            else:
                item.saved = item.hidden = item.clicked = False

            item.num = None
            item.permalink = item.make_permalink(item.subreddit)
            if item.is_self:
                item.url = item.make_permalink(
#......... rest of the code omitted here .........
Developer: ketralnis, Project: reddit, Lines: 101, Source: link.py


Example 18: link_domain

    def link_domain(self):
        if self.is_self:
            return 'self'
        else:
            return domain(self.url)
Developer: ketralnis, Project: reddit, Lines: 5, Source: link.py


Example 19: on_crawlable_domain

    def on_crawlable_domain(self):
        return utils.domain(request.host) == g.domain
Developer: 0xcd03, Project: reddit, Lines: 2, Source: robots.py


Example 20: add_props

    def add_props(cls, user, wrapped):
        from r2.lib.pages import make_link_child
        from r2.lib.count import incr_counts
        from r2.lib import media
        from r2.lib.utils import timeago
        from r2.lib.template_helpers import get_domain
        from r2.models.subreddit import FakeSubreddit
        from r2.lib.wrapped import CachedVariable

        # referencing c's getattr is cheap, but not as cheap when it
        # is in a loop that calls it 30 times on 25-200 things.
        user_is_admin = c.user_is_admin
        user_is_loggedin = c.user_is_loggedin
        pref_media = user.pref_media
        pref_frame = user.pref_frame
        pref_newwindow = user.pref_newwindow
        cname = c.cname
        site = c.site

        if user_is_loggedin:
            try:
                saved = CassandraSave._fast_query(user, wrapped)
                hidden = CassandraHide._fast_query(user, wrapped)
            except tdb_cassandra.TRANSIENT_EXCEPTIONS as e:
                g.log.warning("Cassandra save/hide lookup failed: %r", e)
                saved = hidden = {}

            clicked = {}
        else:
            saved = hidden = clicked = {}

        for item in wrapped:
            show_media = False
            if not hasattr(item, "score_fmt"):
                item.score_fmt = Score.number_only
            if c.render_style == 'compact':
                item.score_fmt = Score.points
            item.pref_compress = user.pref_compress
            if user.pref_compress and item.promoted is None:
                item.render_css_class = "compressed link"
                item.score_fmt = Score.points
            elif pref_media == 'on' and not user.pref_compress:
                show_media = True
            elif pref_media == 'subreddit' and item.subreddit.show_media:
                show_media = True
            elif item.promoted and item.has_thumbnail:
                if user_is_loggedin and item.author_id == user._id:
                    show_media = True
                elif pref_media != 'off' and not user.pref_compress:
                    show_media = True

            item.nsfw_str = item._nsfw.findall(item.title)
            item.over_18 = bool(item.over_18 or item.subreddit.over_18 or
                                item.nsfw_str)
            item.nsfw = item.over_18 and user.pref_label_nsfw

            item.is_author = (user == item.author)

            item.thumbnail_sprited = False
            # always show a promo author their own thumbnail
            if item.promoted and (user_is_admin or item.is_author) and item.has_thumbnail:
                item.thumbnail = media.thumbnail_url(item)
            elif user.pref_no_profanity and item.over_18 and not c.site.over_18:
                if show_media:
                    item.thumbnail = "nsfw"
                    item.thumbnail_sprited = True
                else:
                    item.thumbnail = ""
            elif not show_media:
                item.thumbnail = ""
            elif item.has_thumbnail:
                item.thumbnail = media.thumbnail_url(item)
            elif item.is_self:
                item.thumbnail = "self"
                item.thumbnail_sprited = True
            else:
                item.thumbnail = "default"
                item.thumbnail_sprited = True

            item.score = max(0, item.score)

            if getattr(item, "domain_override", None):
                item.domain = item.domain_override
            else:
                item.domain = (domain(item.url) if not item.is_self
                               else 'self.' + item.subreddit.name)
            item.urlprefix = ''

            if user_is_loggedin:
                item.saved = (user, item) in saved
                item.hidden = (user, item) in hidden

                item.clicked = bool(clicked.get((user, item, 'click')))
            else:
                item.saved = item.hidden = item.clicked = False

            item.num = None
            item.permalink = item.make_permalink(item.subreddit)
            if item.is_self:
                item.url = item.make_permalink(item.subreddit,
#......... rest of the code omitted here .........
Developer: jcald, Project: reddit, Lines: 101, Source: link.py



Note: The r2.lib.utils.domain examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many programmers, and copyright remains with the original authors; consult the corresponding project's License before redistributing or reusing them, and do not repost without permission.

