Python feed.LazyBuilder Class Code Examples


This article compiles typical usage examples of the pyopenmensa.feed.LazyBuilder class in Python. If you have been wondering how the LazyBuilder class works, how to use it, or what real LazyBuilder code looks like in practice, the curated class examples below should help.



The following presents 20 code examples of the LazyBuilder class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
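Before the individual snippets, here is a minimal, self-contained sketch of the workflow they all share: create a LazyBuilder, optionally register legend and price information, add meals (or mark days closed), and serialize the result with toXMLFeed(). The dates, meal names, legend entries, and prices below are invented purely for illustration; only the method calls themselves (setLegendData, setAdditionalCharges, addMeal, setDayClosed, toXMLFeed) are taken from the examples that follow.

from pyopenmensa.feed import LazyBuilder

canteen = LazyBuilder()

# Map note abbreviations to their full legend text (cf. Example 14).
canteen.setLegendData({'1': 'mit Farbstoff', 'v': 'vegan'})

# Declare how non-student prices relate to the student price
# (cf. Example 9, which passes an empty dict of additional charges).
canteen.setAdditionalCharges('student', {})

# Register one meal: date, category, name, notes, and prices keyed by role.
canteen.addMeal('2024-01-15', 'Hauptgerichte', 'Gemüseauflauf',
                notes=['vegan'], prices={'student': '2,80 €'})

# Mark a day without service as closed (cf. Examples 18-20).
canteen.setDayClosed('2024-01-16')

# Serialize everything collected so far into an OpenMensa XML feed.
print(canteen.toXMLFeed())

Each parser below follows this same pattern; they differ only in how they scrape dates, meals, notes, and prices from their respective canteen websites.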

Example 1: metadata

    def metadata(self, request):
        meta = LazyBuilder(version=self.parser.version)

        meta.feeds.append(Feed(
            name='today',
            hour='8-14',
            url='/'.join([request.host, self.parser.name, self.name, 'today.xml']),
            priority=0,
            source=None,
            dayOfMonth='*',
            dayOfWeek='*',
            minute='0',
            retry=None
        ))

        meta.feeds.append(Feed(
            name='full',
            hour='8',
            url='/'.join([request.host, self.parser.name, self.name, 'full.xml']),
            priority=0,
            source=None,
            dayOfMonth='*',
            dayOfWeek='*',
            minute='0',
            retry=None
        ))

        return meta.toXMLFeed()
Developer: mswart, Project: openmensa-parsers, Lines: 28, Source: utils.py


Example 2: parse_url

def parse_url(url, today=False, canteentype="Mittagsmensa", this_week="", next_week=True, legend_url=None):
    canteen = LazyBuilder()
    canteen.legendKeyFunc = lambda v: v.lower()
    if not legend_url:
        legend_url = url[: url.find("essen/") + 6] + "wissenswertes/lebensmittelkennzeichnung"
    legend_doc = parse(urlopen(legend_url)).find(id="artikel")
    allergene = buildLegend(
        text=legend_doc.text.replace("\xa0", " "), regex=r"(?P<name>[A-Z]+) {3,}enthält (?P<value>\w+( |\t|\w)*)"
    )
    allergene["EI"] = "Ei"
    zusatzstoffe = buildLegend(
        text=legend_doc.text.replace("\xa0", " "), regex=r"(?P<name>\d+) {3,} (enthält )?(?P<value>\w+( |\t|\w)*)"
    )
    for tr in legend_doc.find_all("tr"):
        tds = tr.find_all("td")
        if len(tds) != 2:
            continue
        title = tds[0].find("strong")
        if title is None:
            continue
        else:
            title = title.text
        text = tds[1].text.replace("enthält", "").strip()
        if title.isdigit():
            zusatzstoffe[title] = text
        else:
            allergene[title] = text
    parse_week(url + this_week, canteen, canteentype, allergene=allergene, zusatzstoffe=zusatzstoffe)
    if not today and next_week is True:
        parse_week(url + "-kommende-woche", canteen, canteentype, allergene=allergene, zusatzstoffe=zusatzstoffe)
    if not today and type(next_week) is str:
        parse_week(url + next_week, canteen, canteentype, allergene=allergene, zusatzstoffe=zusatzstoffe)
    print(canteen.toXMLFeed())
    return canteen.toXMLFeed()
Developer: mswart, Project: openmensa-parsers, Lines: 34, Source: ostniedersachsen.py


Example 3: feed_all

    def feed_all(self, name):
        canteen = LazyBuilder()

        date = self.__now()

        # Get this week
        lastWeekday = -1
        while self.handler(canteen, self.xml2locId[name], date.date()):
            date += datetime.timedelta(days=1)
            if lastWeekday > date.weekday():
                break
            lastWeekday = date.weekday()

        # Skip over weekend
        if date.weekday() > 4:
            date += datetime.timedelta(days=7-date.weekday())

            # Get next week
            lastWeekday = -1
            while self.handler(canteen, self.xml2locId[name], date.date()):
                date += datetime.timedelta(days=1)
                if lastWeekday > date.weekday():
                    break
                lastWeekday = date.weekday()

        return canteen.toXMLFeed()
Developer: cvzi, Project: mensahd, Lines: 26, Source: __init__.py


Example 4: parse_url

def parse_url(url, today=False):
    canteen = LazyBuilder()
    parse_week(url + '.html', canteen)
    if not today:
        parse_week(url + '-w1.html', canteen)
        parse_week(url + '-w2.html', canteen)
    return canteen.toXMLFeed()
Developer: mlewe, Project: openmensa-parsers, Lines: 7, Source: dresden.py


Example 5: parse_url

def parse_url(url, mensa, *weeks, today):
    canteen = LazyBuilder()
    for week in weeks:
        parse_week(url + week, canteen, mensa)
        if today:
            break
    return canteen.toXMLFeed()
Developer: azrdev, Project: openmensa-parsers, Lines: 7, Source: marburg.py


Example 6: parse_url

def parse_url(url, today=False):
    canteen = LazyBuilder()
    parse_week(url + (datetime.date.today()
               + datetime.date.resolution * 7).strftime('/%Y/%W/'), canteen)
    if not today:
        parse_week(url + (datetime.date.today()
                       + datetime.date.resolution * 14).strftime('/%Y/%W/'), canteen)
    return canteen.toXMLFeed()
Developer: mlewe, Project: openmensa-parsers, Lines: 8, Source: hamburg.py


Example 7: feed

    def feed(self, name):
        canteen = LazyBuilder()
        if name in self.xmlnames:
            parse_url(canteen, name) # all categories
        else :
            xmlname_enty = [x for x in self.xmlnames if x[0] == name][0]
            parse_url(canteen, *xmlname_enty) # only certain categories

        return canteen.toXMLFeed()
Developer: cvzi, Project: mensahd, Lines: 9, Source: __init__.py


Example 8: parse_url

def parse_url(url, today=False):
    canteen = LazyBuilder()
    day = datetime.date.today()
    for _ in range(21):
        parse_day(canteen, '{}&date={}'.format(url, day.strftime('%Y-%m-%d')))
        if today:
            break
        day += datetime.timedelta(days=1)
    return canteen.toXMLFeed()
Developer: azrdev, Project: openmensa-parsers, Lines: 9, Source: leipzig.py


Example 9: parse_url

def parse_url(url, today):
    canteen = LazyBuilder()
    canteen.setAdditionalCharges('student', {})
    if today:
        parse_week(url, canteen)  # base url only contains current day
    else:
        parse_week(url + 'week', canteen)
        parse_week(url + 'nextweek', canteen)

    return canteen.toXMLFeed()
Developer: RafiKueng, Project: openmensa-parsers, Lines: 10, Source: darmstadt.py


Example 10: parse_url

def parse_url(url, today=False):
    base_data = load_base_data()

    canteen = LazyBuilder()
    with urlopen(url) as response:
        data = json.loads(response.read().decode())

    for day in data['days']:
        date = datetime.datetime.strptime(day['date'], UTC_DATE_STRING).date()

        if today and (datetime.date.today() != date):
            continue

        for counter in day['counters']:
            counter_name = counter['displayName']
            counter_description = counter['description']
            counter_hours = counter.get('openingHours')

            for meal in counter['meals']:
                if 'knownMealId' in meal:
                    # This is meant to allow recognizing recurring meals,
                    # for features like marking meals as favorites.
                    # Up to now, not really used in the mensaar.de API,
                    # nor functional in this API parser.
                    # The meal will still be recognized as every other meal.
                    print('knownMealId: %s' % meal['knownMealId'], file=sys.stderr)

                meal_name = meal['name']
                if 'category' in meal:
                    meal_name = '%s: %s' % (meal['category'], meal_name)

                meal_notes = (
                    # The description is typically the location
                    # (but not required to be by the API specification).
                    build_location(counter_description) +
                    build_hours(counter_hours) +
                    build_notes(base_data, meal['notices'], meal['components']))

                meal_prices = {}
                if 'prices' in meal:
                    prices = meal['prices']
                    for role in prices:
                        if role in ROLES:
                            meal_prices[base_data['roles'][role]] = prices[role]

                if 'pricingNotice' in meal:
                    meal_notes.append(meal['pricingNotice'])

                canteen.addMeal(date, counter_name,
                                meal_name, meal_notes, meal_prices)

    return canteen.toXMLFeed()
Developer: mswart, Project: openmensa-parsers, Lines: 52, Source: saarland.py


Example 11: parse_url

def parse_url(url, today=False):
    canteen = LazyBuilder()
    day = datetime.date.today()
    emptyCount = 0
    while emptyCount < 7:
        if not parse_day(canteen, '{}&day={}&month={}&year={}&limit=25'
                         .format(url, day.day, day.month, day.year),
                         day.strftime('%Y-%m-%d')):
            emptyCount += 1
        else:
            emptyCount = 0
        if today:
            break
        day += datetime.date.resolution
    return canteen.toXMLFeed()
Developer: mlewe, Project: openmensa-parsers, Lines: 15, Source: leipzig.py


Example 12: parse_url

def parse_url(url, today=False, canteentype='Mittagsmensa', this_week='', next_week=True, legend_url=None):
    canteen = LazyBuilder()
    canteen.legendKeyFunc = lambda v: v.lower()
    if not legend_url:
        legend_url = url[:url.find('essen/') + 6] + 'lebensmittelkennzeichnung'
    legend_doc = parse(urlopen(legend_url))
    canteen.setLegendData(
        text=legend_doc.find(id='artikel').text,
        regex=r'(?P<name>(\d+|[A-Z]+))\s+=\s+(?P<value>\w+( |\t|\w)*)'
    )
    parse_week(url + this_week, canteen, canteentype)
    if not today and next_week is True:
        parse_week(url + '-kommende-woche', canteen, canteentype)
    if not today and type(next_week) is str:
        parse_week(url + next_week, canteen, canteentype)
    return canteen.toXMLFeed()
Developer: steeb, Project: openmensa-parsers, Lines: 16, Source: ostniedersachsen.py


Example 13: parse_url

def parse_url(url, today=False, canteentype='Mittagsmensa', this_week='', next_week=True, legend_url=None):
    canteen = LazyBuilder()
    canteen.legendKeyFunc = lambda v: v.lower()
    if not legend_url:
        legend_url = url[:url.find('essen/') + 6] + 'wissenswertes/lebensmittelkennzeichnung'
    legend_doc = parse(urlopen(legend_url), 'lxml').find(id='artikel')
    allergene = buildLegend(
        text=legend_doc.text.replace('\xa0', ' '),
        regex=r'(?P<name>[A-Z]+) {3,}enthält (?P<value>\w+( |\t|\w)*)'
    )
    allergene['EI'] = 'Ei'
    zusatzstoffe = buildLegend(
        text=legend_doc.text.replace('\xa0', ' '),
        regex=r'(?P<name>\d+) {3,} (enthält )?(?P<value>\w+( |\t|\w)*)'
    )
    suballergene = re.compile(r'(?P<name>[0-9A-Z]+)[^a-zA-Z]*enthält (?P<value>\w+( |\t|\w)*)')
    for tr in legend_doc.find_all('tr'):
        tds = tr.find_all('td')
        if len(tds) != 2:
            continue
        title = tds[0].find('strong')
        if title is None:
            continue
        else:
            title = title.text
        lines = tds[1].text.split('\n')
        for line in lines[1:]:
            try_allergine = suballergene.match(line)
            if try_allergine:
                allergene[try_allergine.group('name')] = try_allergine.group('value')
        text = lines[0].replace('enthält', '').strip()
        if title.isdigit():
            zusatzstoffe[title] = text
        else:
            allergene[title] = text
    parse_week(url + this_week, canteen, canteentype,
               allergene=allergene, zusatzstoffe=zusatzstoffe)
    if not today and next_week is True:
        parse_week(url + '-kommende-woche', canteen, canteentype,
                   allergene=allergene, zusatzstoffe=zusatzstoffe)
    if not today and type(next_week) is str:
        parse_week(url + next_week, canteen, canteentype,
                   allergene=allergene, zusatzstoffe=zusatzstoffe)
    return canteen.toXMLFeed()
Developer: azrdev, Project: openmensa-parsers, Lines: 44, Source: ostniedersachsen.py


Example 14: parse_url

def parse_url(url, today=False):
    global legend
    canteen = LazyBuilder()
    canteen.setLegendData(legend)
    day = datetime.date.today()
    emptyCount = 0
    totalCount = 0
    while emptyCount < 7 and totalCount < 32:
        if not parse_day(canteen, '{}&tag={}&monat={}&jahr={}'
                         .format(url, day.day, day.month, day.year),
                         day.strftime('%Y-%m-%d')):
            emptyCount += 1
        else:
            emptyCount = 0
        if today:
            break
        totalCount += 1
        day += datetime.date.resolution
    return canteen.toXMLFeed()
Developer: azrdev, Project: openmensa-parsers, Lines: 19, Source: chemnitz_zwickau.py


Example 15: parse_url

def parse_url(url, data_canteen, today=False):
    canteen = LazyBuilder()

    data = urlopen(url).read().decode('utf-8')
    document = parse(data, 'lxml')

    dish = document.find(class_='neo-menu-single-dishes')
    if dish is not None:
        dishes = dish.find_all(name='tr', attrs={"data-canteen": data_canteen})
    else:
        dishes = []

    side = document.find(class_='neo-menu-single-modals')
    if side is not None:
        dishes = dishes + side.find_all(name='tr', attrs={"data-canteen": data_canteen})

    for dish in dishes:
        parse_dish(dish, canteen)

    return canteen.toXMLFeed()
Developer: mswart, Project: openmensa-parsers, Lines: 20, Source: marburg.py


Example 16: parse_url

def parse_url(url, today=False):
    canteen = LazyBuilder()

    canteen.extra_regex = re.compile(r'\((?P<extra>[0-9a-zA-Z]{1,3}'
                                     r'(?:,[0-9a-zA-Z]{1,3})*)\)', re.UNICODE)

    legend_url = 'https://www.stwdo.de/mensa-co/allgemein/zusatzstoffe/'
    legend = parse_legend(legend_url)
    canteen.setLegendData(legend)

    day = datetime.date.today()
    week = getWeekdays(day)

    for wDay in week:
        py = {'tx_pamensa_mensa[date]' : wDay}
        payload = urlencode(py).encode('ascii')
        data = rq.urlopen(url, payload).read().decode('utf-8')
        soup = BeautifulSoup(data, 'html.parser')
        parse_day(canteen, soup, wDay)

    return canteen.toXMLFeed()
Developer: mswart, Project: openmensa-parsers, Lines: 21, Source: dortmund.py


Example 17: parse_url

def parse_url(url, today=False):
    canteen = LazyBuilder()
    legend = {'f': 'fleischloses Gericht', 'v': 'veganes Gericht'}
    document = parse(urlopen(base + '/speiseplan/zusatzstoffe-de.html').read())
    for td in document.find_all('td', 'beschreibung'):
        legend[td.previous_sibling.previous_sibling.text] = td.text
    document = parse(urlopen(base + '/unsere-preise/').read())
    prices = {}
    for tr in document.find('table', 'essenspreise').find_all('tr'):
        meal = tr.find('th')
        if not meal or not meal.text.strip():
            continue
        if len(tr.find_all('td', 'betrag')) < 3:
            continue
        if 'titel' in meal.attrs.get('class', []) or 'zeilentitel' in meal.attrs.get('class', []):
            continue
        meal = meal.text.strip()
        prices[meal] = {}
        for role, _id in [('student', 0), ('employee', 1), ('other', 2)]:
            price_html = tr.find_all('td', 'betrag')[_id].text
            price_search = price_regex.search(price_html)
            if price_search:
                prices[meal][role] = price_search.group('price')
    errorCount = 0
    date = datetime.date.today()
    while errorCount < 7:
        try:
            document = parse(urlopen(url.format(date)).read())
        except HTTPError as e:
            if e.code == 404:
                errorCount += 1
                date += datetime.date.resolution
                continue
            else:
                raise e
        else:
            errorCount = 0
        for tr in document.find('table', 'zusatzstoffe').find_all('tr'):
            identifier = tr.find_all('td')[0].text \
                           .replace('(', '').replace(')', '')
            legend[identifier] = tr.find_all('td')[1].text.strip()
        canteen.setLegendData(legend)
        mensa_data = document.find('table', 'menu')
        category = None
        for menu_tr in mensa_data.find_all('tr'):
            if menu_tr.find('td', 'headline'):
                continue
            if menu_tr.find('td', 'gericht').text:
                category = menu_tr.find('td', 'gericht').text
            data = menu_tr.find('td', 'beschreibung')
            name = data.find('span').text.strip()
            notes = [span['title'] for span in data.find_all('span', title=True)]
            canteen.addMeal(
                date, category, name, notes,
                prices.get(category.replace('Aktionsessen', 'Bio-/Aktionsgericht'), {})
            )
        date += datetime.date.resolution
        if today:
            break
    return canteen.toXMLFeed()
Developer: a-andre, Project: openmensa-parsers, Lines: 60, Source: muenchen.py


Example 18: parse_url

def parse_url(url, today=False):
    content = urlopen(url).read()
    document = parse(content)
    legends = document.find_all('div', {'class': 'legende'})
    if len(legends) > 0:
        extraLegend = {int(v[0]): v[1] for v in reversed(legend_regex.findall(legends[0].text))}
    else:
        extraLegend = {}
    canteen = LazyBuilder()
    for day_td in document.find_all('td', text=day_regex):
        date = day_regex.search(day_td.string).group('date')
        table = None
        for element in day_td.parents:
            if element.name == 'table':
                table = element
                break
        if not table:
            continue
        for tr in table.tbody.find_all('tr'):
            if 'geschlossen' in tr.text or 'Feiertage' in tr.text:
                match = day_range_regex.search(tr.text)
                if not match:
                    canteen.setDayClosed(date)
                else:
                    fromDate = datetime.datetime.strptime(match.group('from'), '%d.%m.%Y')
                    toDate = datetime.datetime.strptime(match.group('to'), '%d.%m.%Y')
                    while fromDate <= toDate:
                        canteen.setDayClosed(fromDate.strftime('%Y-%m-%d'))
                        fromDate += datetime.date.resolution
                continue
            if len(tr) != 3:
                continue  # no meal
            strings = list(tr.contents[0].strings)
            name = strings[0]
            # prices:
            prices = strings[-1].split('|')
            print(prices)
            if '-' in map(lambda v: v.strip(), prices):
                prices = {}
            # notes:
            notes = []
            for img in tr.contents[1].find_all('img'):
                notes.append(img['alt'].replace('Symbol', '').strip())
            for extra in list(set(map(lambda v: int(v), extra_regex.findall(tr.text)))):
                if extra in extraLegend:
                    notes.append(extraLegend[extra])
            canteen.addMeal(date, 'Hauptgerichte', name, notes, prices, roles if prices else None)
    return canteen.toXMLFeed()
Developer: mlewe, Project: openmensa-parsers, Lines: 48, Source: magdeburg.py


Example 19: parsePlan

def parsePlan(url, internalMensaId, today):
    canteen = LazyBuilder()
    end = False
    while (url != None):
        dom = BeautifulSoup(urlopen(url).read(), 'lxml')
        date = dom.select('#mensa_date > p')[0].contents[0]
        menuDefinition = dom.find(id=internalMensaId)
        menuDescription = menuDefinition.parent.find('dd')
        tables = menuDescription.select('table')
        legend = {}
        legend = buildLegend(legend, str(dom), regex='<strong>(?P<name>\w+)\s*</strong>\s*-\s*(?P<value>[\w\s)(]+)')
        if tables != None and len(tables) == 1:
            table = tables[0]
            rows = table.find_all('tr')
            for row in rows:
                menuNameElement = row.select('td[class="mensa_col_55"] > b')
                if menuNameElement != None and menuNameElement[0].contents != None:
                    menuName = menuNameElement[0].contents[0]
                    category = 'Gericht'

                    # get notes
                    notes = {}
                    notesElement = row.select('td[class="mensa_col_55"] > span')
                    if notesElement != None and len(notesElement) > 0 and notesElement[0].text != None:
                        notes = [legend.get(n, n) for n in notesElement[0].text.split(' ') if n]

                    # get prices
                    prices = {}
                    for td in row.select('td[class="mensa_col_15"]'):
                        priceElement = td.find('b')
                        groupElement = td.find('span')
                        if priceElement != None and groupElement != None and groupElement.contents != None and len(groupElement.contents) > 0 and priceElement.contents != None and len(priceElement.contents) > 0:
                            group = str(groupElement.contents[0])
                            price = str(priceElement.contents[0])
                            if group == 'Stud.:':
                                prices['student'] = price
                            elif group == 'Bed.:':
                                prices['employee'] = price
                            elif group == 'Gast:':
                                prices['other'] = price

                    canteen.addMeal(date, category, menuName, notes, prices)
        else:
            canteen.setDayClosed(date)

        # check for further pages
        nextPageLink = dom.find(id='next_day_link')
        if nextPageLink == None or today:
            url = None
        else:
            url = 'https://www.studentenwerk-rostock.de/' + nextPageLink['href']
    return canteen.toXMLFeed()
Developer: azrdev, Project: openmensa-parsers, Lines: 52, Source: rostock.py


Example 20: parse_url

def parse_url(url, today=False):
    canteen = LazyBuilder()
    document = parse(urlopen(url).read(), 'lxml')

    for day_div in document.find_all('div', attrs={'data-day': True}):
        # parse date, warning: calculate year number needed
        date_test = day_regex.search(day_div['data-day'])
        if not date_test:
            print('Error: unable to parse date "{}"'.format(day_div['data-day']))
            continue
        else:
            year = datetime.datetime.now().year
            if datetime.datetime.now().month > int(date_test.group('month')):
                year += 1  # date from next year
            date = '{}-{}-{}'.format(year, date_test.group('month'), date_test.group('day'))

        closed_candidate = day_div.find('div', 'holiday') is not None

        for meal_article in day_div.find_all('article', 'menu'):
            name = meal_article.find('div', 'title').text
            if not name:
                continue

            category = meal_article.find('div', 'icon')['title']
            notes = []
            prices = {}

            additives = meal_article.find('div', 'additnr')
            if additives:
                notes += [additive.text for additive in additives.find_all('li')]
            notes += [v['title'] for v in meal_article.find_all('div', 'theicon') if v['title'] and v['title'] not in notes]

            price_div = meal_article.find('div', 'price')
            if price_div:
                for k, v in price_map.items():
                    price = price_div['data-' + k]
                    if price:
                        prices[v] = price
            canteen.addMeal(date, category, name, notes, prices)

        if closed_candidate and not canteen.hasMealsFor(date):
            canteen.setDayClosed(date)

    return canteen.toXMLFeed()
Developer: mswart, Project: openmensa-parsers, Lines: 44, Source: wuerzburg.py



Note: The pyopenmensa.feed.LazyBuilder class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, and the source code remains copyrighted by them. Please consult each project's license before redistributing or reusing the code; do not repost without permission.

