Python datetimeutil.utc_now Function Code Examples


This article collects typical usage examples of the Python function socorro.lib.datetimeutil.utc_now. If you are wondering what utc_now does, how to call it, or what real-world uses of it look like, the curated examples below should help.



The following presents 20 code examples of the utc_now function, ordered by popularity.
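As background for the snippets that follow, utc_now() returns the current time as a timezone-aware datetime in UTC, which is why the examples can safely do timedelta arithmetic and compare the result against stored timestamps. The sketch below illustrates that idea and the typical call patterns; it assumes a thin wrapper around datetime.datetime.now with a UTC tzinfo and is not socorro's exact implementation.

    import datetime

    def utc_now():
        """Return the current time as a timezone-aware datetime in UTC."""
        return datetime.datetime.now(datetime.timezone.utc)

    # Typical call patterns mirrored by the examples below.
    now = utc_now()
    one_day_ago = now - datetime.timedelta(days=1)  # timedelta arithmetic
    print(now.isoformat())                          # e.g. 2022-05-27T08:30:00.123456+00:00
    print(one_day_ago < now)                        # aware datetimes compare safely -> True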

Example 1: _set_ongoing_job

 def _set_ongoing_job(self, class_):
     app_name = class_.app_name
     info = self.job_state_database.get(app_name)
     if info:
         # Was it already ongoing?
         if info.get('ongoing'):
             # Unless it's been ongoing for ages, raise OngoingJobError
             age_hours = (utc_now() - info['ongoing']).seconds / 3600.0
             if age_hours < self.config.crontabber.max_ongoing_age_hours:
                 raise OngoingJobError(info['ongoing'])
             else:
                 self.logger.debug(
                     '{} has been ongoing for {:2} hours. Ignoring it and running the app anyway.'
                     .format(app_name, age_hours)
                 )
         info['ongoing'] = utc_now()
     else:
         info = {
             'next_run': None,
             'first_run': None,
             'last_run': None,
             'last_success': None,
             'last_error': {},
             'error_count': 0,
             'depends_on': [],
             'ongoing': utc_now(),
         }
     self.job_state_database[app_name] = info
Author: willkg | Project: socorro | Lines: 28 | Source: crontabber_app.py


Example 2: time_to_run

 def time_to_run(self, class_, time_):
     """return true if it's time to run the job.
     This is true if there is no previous information about its last run
     or if the last time it ran and set its next_run to a date that is now
     past.
     """
     app_name = class_.app_name
     try:
         info = self.database[app_name]
     except KeyError:
         if time_:
             h, m = [int(x) for x in time_.split(':')]
             # only run if this hour and minute is < now
             now = utc_now()
             if now.hour > h:
                 return True
             elif now.hour == h and now.minute >= m:
                 return True
             return False
         else:
             # no past information, run now
             return True
     next_run = info['next_run']
     if next_run < utc_now():
         return True
     return False
Author: vfazio | Project: socorro | Lines: 26 | Source: crontabber.py


Example 3: run

    def run(self):
        # if this is non-zero, we use it.
        if self.config.days_into_past:
            last_run = (
                utc_now() -
                datetime.timedelta(days=self.config.days_into_past)
            )
        else:
            try:
                # KeyError if it's never run successfully
                # TypeError if self.job_information is None
                last_run = self.job_information['last_success']
            except (KeyError, TypeError):
                # basically, the "virgin run" of this job
                last_run = utc_now()

        # bugzilla runs on PST, so we need to communicate in its time zone
        PST = tz.gettz('PST8PDT')
        last_run_formatted = last_run.astimezone(PST).strftime('%Y-%m-%d')
        query = self.config.query % last_run_formatted
        for (
            bug_id,
            signature_set
        ) in self._iterator(query):
            try:
                # each run of this loop is a transaction
                self.database_transaction_executor(
                    self.inner_transaction,
                    bug_id,
                    signature_set
                )
            except NothingUsefulHappened:
                pass
Author: Krispy2009 | Project: socorro | Lines: 33 | Source: bugzilla.py


Example 4: _normal_jobs_iter

 def _normal_jobs_iter(self):
     """
     Yields a list of job tuples pulled from the 'jobs' table for which the
     owner is this process and the started datetime is null.  This iterator
     is perpetual - it never raises the StopIteration exception
     """
     get_normal_job_sql = (
         "select"
         "    j.id,"
         "    j.uuid,"
         "    priority "
         "from"
         "    jobs j "
         "where"
         "    j.owner = %d"
         "    and j.starteddatetime is null "
         "order by queueddatetime"
         "  limit %d" % (self.processor_id,
                         self.config.batchJobLimit))
     normal_jobs_list = []
     last_query_timestamp = utc_now()
     while True:
         polling_threshold = utc_now() - self.config.pollingInterval
         if not normal_jobs_list and \
            last_query_timestamp < polling_threshold:  # get more
             normal_jobs_list = self.transaction(
                 execute_query_fetchall,
                 get_normal_job_sql
             )
             last_query_timestamp = utc_now()
         if normal_jobs_list:
             while normal_jobs_list:
                 yield normal_jobs_list.pop(0)
         else:
             yield None
Author: azuwis | Project: socorro | Lines: 35 | Source: legacy_new_crash_source.py


Example 5: test_no_new_crashes

    def test_no_new_crashes(self):
        new_crash_source = ESNewCrashSource(self.config)
        self.health_check()

        generator = new_crash_source.new_crashes(
            utc_now() - datetime.timedelta(days=1),
            'Firefox',
            ['43.0.1']
        )
        assert list(generator) == []

        self.index_crash(
            a_processed_crash,
            raw_crash=a_raw_crash,
            crash_id=a_processed_crash['uuid']
        )
        self.refresh_index()

        # Same test now that there is a processed crash in there
        # but notably under a different name and version.
        generator = new_crash_source.new_crashes(
            utc_now() - datetime.timedelta(days=1),
            'Firefox',
            ['43.0.1']
        )
        assert list(generator) == []
Author: stephendonner | Project: socorro | Lines: 26 | Source: test_new_crash_source.py


Example 6: updateCronjobsTable

def updateCronjobsTable(connection, cronjobName, success, lastTargetTime, failureMessage=None):
    cursor = connection.cursor()

    params = [lastTargetTime]
    if success:
        params.append(utc_now())
        sql = """
          /* socorro.cron.dailyMatviews updateCronjobsTable */
          UPDATE cronjobs
          SET
            last_target_time = %s,
            last_success = %s
          WHERE cronjob = %s
        """
    else:
        params.append(utc_now())
        params.append(failureMessage)
        sql = """
          /* socorro.cron.dailyMatviews updateCronjobsTable */
          UPDATE cronjobs
          SET
            last_target_time = %s,
            last_failure = %s,
            failure_message = %s
          WHERE cronjob = %s
        """
    params.append(cronjobName)

    cursor.execute(sql, params)
    connection.commit()
Author: xni | Project: socorro | Lines: 30 | Source: dailyMatviews.py


Example 7: test_slow_run_job

    def test_slow_run_job(self):
        config_manager, json_file = self._setup_config_manager(
          'socorro.unittest.cron.test_crontabber.SlowJob|1h'
        )

        with config_manager.context() as config:
            tab = crontabber.CronTabber(config)
            time_before = utc_now()
            tab.run_all()
            time_after = utc_now()
            time_taken = (time_after - time_before).seconds
            #time_taken = (time_after - time_before).microseconds / 1000.0 / 1000.0
            #print time_taken
            self.assertEqual(round(time_taken), 1.0)

            # check that this was written to the JSON file
            # and that the next_run is going to be 1 day from now
            assert os.path.isfile(json_file)
            structure = json.load(open(json_file))
            information = structure['slow-job']
            self.assertEqual(information['error_count'], 0)
            self.assertEqual(information['last_error'], {})
            self.assertTrue(information['next_run'].startswith(
                             (time_before + datetime.timedelta(hours=1))
                              .strftime('%Y-%m-%d %H:%M:%S')))
Author: Meghashyamt | Project: socorro | Lines: 25 | Source: test_crontabber.py


Example 8: test_sending_many_emails

    def test_sending_many_emails(self, exacttarget_mock):
        """Test that we can send emails to a lot of users in the same run. """

        # First add a lot of emails.
        now = utc_now() - datetime.timedelta(minutes=30)

        config_manager = self._setup_storage_config()
        with config_manager.context() as config:
            storage = ElasticSearchCrashStorage(config)

            for i in range(21):
                storage.save_processed({
                    'uuid': 'fake-%s' % i,
                    'email': 'fake-%[email protected]' % i,  # placeholder domain; address redacted in the source page
                    'product': 'WaterWolf',
                    'version': '20.0',
                    'release_channel': 'Release',
                    'date_processed': now,
                })

            storage.es.refresh()

        config_manager = self._setup_simple_config()
        with config_manager.context() as config:
            job = automatic_emails.AutomaticEmailsCronApp(config, '')
            job.run(utc_now())

            et_mock = exacttarget_mock.return_value
            # Verify that we have the default 4 results + the 21 we added.
            self.assertEqual(et_mock.trigger_send.call_count, 25)
Author: pkucoin | Project: socorro | Lines: 30 | Source: test_automatic_emails.py


Example 9: test_delete_old_indices

    def test_delete_old_indices(self):
        # Create old indices to be deleted.
        self.index_client.create('socorro200142', {})
        self.indices.append('socorro200142')

        self.index_client.create('socorro200000', {})
        self.indices.append('socorro200000')

        # Create an old aliased index.
        self.index_client.create('socorro200201_20030101', {})
        self.indices.append('socorro200201_20030101')
        self.index_client.put_alias(
            index='socorro200201_20030101',
            name='socorro200201',
        )

        # Create a recent aliased index.
        last_week_index = self.get_index_for_date(
            utc_now() - datetime.timedelta(weeks=1)
        )
        self.index_client.create('socorro_some_aliased_index', {})
        self.indices.append('socorro_some_aliased_index')
        self.index_client.put_alias(
            index='socorro_some_aliased_index',
            name=last_week_index,
        )

        # Create a recent index that should not be deleted.
        now_index = self.get_index_for_date(utc_now())
        self.index_client.create(now_index, {})
        self.indices.append(now_index)

        # These will raise an error if an index was not correctly created.
        assert self.index_client.exists('socorro200142')
        assert self.index_client.exists('socorro200000')
        assert self.index_client.exists('socorro200201')
        assert self.index_client.exists(now_index)
        assert self.index_client.exists(last_week_index)

        api = IndexCleaner(self.config)
        api.delete_old_indices()

        # Verify the recent index is still there.
        ok_(self.index_client.exists(now_index))
        ok_(self.index_client.exists(last_week_index))

        # Verify the old indices are gone.
        ok_(not self.index_client.exists('socorro200142'))
        ok_(not self.index_client.exists('socorro200000'))
        ok_(not self.index_client.exists('socorro200201'))
Author: amuntner | Project: socorro | Lines: 50 | Source: test_index_cleaner.py


Example 10: test_cleanup_radix

    def test_cleanup_radix(self):
        self.fsrts._current_slot = lambda: ["00", "00_00"]
        self.fsrts.save_raw_crash({"test": "TEST"}, {"foo": "bar", self.fsrts.config.dump_field: "baz"}, self.CRASH_ID)
        self.fsrts._current_slot = lambda: ["10", "00_01"]

        self.assertEqual(list(self.fsrts.new_crashes()), [self.CRASH_ID])
        self.assertEqual(list(self.fsrts.new_crashes()), [])

        config_manager, json_file = self._setup_config_manager()
        with config_manager.context() as config:
            tab = crontabber.CronTabber(config)

        tab.run_all()

        self.assertEqual(os.listdir(self.fsrts.config.fs_root), [])

        future = (utc_now() + datetime.timedelta(days=10)).strftime("%Y%m%d")
        future_id = "0bba929f-8721-460c-dead-a43c%s" % future

        self.fsrts._current_slot = lambda: ["00", "00_00"]
        self.fsrts.save_raw_crash({"test": "TEST"}, {"foo": "bar", self.fsrts.config.dump_field: "baz"}, future_id)
        self.fsrts._current_slot = lambda: ["10", "00_01"]

        self.assertEqual(list(self.fsrts.new_crashes()), [future_id])
        self.assertEqual(list(self.fsrts.new_crashes()), [])

        tab.run_all()

        self.assertEqual(os.listdir(self.fsrts.config.fs_root), [future])
Author: rfw | Project: socorro | Lines: 29 | Source: test_cleanup_radix.py


Example 11: test_update_user

    def test_update_user(self):
        config_manager = self._setup_simple_config()
        with config_manager.context() as config:
            job = automatic_emails.AutomaticEmailsCronApp(config, '')
            now = utc_now()

            report = {
                'email': '[email protected]'  # placeholder; address redacted in the source page
            }
            job.update_user(report, now, self.conn)

            cursor = self.conn.cursor()
            cursor.execute("""
                SELECT last_sending FROM emails WHERE email=%(email)s
            """, report)

            self.assertEqual(cursor.rowcount, 1)
            row = cursor.fetchone()
            self.assertEqual(row[0], now)

            # Test with a non-existing user
            report = {
                'email': '[email protected]'  # placeholder for a different, non-existing user; address redacted in the source page
            }
            job.update_user(report, now, self.conn)

            cursor = self.conn.cursor()
            cursor.execute("""
                SELECT last_sending FROM emails WHERE email=%(email)s
            """, report)

            self.assertEqual(cursor.rowcount, 1)
            row = cursor.fetchone()
            self.assertEqual(row[0], now)
Author: esamanas | Project: socorro | Lines: 34 | Source: test_automatic_emails.py


Example 12: __init__

  def __init__(self,*args,**kwargs):
    """
    Passes appropriate kwargs to Config, pays local attention to these keys:
    updateInterval: default: '0' format: 'dd:hh:mm:ss', leading parts optional. Must be >= 0 seconds.
    updateFunction: default: noop(). Takes self as argument. Behavior: Updates default values in argument
    reEvaluateFunction: default: noop(). Takes self as argument. Behavior: Mutates values in argument
    signalNumber: default: SIGALRM (14). If 0, then signals will not be handled.
      Instances that share the same signalNumber will all be update()-ed at every signal.

    self.internal.updateFunction may be set after construction if desired: Avoids double-work at construction
    self.internal.reEvalutateFunction may be set after construction if desired, but this is not recommended.
    """
    skwargs = dict([(x,kwargs[x]) for x in socorro_config.getDefaultedConfigOptions().keys() if x in kwargs])
    for i in range(len(args)):
      skwargs[socorro_config.namedConfigOptions[i]] = args[i]
    super(DynamicConfig,self).__init__(**skwargs)
    self.internal.updateFunction = kwargs.get('updateFunction',noop)
    self.internal.reEvaluateFunction = kwargs.get('reEvaluateFunction',noop)
    self.internal.signalNumber = kwargs.get('signalNumber',14)
    self.internal.nextUpdate = None
    updateInterval = kwargs.get('updateInterval','0:0:0:0')
    self.internal.updateDelta = socorro_config.timeDeltaConverter(updateInterval)
    if self.internal.updateDelta:
      if self.internal.updateDelta < datetime.timedelta(0):
        raise ValueError("updateInterval must be non-negative, but %s"%self.internal.updateDelta)
      self.internal.nextUpdate = utc_now() + self.internal.updateDelta

    # finally: make sure we are current
    if self.internal.signalNumber:
      priorSignal = signal.signal(self.internal.signalNumber,DynamicConfig.handleAlarm)
    self.doUpdate()
    DynamicConfig.instances[id(self)] = self
Author: Earth4 | Project: socorro | Lines: 32 | Source: dynamicConfigurationManager.py


Example 13: get_signatures

    def get_signatures(self, **kwargs):
        """Return top crashers by signatures.

        See http://socorro.readthedocs.org/en/latest/middleware.html#tcbs
        """
        filters = [
            ("product", None, "str"),
            ("version", None, "str"),
            ("crash_type", "all", "str"),
            ("to_date", datetimeutil.utc_now(), "datetime"),
            ("duration", datetime.timedelta(7), "timedelta"),
            ("os", None, "str"),
            ("limit", 100, "int"),
            ("date_range_type", None, "str")
        ]

        params = external_common.parse_arguments(filters, kwargs)
        params.logger = logger

        # what the twoPeriodTopCrasherComparison() function does is that it
        # makes a start date from taking the to_date - duration
        if params.duration > datetime.timedelta(30):
            raise BadArgumentError('Duration too long. Max 30 days.')

        with self.get_connection() as connection:
            return tcbs.twoPeriodTopCrasherComparison(connection, params)
Author: Earth4 | Project: socorro | Lines: 26 | Source: crashes.py


Example 14: test_basic_run

    def test_basic_run(self):
        cur = self.conn.cursor()
        # Ensure test table is present.
        statement = """
            INSERT INTO raw_adi
            (date, product_name, adi_count) VALUES
            (%(first)s, 'WinterFox', 11),
            (%(second)s, 'WinterFox', 23)
        """
        second = utc_now().date()
        first = second - datetime.timedelta(days=1)
        cur.execute(statement, {'first': first, 'second': second})
        self.conn.commit()

        # Run the crontabber job to remove the test table.
        config_manager = self._setup_config_manager(days_to_keep=1)
        with config_manager.context() as config:
            tab = CronTabber(config)
            tab.run_all()

        # Basic assertion test of stored procedure.
        information = self._load_structure()
        assert information['clean-raw-adi']
        assert not information['clean-raw-adi']['last_error']
        assert information['clean-raw-adi']['last_success']

        # Ensure test row was removed
        cur.execute("""
            SELECT date FROM raw_adi
        """)
        result, = cur.fetchall()
        report_date = result[0]
        eq_(report_date, second)
Author: Krispy2009 | Project: socorro | Lines: 33 | Source: test_clean_raw_adi.py


Example 15: mocked_urlopener

        def mocked_urlopener(url, today=None):
            if today is None:
                today = utc_now()
            html_wrap = "<html><body>\n%s\n</body></html>"
            if url.endswith('/firefox/'):
                return html_wrap % """
                <a href="candidates/">candidates</a>
                <a href="nightly/">nightly</a>
                """
            if url.endswith('/firefox/nightly/'):
                return html_wrap % """
                <a href="10.0-candidates/">10.0-candidiates</a>
                """
            if url.endswith('/firefox/candidates/'):
                return html_wrap % """
                <a href="10.0b4-candidates/">10.0b4-candidiates</a>
                """
            if (url.endswith('/firefox/nightly/10.0-candidates/') or
                url.endswith('/firefox/candidates/10.0b4-candidates/')):
                return html_wrap % """
                <a href="build1/">build1</a>
                """
            if (url.endswith('/firefox/nightly/10.0-candidates/build1/') or
                url.endswith('/firefox/candidates/10.0b4-candidates/build1/')):
                return html_wrap % """
                <a href="linux_info.txt">linux_info.txt</a>
                """
            if url.endswith(today.strftime('/firefox/nightly/%Y/%m/')):
                return html_wrap % today.strftime("""
                <a href="%Y-%m-%d-trunk/">%Y-%m-%d-trunk</a>
                """)
            if url.endswith(today.strftime(
              '/firefox/nightly/%Y/%m/%Y-%m-%d-trunk/')):
                return html_wrap % """
                <a href="mozilla-nightly-15.0a1.en-US.linux-x86_64.txt">txt</a>
                <a href="mozilla-nightly-15.0a2.en-US.linux-x86_64.txt">txt</a>
                """
            if url.endswith(today.strftime(
              '/firefox/nightly/%Y/%m/%Y-%m-%d-trunk/mozilla-nightly-15.0a1.en'
              '-US.linux-x86_64.txt')):
                return (
                   "20120505030510\n"
                   "http://hg.mozilla.org/mozilla-central/rev/0a48e6561534"
                )
            if url.endswith(today.strftime(
              '/firefox/nightly/%Y/%m/%Y-%m-%d-trunk/mozilla-nightly-15.0a2.en'
              '-US.linux-x86_64.txt')):
                return (
                   "20120505443322\n"
                   "http://hg.mozilla.org/mozilla-central/rev/xxx123"
                )
            if url.endswith(
              '/firefox/nightly/10.0-candidates/build1/linux_info.txt'):
                return "buildID=20120516113045"
            if url.endswith(
              '/firefox/candidates/10.0b4-candidates/build1/linux_info.txt'):
                return "buildID=20120516114455"

            # bad testing boy!
            raise NotImplementedError(url)
Author: vdt | Project: socorro | Lines: 60 | Source: test_ftpscraper.py


Example 16: transferOne

 def transferOne(
     self, ooid, anotherJsonDumpStorage, createLinks=True, removeOld=False, webheadHostName=None, aDate=None
 ):
     """
 Transfer data from another JsonDumpStorage instance into this instance of JsonDumpStorage
 ooid - the id of the data to transfer
 anotherJsonDumpStorage - An instance of JsonDumpStorage holding the data to be transferred
 createLinks - If true, create symlinks from and to date subdir
 removeOld - If true, attempt to delete the files and symlinks in source file tree
 webheadHostName: Used if known
 aDate: Used if unable to parse date from source directories and uuid
 NOTE: Assumes that the path names and suffixes for anotherJsonDumpStorage are the same as for self
 """
     self.logger.debug("%s - transferOne %s %s", threading.currentThread().getName(), ooid, aDate)
     jsonFromFile = anotherJsonDumpStorage.getJson(ooid)
     self.logger.debug("%s - fetched json", threading.currentThread().getName())
     dumpFromFile = os.path.splitext(jsonFromFile)[0] + anotherJsonDumpStorage.dumpSuffix
     if createLinks:
         self.logger.debug("%s - fetching stamp", threading.currentThread().getName())
         stamp = anotherJsonDumpStorage.pathToDate(anotherJsonDumpStorage.lookupOoidInDatePath(None, ooid, None)[0])
     else:
         self.logger.debug("%s - not bothering to fetch stamp", threading.currentThread().getName())
         stamp = None
     self.logger.debug("%s - fetched pathToDate ", threading.currentThread().getName())
     if not stamp:
         if not aDate:
             aDate = utc_now()
         stamp = aDate
     self.logger.debug("%s - about to copyFrom ", threading.currentThread().getName())
     self.copyFrom(ooid, jsonFromFile, dumpFromFile, webheadHostName, stamp, createLinks, removeOld)
Author: xni | Project: socorro | Lines: 30 | Source: JsonDumpStorage.py


Example 17: fillProcessorTable

def fillProcessorTable(cursor, processorCount, stamp=None, processorMap = {},logger = None):
  """
  Puts some entries into the processor table.
  Also creates priority_jobs_NNN for each processor id, unless that table exists
  Given a map of id->timestamp, sets the lastseendatetime for each successive processor to that stamp
  (Ignores ids generated by the count or in the processorMap, and uses database's serial id generator)
  """
  if not logger:
    logger = logging.getLogger()

  if not stamp: stamp = utc_now()
  if not processorCount and not processorMap: return
  sql = "INSERT INTO processors (name,startdatetime,lastseendatetime) VALUES (%s,%s,%s);"
  data = []
  if processorMap:
    data.extend([('test_%d'%(id),stamp,processorMap.get(id,stamp)) for id in processorMap.keys() ])
  else:
    data.extend([('test_%d'%(x),stamp, stamp) for x in range(1,processorCount+1) ])
  try:
    cursor.executemany(sql,data)
    cursor.connection.commit()

    sql = "SELECT id from processors;"
    cursor.execute(sql)
    allIds = cursor.fetchall()
    cursor.connection.rollback()
    sql = "CREATE TABLE priority_jobs_%s (uuid varchar(50) not null primary key);"
    for tup in allIds:
      try:
        cursor.execute(sql%(tup[0]))
        cursor.connection.commit()
      except psycopg2.ProgrammingError:
        cursor.connection.rollback()
  finally:
    cursor.connection.rollback()
Author: Earth4 | Project: socorro | Lines: 35 | Source: dbtestutil.py


Example 18: test_mapping

    def test_mapping(self, mapping):
        """Verify that a mapping is correct.

        This function does so by first creating a new, temporary index in
        elasticsearch using the mapping. It then takes some recent crash
        reports that are in elasticsearch and tries to insert them in the
        temporary index. Any failure in any of those steps will raise an
        exception. If any is raised, that means the mapping is incorrect in
        some way (either it doesn't validate against elasticsearch's rules,
        or is not compatible with the data we currently store).

        If no exception is raised, the mapping is likely correct.

        This function is to be used in any place that can change the
        `storage_mapping` field in any Super Search Field.
        Methods `create_field` and `update_field` use it, see above.
        """
        temp_index = 'socorro_mapping_test'

        es_connection = self.get_connection()

        # Import at runtime to avoid dependency circle.
        from socorro.external.es.index_creator import IndexCreator
        index_creator = IndexCreator(self.config)
        try:
            index_creator.create_index(
                temp_index,
                mapping,
            )

            now = datetimeutil.utc_now()
            last_week = now - datetime.timedelta(days=7)
            current_indices = self.generate_list_of_indexes(last_week, now)

            crashes_sample = es_connection.search(
                index=current_indices,
                doc_type=self.config.elasticsearch.elasticsearch_doctype,
                size=self.config.elasticsearch.mapping_test_crash_number,
            )
            crashes = [x['_source'] for x in crashes_sample['hits']['hits']]

            for crash in crashes:
                es_connection.index(
                    index=temp_index,
                    doc_type=self.config.elasticsearch.elasticsearch_doctype,
                    body=crash,
                )
        except elasticsearch.exceptions.ElasticsearchException as e:
            raise BadArgumentError(
                'storage_mapping',
                msg='Indexing existing data in Elasticsearch failed with the '
                    'new mapping. Error is: %s' % str(e),
            )
        finally:
            try:
                index_creator.get_index_client().delete(temp_index)
            except elasticsearch.exceptions.NotFoundError:
                # If the index does not exist (if the index creation failed
                # for example), we don't need to do anything.
                pass
Author: snorp | Project: socorro | Lines: 60 | Source: super_search_fields.py


Example 19: testLookupOoidInDatePath

 def testLookupOoidInDatePath(self):
   d = dumpStorage.DumpStorage(self.testDir)
   expected = {}
   count = 0
   for ooid,v in createJDS.jsonFileData.items():
     dateS = v[0]
     if 0 == count%2:
       nd,dd = d.newEntry(ooid,datetime.datetime(*[int(x) for x in dateS.split('-')], tzinfo=UTC))
       expected[ooid] = dd
     elif 0 == count%5:
       expected[ooid] = None
       pass
     else:
       nd,dd = d.newEntry(ooid)
       expected[ooid] = dd
     count += 1
     dateS = v[0]
   count = 0
   for ooid in createJDS.jsonFileData.keys():
     dateS = v[0]
     if expected[ooid]:
       exEnd = datetime.datetime(*[int(x) for x in dateS.split('-')], tzinfo=UTC)
       passDate = utc_now()
       if 0 == count%3:
         passDate = None
       else:
         passDate = exEnd
       got,ignore = d.lookupOoidInDatePath(passDate,ooid)
       assert expected[ooid] == got, 'For %s: Expected %s, got %s'%(ooid,expected[ooid],got)
Author: Manchester412 | Project: socorro | Lines: 29 | Source: testDumpStorage.py


Example 20: POST

    def POST(self, *args):
        raw_crash, dumps = \
            self._make_raw_crash_and_dumps(web.webapi.rawinput())

        current_timestamp = utc_now()
        raw_crash.submitted_timestamp = current_timestamp.isoformat()
        # legacy - ought to be removed someday
        raw_crash.timestamp = time.time()

        crash_id = createNewOoid(current_timestamp)

        raw_crash.legacy_processing = self.legacy_throttler.throttle(raw_crash)
        if raw_crash.legacy_processing == LegacyThrottler.DISCARD:
            self.logger.info('%s discarded', crash_id)
            return "Discarded=1\n"
        if raw_crash.legacy_processing == LegacyThrottler.IGNORE:
            self.logger.info('%s ignored', crash_id)
            return "Unsupported=1\n"

        crash_storage = self.context.crashStoragePool.crashStorage()
        try:
            crash_storage.save_raw(
                crash_id,
                raw_crash,
                dumps
            )
        except PolyStorageError, x:
            self.logger.error('%s storage exception: %s',
                              crash_id,
                              str(x.exceptions),  # log internal error set
                              exc_info=True)
            raise
Author: ajsb85 | Project: socorro | Lines: 32 | Source: wsgicollector.py



Note: The socorro.lib.datetimeutil.utc_now examples in this article were collected by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their authors, who retain copyright over the original code; consult each project's license before redistributing or reusing it. Do not reproduce this compilation without permission.

