This article collects typical usage examples of the Python redo.retry function. If you have been wondering how exactly retry is used, how to call it, or what real-world examples look like, the hand-picked code samples here should help.
The sections below present 20 code examples of the retry function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system surface better Python code samples.
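Before diving into the examples, here is a minimal, hedged sketch of what a typical redo.retry call looks like. The keyword arguments shown (attempts, sleeptime, max_sleeptime, sleepscale, jitter, retry_exceptions, cleanup, args) are the ones that appear throughout the examples below; the fetch_json helper and its URL are hypothetical stand-ins, not taken from any example.

import logging
import requests
from redo import retry

def fetch_json(url):
    # A flaky operation worth retrying: raise on any non-2xx response.
    r = requests.get(url, timeout=30)
    r.raise_for_status()
    return r.json()

result = retry(
    fetch_json,                                      # callable to invoke
    attempts=5,                                      # total number of tries
    sleeptime=10,                                    # initial delay between tries (seconds)
    max_sleeptime=300,                               # cap on the delay
    sleepscale=1.5,                                  # back-off multiplier applied per retry
    jitter=3,                                        # random +/- seconds added to each delay
    retry_exceptions=(requests.RequestException,),   # only these exceptions trigger a retry
    cleanup=lambda: logging.warning("retrying..."),  # hook run before each retry
    args=("https://example.invalid/api",),           # positional args passed to the callable
)

In short, retry calls the function with the given args/kwargs; if it raises one of retry_exceptions, retry sleeps and tries again (scaling the delay by sleepscale up to max_sleeptime) until attempts is exhausted, at which point the last exception propagates. Defaults may differ between redo versions, so check the redo documentation for anything outside this common pattern.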
Example 1: add_chain
def add_chain(self):
    from redo import retry
    import requests
    import pem

    def get_chain():
        r = requests.get(self.chain_url)
        r.raise_for_status()
        return r.text

    chain = retry(get_chain)

    req = {"chain": []}
    chain = pem.parse(chain)
    for i in range(len(chain)):
        cert = crypto.load_certificate(crypto.FILETYPE_PEM, str(chain[i]))
        der = crypto.dump_certificate(crypto.FILETYPE_ASN1, cert)
        req["chain"].append(base64.b64encode(der))

    def post_chain():
        r = requests.post(self.log_url + '/ct/v1/add-chain', json=req)
        r.raise_for_status()
        return r.json()

    resp = retry(post_chain)
    sct = SignedCertificateTimestamp(resp)
    self.write_to_file(self.sct_filename, sct.to_rfc6962())
Author: luke-chang | Project: gecko-1 | Lines: 27 | File: submit-to-ct.py
Example 2: updateWorkloadStateTable
def updateWorkloadStateTable(self, action):
    self.currentTime = str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))

    if (action == Orchestrator.ACTION_START) and (self.scalingProfile is not None):
        UpdateExpressionAttr = 'SET Profile= :profile, LastActionTime= :currentTime, LastActionType= :actionType'
        ExpressionAttributeValuesAttr = {
            ':profile': self.scalingProfile,
            ':currentTime': self.currentTime,
            ':actionType': action
        }
    else:
        UpdateExpressionAttr = 'SET LastActionTime= :currentTime, LastActionType= :actionType'
        ExpressionAttributeValuesAttr = {
            ':currentTime': self.currentTime,
            ':actionType': action,
        }

    try:
        retry(self.WorkloadStateTable.update_item, attempts=5, sleeptime=0, jitter=0, kwargs={
            "Key": {
                'Workload': self.partitionTargetValue,
            },
            "UpdateExpression": UpdateExpressionAttr,
            "ExpressionAttributeValues": ExpressionAttributeValuesAttr
        })
    except Exception as e:
        msg = 'Orchestrator::updateWorkloadStateTable() Exception encountered during DDB update -->'
        logger.error(msg + str(e))
Author: tangerinedream | Project: AWS_EC2_Scheduler | Lines: 30 | File: Orchestrator.py
Example 3: refresh_antivirus
def refresh_antivirus(self):
    self.info("Refreshing clamav db...")
    try:
        redo.retry(lambda: sh.freshclam("--stdout", "--verbose", _timeout=300, _err_to_out=True))
        self.info("Done.")
    except sh.ErrorReturnCode:
        self.warning("Freshclam failed, skipping DB update")
Author: subsevenx2001 | Project: gecko-dev | Lines: 7 | File: beet_mover.py
Example 4: testRetrySelectiveExceptionSucceed
def testRetrySelectiveExceptionSucceed(self):
    retry(
        _raiseCustomException,
        attempts=2,
        sleeptime=0,
        jitter=0,
        retry_exceptions=(NewError,),
    )
Author: bhearsum | Project: redo | Lines: 8 | File: test_retry.py
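The test in Example 4 depends on two helpers defined elsewhere in redo's test module. Below is a hedged sketch of what they plausibly look like; the real definitions in the repository may differ, so treat these as hypothetical reconstructions only.

# Hypothetical reconstructions of the helpers referenced by Example 4.
class NewError(Exception):
    """Custom exception type that retry is told to treat as retryable."""

_attempt = {"n": 0}

def _raiseCustomException():
    # Fail with NewError on the first call and succeed on the second, so that
    # retry(..., attempts=2, retry_exceptions=(NewError,)) ends in success.
    _attempt["n"] += 1
    if _attempt["n"] == 1:
        raise NewError("deliberate first-attempt failure")
    return True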
Example 5: slavealloc_disable
def slavealloc_disable(self, machine, loanid):
    try:
        url = furl(current_app.config.get("SLAVEAPI_URL", None))
        url.path.add(machine).add("actions").add("disable")
        loan_bug = Loans.query.get(loanid).bug_id
        postdata = dict(reason="Being loaned on slaveloan bug %s" % loan_bug)
        retry(requests.post, args=(str(url),), kwargs=dict(data=postdata)).json()
        return machine
    except Exception as exc:  # pylint: disable=W0703
        logger.exception(exc)
        self.retry(exc=exc)
Author: catlee | Project: build-relengapi | Lines: 11 | File: tasks.py
Example 6: _scan_files
def _scan_files(self):
    """Scan the files we've collected. We do the download and scan concurrently to make
    it easier to have a coherent log afterwards. Uses the venv python."""
    self.info("Refreshing clamav db...")
    redo.retry(lambda:
               sh.freshclam("--stdout", "--verbose", _timeout=300, _err_to_out=True))
    self.info("Done.")
    external_tools_path = os.path.join(
        os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__))), 'external_tools')
    self.run_command([self.query_python_path(), os.path.join(external_tools_path, 'extract_and_run_command.py'),
                      '-j{}'.format(self.config['scan_parallelization']),
                      'clamscan', '--no-summary', '--', self.dest_dir])
Author: linclark | Project: gecko-dev | Lines: 12 | File: beet_mover.py
Example 7: wait_for_started
def wait_for_started(self):
    # late imports because we need the jenkins virtualenv to be activated
    # (this is done in the constructor)
    import redo
    import requests

    session = requests.Session()

    def wait_for_jenkins():
        if not session.get('http://localhost:8080').status_code == 200:
            raise Exception('Jenkins did not start successfully.')

    redo.retry(wait_for_jenkins, sleeptime=0.5, jitter=0, sleepscale=1,
               attempts=120)
Author: KaiRo-at | Project: mozmill-ci | Lines: 14 | File: run_tests.py
Example 8: update_runnableapi
def update_runnableapi():
    """
    Use it to update runnablejobs.json file.
    """
    url = "https://index.taskcluster.net/v1/task/gecko.v2.%s.latest.firefox.decision/"
    latest_task = retry(requests.get, args=(url % "mozilla-inbound", ),
                        kwargs={'headers': {'accept-encoding': 'json'}, 'verify': True}).json()
    task_id = latest_task['taskId']

    # The format of expires is like 2017-07-04T22:13:23.248Z and we only want the 2017-07-04 part
    expires = latest_task['expires'].split('T')[0]
    time_tuple = datetime.datetime.strptime(expires, "%Y-%m-%d").timetuple()
    new_timestamp = time.mktime(time_tuple)
    path = ROOT_DIR + '/runablejobs.json'

    # we do nothing if the timestamp of runablejobs.json matches the latest task,
    # otherwise we download and update it
    if os.path.isfile(path):
        with open(path, 'r+') as data:
            # read the timestamp of this task from the json file
            # (the key is spelled 'timetamp' in the stored data; see Example 15)
            oldtime = json.loads(data.read())['meta']['timetamp']
            if oldtime == new_timestamp:
                print "The runnable json file is latest already."
                return
            else:
                print "It's going to update your runnable jobs data."
                download_runnable_jobs(new_timestamp, task_id)
    else:
        print "It's going to help you download the runnable jobs file."
        download_runnable_jobs(new_timestamp, task_id)
Author: jonallengriffin | Project: ouija | Lines: 30 | File: failures.py
Example 9: query_push_by_revision
def query_push_by_revision(repo_url, revision, full=False, return_revision_list=False):
    """
    Return a dictionary with meta-data about a push including:

        * changesets
        * date
        * user

    repo_url             - represents the URL to clone a repo
    revision             - the revision used to set the query range
    full                 - query whole information of a push if it's True
    return_revision_list - return a list of revisions if it's True
    """
    url = "%s?changeset=%s&tipsonly=1" % (JSON_PUSHES % {"repo_url": repo_url}, revision)
    if full:
        url += "&full=1"
    LOG.debug("About to fetch %s" % url)
    req = retry(requests.get, args=(url,))
    data = req.json()
    assert len(data) == 1, "We should only have information about one push"

    if not full:
        LOG.debug("Push info: %s" % str(data))
    else:
        LOG.debug("Requesting the info with full=1 can yield too much unnecessary output "
                  "to debug anything properly")

    push_id, push_info = data.popitem()
    push = Push(push_id=push_id, push_info=push_info)

    if return_revision_list:
        return push.changesets[0].node
    return push
Author: sethfowler | Project: dot-config | Lines: 30 | File: pushlog_client.py
Example 10: query_pushes_by_pushid_range
def query_pushes_by_pushid_range(repo_url, start_id, end_id, version=VERSION, return_revision_list=False):
    """
    Return an ordered list of pushes (oldest first).

    repo_url             - represents the URL to clone a repo
    start_id             - from which pushid to start with (oldest)
    end_id               - from which pushid to end with (most recent)
    version              - version of json-pushes to use (see docs)
    return_revision_list - return a list of revisions if it's True
    """
    push_list = []
    url = "%s?startID=%s&endID=%s&version=%s&tipsonly=1" % (
        JSON_PUSHES % {"repo_url": repo_url},
        start_id - 1,  # off by one to compensate for pushlog as it skips start_id
        end_id,
        version,
    )
    LOG.debug("About to fetch %s" % url)
    req = retry(requests.get, args=(url,))
    pushes = req.json()["pushes"]

    for push_id in sorted(pushes.keys()):
        # Querying by push ID is preferred because date ordering is
        # not guaranteed (due to system clock skew)
        # We can interact with self-serve with the 12 char representation
        push_list.append(Push(push_id=push_id, push_info=pushes[push_id]))

    if return_revision_list:
        return _pushes_to_list(push_list)
    return push_list
Author: sethfowler | Project: dot-config | Lines: 30 | File: pushlog_client.py
Example 11: worker
def worker(item):
    source, destination = item

    def copy_key():
        source_key = bucket.get_key(source)
        dest_key = bucket.get_key(destination)
        # According to http://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html
        # an S3 key's MD5 is represented as its ETag, except when objects are
        # uploaded using the multipart method. In this case the object's ETag
        # is constructed from its MD5, a minus symbol, and the number of
        # parts. See http://stackoverflow.com/questions/12186993/what-is-the-algorithm-to-compute-the-amazon-s3-etag-for-a-file-larger-than-5gb#answer-19896823
        source_md5 = source_key.etag.split("-")[0]
        if dest_key:
            dest_md5 = dest_key.etag.split("-")[0]
        else:
            dest_md5 = None

        if not dest_key:
            self.info("Copying {} to {}".format(source, destination))
            bucket.copy_key(destination, self.config["bucket_name"],
                            source)
        elif source_md5 == dest_md5:
            self.warning(
                "{} already exists with the same content ({}), skipping copy".format(
                    destination, dest_md5))
        else:
            self.fatal(
                "{} already exists with different content (src ETag: {}, dest ETag: {}), aborting".format(
                    destination, source_key.etag, dest_key.etag))

    return retry(copy_key, sleeptime=5, max_sleeptime=60,
                 retry_exceptions=(S3CopyError, S3ResponseError))
Author: MichaelKohler | Project: gecko-dev | Lines: 32 | File: push-candidate-to-releases.py
Example 12: order
def order(self, asset, amount, style, order_id=None):
    log.debug('ordering {} {}'.format(amount, asset.symbol))
    if amount == 0:
        log.warn('skipping 0 amount orders')
        return None

    if self.simulate_orders:
        return super(ExchangeBlotter, self).order(
            asset, amount, style, order_id
        )
    else:
        order = retry(
            action=self.exchange_order,
            attempts=self.attempts['order_attempts'],
            sleeptime=self.attempts['retry_sleeptime'],
            retry_exceptions=(ExchangeRequestError,),
            cleanup=lambda: log.warn('Ordering again.'),
            args=(asset, amount, style),
        )
        self.open_orders[order.asset].append(order)
        self.orders[order.id] = order
        self.new_orders.append(order)

        return order.id
Author: zhoukalex | Project: catalyst | Lines: 26 | File: exchange_blotter.py
Example 13: get_changed_files
def get_changed_files(repository, revision):
    """
    Get the set of files changed in the push headed by the given revision.
    Responses are cached, so multiple calls with the same arguments are OK.
    """
    key = repository, revision
    if key not in _cache:
        url = '%s/json-automationrelevance/%s' % (repository.rstrip('/'), revision)
        logger.debug("Querying version control for metadata: %s", url)

        def get_automationrelevance():
            response = requests.get(url, timeout=5)
            return response.json()
        contents = retry(get_automationrelevance, attempts=2, sleeptime=10)

        logger.debug('{} commits influencing task scheduling:'
                     .format(len(contents['changesets'])))
        changed_files = set()
        for c in contents['changesets']:
            logger.debug(" {cset} {desc}".format(
                cset=c['node'][0:12],
                desc=c['desc'].splitlines()[0].encode('ascii', 'ignore')))
            changed_files |= set(c['files'])

        _cache[key] = changed_files
    return _cache[key]
Author: MichaelKohler | Project: gecko-dev | Lines: 26 | File: files_changed.py
Example 14: _get_product_uptake
def _get_product_uptake(self, tuxedo_server_url, auth,
                        related_product, os):
    from redo import retry
    import requests

    url = get_tuxedo_uptake_url(tuxedo_server_url, related_product, os)
    self.info("Requesting {} from tuxedo".format(url))

    def get_tuxedo_page():
        r = requests.get(url, auth=auth,
                         verify=False, timeout=60)
        r.raise_for_status()
        return r.content

    def calculateUptake(page):
        doc = xml.dom.minidom.parseString(page)
        uptake_values = []

        for element in doc.getElementsByTagName('available'):
            for node in element.childNodes:
                if node.nodeType == xml.dom.minidom.Node.TEXT_NODE and \
                        node.data.isdigit():
                    uptake_values.append(int(node.data))
        if not uptake_values:
            uptake_values = [0]
        return min(uptake_values)

    page = retry(get_tuxedo_page)
    uptake = calculateUptake(page)
    self.info("Current uptake for {} is {}".format(related_product, uptake))
    return uptake
Author: Wafflespeanut | Project: gecko-dev | Lines: 31 | File: uptake_monitoring.py
Example 15: download_runnable_jobs
def download_runnable_jobs(new_timestamp, task_id=None):
    if task_id:
        url = TREEHERDER_HOST.format('mozilla-inbound', task_id)
        data = retry(requests.get, args=(url, ), kwargs={'headers': headers}).json()
        if len(data['results']) > 0:
            data['meta'].update({'timetamp': new_timestamp})
            with open(ROOT_DIR + '/runnablejobs.json', 'w') as f:
                json.dump(data, f)
Author: jonallengriffin | Project: ouija | Lines: 8 | File: failures.py
Example 16: worker
def worker(item):
    source, destination = item

    self.info("Downloading {} to {}".format(source, destination))
    key = bucket.get_key(source)
    return retry(key.get_contents_to_filename,
                 args=(destination, ),
                 sleeptime=5, max_sleeptime=60,
                 retry_exceptions=(S3CopyError, S3ResponseError))
Author: paulmadore | Project: luckyde | Lines: 9 | File: antivirus.py
Example 17: start_disable_slave
def start_disable_slave(self, machine, loanid):
    try:
        url = furl(current_app.config.get("SLAVEAPI_URL", None))
        url.path.add(machine).add("actions").add("shutdown_buildslave")
        ret = retry(requests.post, args=(str(url),), ).json()
        return (ret["requestid"], machine)
    except Exception as exc:
        logger.exception(exc)
        self.retry(exc=exc)
Author: catlee | Project: build-relengapi | Lines: 9 | File: tasks.py
Example 18: _update_bouncer_alias
def _update_bouncer_alias(self, tuxedo_server_url, auth,
                          related_product, alias):
    from redo import retry
    import requests

    url = "%s/create_update_alias" % tuxedo_server_url
    data = {"alias": alias, "related_product": related_product}
    self.log("Updating {} to point to {} using {}".format(alias,
                                                          related_product,
                                                          url))

    # Wrap the real call to hide credentials from retry's logging
    def do_update_bouncer_alias():
        r = requests.post(url, data=data, auth=auth,
                          verify=False, timeout=60)
        r.raise_for_status()

    retry(do_update_bouncer_alias)
Author: MekliCZ | Project: positron | Lines: 18 | File: postrelease_bouncer_aliases.py
Example 19: query_low_value_tasks
def query_low_value_tasks(self, project, bbb=False):
    # Request the set of low value tasks from the SETA service. Low value tasks will be
    # optimized out of the task graph.
    low_value_tasks = []

    if not bbb:
        # we want to get low priority taskcluster jobs
        url = SETA_ENDPOINT % (project, 'taskcluster')
    else:
        # we want low priority buildbot jobs
        url = SETA_ENDPOINT % (project, 'buildbot&priority=5')

    # Try to fetch the SETA data twice, falling back to an empty list of low value tasks.
    # There are 10 seconds between each try.
    try:
        logger.debug("Retrieving low-value jobs list from SETA")
        response = retry(requests.get, attempts=2, sleeptime=10,
                         args=(url, ),
                         kwargs={'timeout': 60, 'headers': ''})
        task_list = json.loads(response.content).get('jobtypes', '')

        if type(task_list) == dict and len(task_list) > 0:
            if type(task_list.values()[0]) == list and len(task_list.values()[0]) > 0:
                low_value_tasks = task_list.values()[0]
                # bb job types return a list instead of a single string,
                # convert to a single string to match tc tasks format
                if type(low_value_tasks[0]) == list:
                    low_value_tasks = [self._get_task_string(x) for x in low_value_tasks]

        # ensure no build tasks slipped in, we never want to optimize out those
        low_value_tasks = [x for x in low_value_tasks if 'build' not in x.lower()]

    # In the event the request times out, requests will raise a TimeoutError.
    except exceptions.Timeout:
        logger.warning("SETA timeout, we will treat all test tasks as high value.")

    # In the event of a network problem (e.g. DNS failure, refused connection, etc),
    # requests will raise a ConnectionError.
    except exceptions.ConnectionError:
        logger.warning("SETA connection error, we will treat all test tasks as high value.")

    # In the event of a rare invalid HTTP response (e.g. 404, 401),
    # requests will raise an HTTPError exception.
    except exceptions.HTTPError:
        logger.warning("We got a bad HTTP response from ouija,"
                       " we will treat all test tasks as high value.")

    # We just log the error as a warning if we failed to catch an exception above.
    except exceptions.RequestException as error:
        logger.warning(error)

    # When we get invalid JSON (i.e. 500 error), it results in a ValueError (bug 1313426)
    except ValueError as error:
        logger.warning("Invalid JSON, possible server error: {}".format(error))

    return low_value_tasks
Author: luke-chang | Project: gecko-1 | Lines: 56 | File: seta.py
Example 20: format_in_table
def format_in_table(active_jobs, master):
    results = {}
    sum_removed = 0
    sum_remaining = 0

    data = retry(requests.get, args=('http://alertmanager.allizom.org/data/jobnames/', ),
                 kwargs={'headers': headers,
                         'verify': True}).json()
    running_jobs = data['results']

    for jobtype in active_jobs:
        key = "%s_%s" % (jobtype[0], jobtype[1])
        if key not in results:
            results[key] = []

        for item in master:
            if item[0] == jobtype[0] and item[1] == jobtype[1]:
                results[key].append(item[2])

    keys = results.keys()
    keys.sort()
    missing_jobs = []
    for key in keys:
        data = results[key]
        data.sort()
        output = ""
        for platform, buildtype, test in active_jobs:
            if "%s_%s" % (platform, buildtype) != key:
                continue

            output += '\t'
            if test in data or '' in data:
                found = False
                for job in running_jobs:
                    if job['name'] == test:
                        output += job['job_type_symbol']
                        found = True
                        break
                if not found:
                    output += '**'
                    missing_jobs.append(test)
                sum_removed += 1
            else:
                output += "--"
                sum_remaining += 1

        print "%s%s" % (key, output)

    if missing_jobs:
        print "** new jobs which need a code: %s" % ','.join(missing_jobs)

    print "Total removed %s" % (sum_removed)
    print "Total remaining %s" % (sum_remaining)
    print "Total jobs %s" % (sum_removed + sum_remaining)
Author: jonallengriffin | Project: ouija | Lines: 56 | File: failures.py
Note: The redo.retry examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects contributed by their authors; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not republish without permission.