本文整理汇总了Python中requests_futures.sessions.FuturesSession类的典型用法代码示例。如果您正苦于以下问题:Python FuturesSession类的具体用法?Python FuturesSession怎么用?Python FuturesSession使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了FuturesSession类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: bench_requests_futures_async
def bench_requests_futures_async(number_reqs, nb_worker):
    """Benchmark requests-futures: fire `number_reqs` GETs in batches of
    `nb_worker`, stopping the tornado IOLoop when all responses arrived
    or after a 60 s safety timeout.

    https://pypi.python.org/pypi/requests-futures
    """
    l = []
    start = datetime.datetime.now()
    print('Start : ', start)

    def bg_cb(sess, resp):
        # Background callback: runs in a worker thread for every response.
        if resp.status_code != requests.codes.ok:
            print(resp.status_code)
            resp.raise_for_status()
        l.append(1)
        l_size = len(l)
        print(l_size)
        # All requests completed -> stop the loop.
        if l_size == number_reqs:
            tornado.ioloop.IOLoop.instance().stop()
        # BUGFIX: the original compared a timedelta to the int 60, which is
        # never True, so the timeout never fired. Compare elapsed seconds.
        if (datetime.datetime.now() - start).total_seconds() >= 60:
            tornado.ioloop.IOLoop.instance().stop()

    session = FuturesSession(max_workers=10)
    # NOTE(review): the stripped source indentation is ambiguous; the sleep is
    # placed in the outer loop so each batch of nb_worker requests is spaced 1 s.
    for elem in range(int(number_reqs / nb_worker)):
        for e in range(nb_worker):
            session.get(
                "http://www.leboncoin.fr/",
                background_callback=bg_cb
            )
        time.sleep(1)
    print('[Rq TURFU] Done :', datetime.datetime.now() - start)
开发者ID:peondusud,项目名称:py.scrapy.lbc,代码行数:31,代码来源:lbc_bench.py
示例2: check_result
def check_result(request):
    """
    This is NOT A VIEW.
    Returns the job status after querying asynchronously. If finished, returns result.

    Stores the extracted text in the session when the job is finished.
    Returns 0 on any request failure or when the response has no 'status'.
    """
    API_KEY = gimmeapikey(request)
    jobid = request.session['jobid']
    payload = {'apikey': API_KEY}
    session = FuturesSession()

    try:
        future = session.post('https://api.havenondemand.com/1/job/status/' + jobid, data=payload)
        r = future.result()
    except Exception:
        # Best-effort: report any network/HTTP failure as 0 to the caller.
        # (Unused `as e` binding removed.)
        return 0

    # r.content is a byte array; decode utf-8 before parsing the JSON body.
    json_data = json.loads(r.content.decode('utf-8'))

    if 'status' in json_data:
        if json_data['status'] == 'finished':
            # Stash the extracted document text for the caller's view.
            request.session['extracted_text'] = json_data['actions'][0]['result']['document'][0]['content']
        return json_data['status']
    else:
        return 0
开发者ID:zvovov,项目名称:totext,代码行数:29,代码来源:views.py
示例3: fetchReviews
def fetchReviews(unique_id):
    """Fetch every 'like'-type comment for a watcha.net title, 10 per call.

    Returns a list of dicts with movie_title/rating/text/updated_at
    (as a unix timestamp) and comment_id.
    """
    s = FuturesSession()

    # Handshake: request the first page only to learn the total comment
    # count, which determines how many paged calls we must send.
    api_format = 'https://watcha.net/comment/list?unique_id={unique_id}&start_index={start_index}&count=10&type=like'
    handshake = api_format.format(unique_id=unique_id, start_index=str(0))
    hs = s.get(handshake).result().content
    json_hs = json.loads(hs)
    total_count = int(json_hs['meta']['total_count'])

    # BUGFIX: ceiling division. The original `total_count / 10 + 1` issued
    # one extra (empty) page request whenever total_count was an exact
    # multiple of 10.
    how_many_queries = (total_count + 9) // 10
    query_urls = [api_format.format(unique_id=unique_id, start_index=str(i * 10)) for i in xrange(0, how_many_queries, 1)]
    reviews = [
        {
            'movie_title': r['movie_title'],
            'rating': r['rating'],
            'text': r['text'],
            # parse the ISO timestamp and store it as a unix epoch float
            'updated_at': time.mktime(dateutil.parser.parse(r['updated_at']).timetuple()),
            'comment_id': r['comment_id']
        }
        for qu in query_urls
        for r in json.loads(s.get(qu).result().content)['data']
    ]
    return reviews
开发者ID:dandelin,项目名称:watcha-doc2vec-regression,代码行数:25,代码来源:0_data.py
示例4: search
def search(self, q='', cat='', indexer='all', **kwargs):
    """Search all (or one named) Newznab indexer for `q` in category `cat`.

    Fires the per-indexer API calls concurrently and returns a list of
    result lists (one entry per completed response; parse failures yield
    an empty entry).
    """
    self.logger.debug("Searching for %s category %s on indexer %s" % (q, cat, indexer))
    if cat:
        cat = '&cat=' + cat

    sess = FuturesSession(max_workers=8)

    # Build one request URL per selected indexer. The original duplicated
    # this loop for the 'all' and single-indexer cases; a single loop with
    # a filter is equivalent.
    job_list = []
    for i in NewznabIndexers.select():
        if indexer != 'all' and i.name != indexer:
            continue
        cmd = 'search&q=' + urllib2.quote(q.encode(encoding="UTF-8")) + cat + '&extended=1'
        u = i.apiurl
        u += cmd
        u = u.replace('o=json', 'o=xml')
        job_list.append(u)

    result = []
    # Renamed from `future`: the original shadowed this list with the
    # as_completed loop variable of the same name.
    futures = []
    for url in job_list:
        try:
            self.logger.debug('Fetching search results from %s' % url)
            t = sess.get(url, timeout=60, headers=self.headers)
        except Exception as e:
            self.logger.error('%s when fetching %s' % (e, url))
            continue
        futures.append(t)

    for future in cf.as_completed(futures):
        if future.exception() is not None:
            self.logger.error('Failed to fetch results %s' % (future.exception()))
        else:
            f = []
            res = future.result()
            try:
                provider_res = xmltodict.parse(res.content, attr_prefix='')
                if provider_res:
                    if 'rss' in provider_res:
                        if 'channel' in provider_res['rss']:
                            if 'item' in provider_res['rss']['channel']:
                                f.append(provider_res['rss']['channel'])
                    if 'error' in provider_res:
                        self.logger.debug('%s %s' % (provider_res['rss']['channel']['title'], provider_res['error']['description']))
            except Exception as e:
                # BUGFIX: the original called logger.error(res.url, e, ...),
                # which treats `e` as a %-format argument for the message
                # `res.url`; format the message explicitly instead.
                self.logger.error('%s %s' % (res.url, e), exc_info=True)
            result.append(f)
    return result
开发者ID:scith,项目名称:htpc-manager_ynh,代码行数:60,代码来源:newznab.py
示例5: _chapter_pages
def _chapter_pages(self, soup, html):
    """Return the image URL of every page in a chapter.

    `soup`/`html` are the already-parsed/raw HTML of the chapter's page 1.
    Raises HtmlError when any page request does not return HTTP 200.
    """
    # For webtoons, all pages are shown in a single page.
    # When that's the case, there's this element that asks if you want to
    # view page-by-page instead. Let's use this element to check if we're
    # parsing a webtoon chapter.
    webtoon = soup.find("a", href="?supress_webtoon=t")
    if webtoon is not None:
        img_tags = soup.find_all(_page_img_tag)
        return [tag["src"] for tag in img_tags]

    # a <select> tag has options that each points to a page
    opts = soup.find("select", id="page_select").find_all("option")
    urls = [opt["value"] for opt in opts]

    # Page 1 has already been fetched (stored in this html param, duh!)
    # so let's save ourselves an http request
    pages_htmls = [html]
    session = FuturesSession()
    # PERF FIX: submit every request before waiting on any result so the
    # session actually downloads pages concurrently — the original called
    # .result() inside the submit loop, serialising all fetches.
    futures = [session.get(url) for url in urls[1:]]
    for future in futures:
        res = future.result()
        if res.status_code != 200:
            raise HtmlError("cannot fetch")
        pages_htmls.append(res.content)

    returns = []
    for page_html in pages_htmls:
        soup = BeautifulSoup(page_html)
        img_url = soup.find("img", id="comic_page")["src"]
        returns.append(img_url)
    return returns
开发者ID:suryakencana,项目名称:niimanga,代码行数:32,代码来源:batoto.py
示例6: HttpClient
class HttpClient(ClientBase):
    """Telegraf client that ships metrics over HTTP rather than UDP."""

    def __init__(self, host='localhost', port=8094, tags=None):
        # HttpClient's dependencies are imported lazily so the base package
        # works without them; tell the user how to get them if missing.
        try:
            from requests_futures.sessions import FuturesSession
        except ImportError:
            raise ImportError('pytelegraf[http] must be installed to use HTTP transport')

        super(HttpClient, self).__init__(host, port, tags)

        # Telegraf's HTTP listener accepts metric writes on the /write path.
        self.url = 'http://{host}:{port}/write'.format(host=self.host, port=self.port)

        # A single session lets the underlying TCP socket be reused.
        self.future_session = FuturesSession()

    def send(self, data):
        """
        Send the data in a separate thread via HTTP POST.

        HTTP introduces some overhead, so to avoid blocking the main thread,
        this issues the request in the background.
        """
        self.future_session.post(url=self.url, data=data)
开发者ID:paksu,项目名称:pytelegraf,代码行数:25,代码来源:client.py
示例7: get_blocks
def get_blocks(*heights):
    """Fetch the coinsecrets block record for each height, concurrently.

    Returns one decoded JSON object per requested height, in input order.
    """
    session = FuturesSession()
    pending = [session.get(get_block_coinsecrets_url(height)) for height in heights]
    return [json.loads(fut.result().content.decode()) for fut in pending]
开发者ID:XertroV,项目名称:nvbtally,代码行数:7,代码来源:coinsecrets.py
示例8: get_frames
def get_frames(self, count):
    """Get a list of composited radar GIF frames (oldest first) from
    Environment Canada, with the last frame repeated twice."""
    soup = BeautifulSoup(requests.get(self.IMAGES_URL.format(self.station_code)).text, 'html.parser')
    image_links = [tag['href'] for tag in soup.find_all('a') if '.gif' in tag['href']]

    # Prefer the composite (COMP) product when most of the newest images use it.
    if len([i for i in image_links[:8] if 'COMP' in i]) > 4:
        image_string = '_'.join([self.station_code, 'COMP_PRECIPET', self.get_precip_type() + '.gif'])
    else:
        image_string = '_'.join([self.station_code, 'PRECIPET', self.get_precip_type() + '.gif'])

    images = [tag['href'] for tag in soup.find_all('a') if image_string in tag['href']]

    # PERF FIX: submit all downloads before waiting so the session fetches
    # frames concurrently (the original blocked on .result() while submitting,
    # and its `futures` list actually held raw bytes, not futures).
    session = FuturesSession(max_workers=count)
    futures = [session.get(url=self.FRAME_URL.format(self.station_code, i))
               for i in reversed(images[:count])]
    raw_frames = [future.result().content for future in futures]

    def add_layers(frame):
        # Composite the roads and cities overlays onto one GIF frame.
        frame_bytesio = BytesIO()
        base = Image.open(BytesIO(frame)).convert('RGBA')
        base.alpha_composite(self.roads)
        base.alpha_composite(self.cities)
        base.save(frame_bytesio, 'GIF')
        frame_bytesio.seek(0)
        return frame_bytesio.read()

    # Keep only payloads that really are GIFs (b'GIF' magic number).
    frames = [add_layers(f) for f in raw_frames if f[0:3] == b'GIF']

    # Repeat last frame so animations pause on it.
    # BUGFIX: index with -1; the original `frames[count - 1]` raised
    # IndexError whenever any non-GIF response had been filtered out above.
    for i in range(0, 2):  # pylint: disable=unused-variable
        frames.append(frames[-1])
    return frames
开发者ID:michaeldavie,项目名称:env_canada,代码行数:35,代码来源:env_canada.py
示例9: _chapter_pages
def _chapter_pages(self, soup, html):
    """Return the image URL of every page in a chapter.

    `soup`/`html` are the already-parsed/raw HTML of the chapter's page 1.
    Raises HtmlError when any page request does not return HTTP 200.
    """
    # a <select> tag has options that each points to a page
    neighbour = soup.find('select', id='combobox').find_next_sibling('select')
    opts = neighbour.find_all('option')
    urls = [opt['value'] for opt in opts]

    # Page 1 has already been fetched (stored in this html param, duh!)
    # so let's save ourselves an http request
    pages_htmls = [html]
    session = FuturesSession()
    # PERF FIX: submit every request before waiting on any result so the
    # session downloads pages concurrently — the original called .result()
    # inside the submit loop, serialising all fetches.
    futures = []
    for url in urls[1:]:
        uri = self.netlocs[2] + url
        print(uri)
        futures.append(session.get(uri))
    for future in futures:
        res = future.result()
        if res.status_code != 200:
            raise HtmlError('cannot fetch')
        pages_htmls.append(res.content)

    returns = []
    for page_html in pages_htmls:
        soup = BeautifulSoup(page_html)
        img_url = soup.find('img', id='mainImg')['src']
        returns.append(img_url)
    return returns
开发者ID:suryakencana,项目名称:niimanga,代码行数:27,代码来源:mangaeden.py
示例10: APNsClient
class APNsClient(object):
    """Small APNs push client speaking HTTP/2 through a FuturesSession."""

    def __init__(self, cert_file, use_sandbox=False, use_alternative_port=False):
        host = 'api.development.push.apple.com' if use_sandbox else 'api.push.apple.com'
        port = 2197 if use_alternative_port else 443
        self.cert = cert_file
        self.server = "https://{}:{}".format(host, port)
        # Mount the HTTP/2 adapter: APNs requires HTTP/2 transport.
        self.__connection = FuturesSession()
        self.__connection.mount('https://', HTTP20Adapter())

    def send_notification(self, tokens, notification, priority=NotificationPriority.Immediate, topic=None):
        """POST one notification payload to each device token, in the background."""
        json_payload = json.dumps(notification.dict(), ensure_ascii=False, separators=(',', ':')).encode('utf-8')

        headers = {
            'apns-priority': priority.value
        }
        if topic:
            headers['apns-topic'] = topic

        # A bare token is treated as a one-element list.
        if not isinstance(tokens, list):
            tokens = [tokens]

        for token in tokens:
            device_url = '{}/3/device/{}'.format(self.server, token)
            self.__connection.post(device_url, json_payload, headers=headers,
                                   cert=self.cert, background_callback=req_callback)
开发者ID:yichengchen,项目名称:PyAPNs2,代码行数:25,代码来源:client.py
示例11: test_futures_session
def test_futures_session(self):
    """End-to-end checks of FuturesSession.get and background callbacks."""
    # a plain GET returns a Future that resolves to a 200 Response
    sess = FuturesSession()
    future = sess.get(httpbin('get'))
    self.assertIsInstance(future, Future)
    resp = future.result()
    self.assertIsInstance(resp, Response)
    self.assertEqual(200, resp.status_code)

    # non-2xx statuses are delivered as responses, not raised
    future = sess.get(httpbin('status/404'))
    resp = future.result()
    self.assertEqual(404, resp.status_code)

    def cb(s, r):
        self.assertIsInstance(s, FuturesSession)
        self.assertIsInstance(r, Response)
        # attach the parsed json body to the response object
        r.data = r.json()

    future = sess.get(httpbin('get'), background_callback=cb)
    # result() blocks until the request and its callback complete
    resp = future.result()
    self.assertEqual(200, resp.status_code)
    # the callback must have run and annotated the response
    self.assertTrue(hasattr(resp, 'data'))

    def raising_cb(s, r):
        raise Exception('boom')

    future = sess.get(httpbin('get'), background_callback=raising_cb)
    with self.assertRaises(Exception) as cm:
        resp = future.result()
    self.assertEqual('boom', cm.exception.args[0])
开发者ID:CoreyKumpOA,项目名称:vimconfig,代码行数:34,代码来源:test_requests_futures.py
示例12: get_usgs_nearby_cities
def get_usgs_nearby_cities(self, earthquake):
    """
    performs request on local earthquake nearby cities url and returns the data

    Mutates and returns `earthquake`, adding properties.nearest_cities_url
    and properties.nearest_cities (empty when no nearby-cities product exists).
    """
    try:
        nearest_cities_object = earthquake[
            "properties"]["products"]["nearby-cities"]
        nearest_cities_url = nearest_cities_object[0][
            "contents"]["nearby-cities.json"]["url"]
    except (KeyError, IndexError, TypeError):
        # BUGFIX: catch only the lookup errors a missing/short structure can
        # raise — the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit.
        nearest_cities_url = None
    if nearest_cities_url:
        session = FuturesSession(max_workers=1)
        nearest_cities_response = session.get(
            nearest_cities_url, headers=app.config["API_MANAGER_HEADERS"])
        nearest_cities_details = nearest_cities_response.result().json()
        list_of_nearby_cities = []
        for item in nearest_cities_details:
            # earthquake_id/id are filled in later by the persistence layer
            city = NearestCity(
                id=None,
                distance=item["distance"],
                direction=item["direction"],
                name=item["name"],
                latitude=item["latitude"],
                longitude=item["longitude"],
                population=item["population"],
                earthquake_id=None
            )
            list_of_nearby_cities.append(city)
        earthquake["properties"]["nearest_cities_url"] = nearest_cities_url
        earthquake["properties"]["nearest_cities"] = list_of_nearby_cities
    else:
        earthquake["properties"]["nearest_cities_url"] = None
        earthquake["properties"]["nearest_cities"] = []
    return earthquake
开发者ID:SCPR,项目名称:calif-earthquakes,代码行数:35,代码来源:manage.py
示例13: add_list_new
def add_list_new() -> None:
    """Download every TBA team-list page plus each team's participation
    record, then register every team via add_team()."""
    requester = FuturesSession(executor=ProcessPoolExecutor(30), session=requests.session())
    api_key = settings.TBA_API_HEADERS

    def fetch_team_page(page):
        # one paged team-list request
        return requester.get(team_by_page_url_template(page=page), headers=api_key)

    def fetch_participation(team_number):
        # one per-team participation request
        return requester.get(team_participation_url_template(team=team_number), headers=api_key)

    page_range = get_page_range()

    print("\nStarting %d HTTP requests for team lists, split between %d processes..." % (
        page_range[1] - page_range[0], requester.executor._max_workers))
    team_list_futures = [fetch_team_page(page) for page in range(*page_range)]
    print("Waiting...")
    wait(team_list_futures)
    print("Done!\n")

    # Flatten the per-page JSON lists into one list of team records.
    teams_data = [team
                  for future in team_list_futures
                  for team in future.result().json()]
    team_numbers = [team['team_number'] for team in teams_data]

    print("Starting %d HTTP requests for team participation data, split between %d processes..." % (
        len(team_numbers), requester.executor._max_workers))
    team_participation_futures = [fetch_participation(number) for number in team_numbers]
    print("Waiting...")
    wait(team_participation_futures)
    print("Done!\n")

    team_participations = [future.result().json() for future in team_participation_futures]
    for args in zip(team_numbers, teams_data, team_participations):
        add_team(*args)
开发者ID:FRC-RS,项目名称:FRS,代码行数:32,代码来源:addteams.py
示例14: crowdsource_undetected
def crowdsource_undetected(related_list, files_path, instructions, data_for):
    # Zip everything under files_path (plus an optional JSON dump of
    # related_list) and post the archive to the crowd web API as a new job.
    # Returns the created crowd job id, or 0 when there are no files or no
    # api user/job could be obtained. (Python 2 code: print statements,
    # StringIO-based buffers.)
    #
    # NOTE(review): indentation was reconstructed from flattened source;
    # the try/post block is assumed to run only when a job was created.

    # if no files found then return zero
    if not os.listdir(files_path):
        return 0
    # Remove trailing slashes
    files_path = os.path.normpath(files_path)
    # Get an api crowd user
    api_user = get_api_user()
    crowd_user_id = 0
    if api_user and 'Id' in api_user:
        crowd_user_id = api_user['Id']
    # get a crowd job
    crowd_job_id = 0
    if crowd_user_id > 0:
        crowd_job_id = create_api_job(crowd_user_id, os.path.basename(files_path), instructions)
    zip_path = None
    if crowd_job_id > 0:
        # save json object to json file
        if related_list is not None and len(related_list) > 0:
            sio = StringIO()
            json.dump(related_list, sio)
            with open(os.path.join(files_path,'%s.json'%data_for), "w") as fjson:
                fjson.write(sio.getvalue())
        # compress all files in files_path directory
        zip_path = os.path.join(files_path, '%s.zip'%data_for)
        buff = StringIO()
        with zipfile.ZipFile(buff, 'w', zipfile.ZIP_DEFLATED) as zipf:
            print 'zipping ' + zip_path
            zipdir(files_path, zipf)
            print 'zipped ' + zip_path
        session = FuturesSession()
        # api_uri = 'http://api.opescode.com/api/UserData?id=%s' %str(job_api_id)
        api_uri = '{0}/api/UserData?id={1}'.format(service_base_uri, str(crowd_job_id))
        logger.info('Calling web api {0} for {1}'.format(api_uri, zip_path))

        def bg_cb(sess, resp):
            # Background callback: log the API's response status for this zip.
            print zip_path, resp.status_code
            # if failed then save the files to the recording physical folder
            if resp.status_code != 200:
                print 'Post file {0} failed with stc={1}'.format(zip_path, str(resp.status_code))
                # For now, I will not log this until I find a better way to pass logger to the callback method. Note: callback method has no access to self
                logger.error('Post file {0} failed with stc={1}'.format(zip_path, str(resp.status_code)))
            else:
                logger.info('%s posted successfully'%zip_path)

        try:
            # Persist the archive locally; only POST it when archive_only is
            # falsy (archive_only is presumably a module-level flag — TODO confirm).
            with open(zip_path, "wb") as f: # use `wb` mode
                print 'saving zip ' + zip_path
                f.write(buff.getvalue())
                print 'zip saved ' + zip_path
                if not archive_only:
                    print 'posting ' + zip_path
                    session.post(api_uri, files={"archive": buff.getvalue()}, background_callback=bg_cb)
                    print 'posted ' + zip_path
                    logger.info('posted %s and awaiting api response.'%zip_path)
        except Exception as ex:
            logger.error('Exception occured while calling web api.')
    return crowd_job_id
开发者ID:mothman1,项目名称:pupil,代码行数:60,代码来源:offline_eyetracking.py
示例15: send_ga_event
def send_ga_event(event, user):
    """Forward one email ESP event to Google Analytics (Measurement Protocol),
    fire-and-forget: the POST runs on the FuturesSession's background thread."""
    session = FuturesSession()
    payload = {
        'v': 1,
        'tid': settings.GOOGLE_TRACKING_ID,
        'uid': google_user_id(user),
        't': 'event',
        'ec': 'email',
        'ea': event.event_type,
        'cm': 'email',
    }
    if event.esp_event:
        payload['ua'] = event.esp_event.get('user-agent')
        payload['dt'] = event.esp_event.get('subject', [None])[0]
        payload['cn'] = event.esp_event.get('campaign_name', None)
        payload['cs'] = event.esp_event.get('campaign_source', None)
        # campaign content and event label both carry the email id
        payload['cc'] = payload['el'] = event.esp_event.get(
            'email_id', None)
        payload['dp'] = "%s/%s" % (
            payload['cc'], event.event_type)
    else:
        # FIX: logger.warn is a deprecated alias for warning; also pass the
        # argument lazily (%-args) so formatting is skipped when disabled.
        logger.warning("No ESP event found for event: %s", event.__dict__)
    logger.info("Sending mail event data Analytics: %s", payload)
    session.post(
        'https://www.google-analytics.com/collect', data=payload)
开发者ID:ebmdatalab,项目名称:openprescribing,代码行数:25,代码来源:handlers.py
示例16: home
def home(request, album_key):
    # View: sentiment analysis of an album's Rdio comments as JSON, cached
    # per album_key. On a cache miss, every comment is sent to AlchemyAPI
    # concurrently, aggregated, serialised, and stored in the cache.
    # (Python 2 code: dict.iteritems.)
    response = cache.get(album_key)
    if response is None:
        session = FuturesSession(max_workers=5)
        session.auth = AlchemyApiAuth(settings.ALCHEMYAPI_KEY)

        futures = []
        source_item, reviews, comment_by_comment_key = get_rdio_comments(album_key)
        # fire one sentiment request per comment; collect futures first
        for comment_key, comment_text in comment_by_comment_key.iteritems():
            futures.append(start_request(session, comment_key, comment_text))
        sentiment_by_comment_key = complete_requests(futures)
        total_sentiment, per_item_sentiment = aggregate_sentiment(reviews, sentiment_by_comment_key)

        response = {
            'item': source_item,
            'total_sentiment': total_sentiment,
            'per_item_sentiment': per_item_sentiment,
            'sentiment_by_comment_key': sentiment_by_comment_key,
            'comment_by_comment_key': comment_by_comment_key,
        }
        response = json.dumps(response, indent=2)
        # NOTE(review): cached with the backend's default timeout — confirm.
        cache.set(album_key, response)
    return http.HttpResponse(response,
                             content_type='application/json')
开发者ID:dasevilla,项目名称:rdio-sentiment-analysis,代码行数:28,代码来源:views.py
示例17: requestPool
def requestPool(parameters, url):
    """
    Generator that asynchronously processes profile requests and yields profile futures.
    """
    session = FuturesSession(max_workers=10)
    for query_params in parameters:
        yield session.get(url, params=query_params)
开发者ID:Sandy4321,项目名称:nba-4,代码行数:8,代码来源:build_player_db.py
示例18: async_next
def async_next(self, list_url):
    '''Download several urls concurrently (async.io-style) and hand each
    finished response future to extract_nexts.'''
    session = FuturesSession(max_workers=5)
    for target in list_url:
        session.get(target).add_done_callback(self.extract_nexts)
开发者ID:c24b,项目名称:rk_extract,代码行数:8,代码来源:rakuten.py
示例19: async_requests
def async_requests(locations, site=None):
    # For each craigslist location whose newest stored gig is missing or
    # older than 4 hours, fire a background search request; insert_callback
    # persists the results. Fire-and-forget: futures are not awaited here.
    session = FuturesSession()
    # freshness cutoff: anything older than 4 hours triggers a refetch
    check_date = datetime.now() + timedelta(hours=-4)
    for location in locations:
        # newest stored gig for this location, if any
        gig = Gigs.select().where(Gigs.location.contains(location)).order_by(Gigs.datetime.desc()).first()
        if (gig is None) or ((datetime.strptime(gig.datetime, '%Y-%m-%d %H:%M') < check_date)):
            url = "https://{}.craigslist.org/search/{}/".format(location, (site or CRAIGSLIST_SITE))
            future = session.get(url, background_callback=insert_callback)
开发者ID:carbide20,项目名称:GigFinder,代码行数:8,代码来源:scraper.py
示例20: send_single
def send_single(receiver, body):
    """Deliver `body` to one receiver's /incoming endpoint and block until
    the HTTP response has arrived."""
    me = agent_repo.get_byname('~self')
    target = 'http://{0}:8888/incoming'.format(receiver.hostname)

    session = FuturesSession()
    pending = session.post(target, data={'sender': me.hostname, 'content': body})
    # wait for the response to come in
    pending.result()
开发者ID:onnovalkering,项目名称:sparql-over-sms,代码行数:9,代码来源:httptransfer.py
注:本文中的requests_futures.sessions.FuturesSession类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论