本文整理汇总了Python中requesocks.session函数的典型用法代码示例。如果您正苦于以下问题:Python session函数的具体用法?Python session怎么用?Python session使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了session函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: get_page
def get_page(url, port, timeout, max_retries=1):
    """Fetch *url* through a local SOCKS5 proxy listening on *port*.

    :param url: URL to GET.
    :param port: local SOCKS5 proxy port (e.g. a Tor SocksPort).
    :param timeout: request timeout in seconds.
    :param max_retries: number of HTTP retries; must be >= 1.
    :returns: the requesocks response object.
    :raises ValueError: if max_retries is less than 1.
    """
    if max_retries == 1:
        session = requesocks.session()
    elif max_retries > 1:
        # Honour the caller's retry count (the original hard-coded 10 here,
        # silently ignoring the parameter).
        session = requesocks.session(config={'max_retries': max_retries})
    else:
        # The original used a bare ``raise`` with no active exception, which
        # itself raises RuntimeError; raise something meaningful instead.
        raise ValueError('max_retries must be >= 1, got %r' % (max_retries,))
    proxy = 'socks5://127.0.0.1:{}'.format(port)
    session.proxies = {'http': proxy, 'https': proxy}
    return session.get(url, timeout=timeout)
开发者ID:fowlslegs,项目名称:wfpadtools,代码行数:10,代码来源:netutil.py
示例2: runtest
def runtest():
    """Issue one HTTP GET against ``target.host`` and pass the response to
    ``checkparam()``, choosing the transport from the mutually exclusive
    module-level options ``target.tor`` / ``target.proxy`` / ``target.socks``.

    Relies on module-level names: ``target``, ``requests``, ``requesocks``
    and ``checkparam``.  Connection failures are printed, not raised.
    """
    # Direct request: no proxy option given at all.
    if target.tor is None and target.proxy is None and target.socks is None:
        try:
            request = requests.get(target.host)
            checkparam(request)
        except requests.ConnectionError as error_message:
            print "============================"
            print "HTTP connection failed with status message:"
            print error_message
            print "============================"
    # Tor: route through the default local SOCKS5 port 9050.
    elif target.tor is not None and target.proxy is None and target.socks is None:
        try:
            session = requesocks.session()
            # NOTE(review): requests-style proxy dicts are keyed by the
            # *target URL scheme* ('http'/'https'), not by proxy type; a
            # "socks5" key is likely ignored — confirm against requesocks docs.
            session.proxies = {
                "socks5": "socks5://127.0.0.1:9050"
            }
            request = session.get(target.host, auth=('user','pass'))
            checkparam(request)
        except requesocks.exceptions.ConnectionError as error_message:
            print "============================"
            print "HTTP connection failed with status message:"
            print error_message
            print "============================"
    # Plain HTTP/HTTPS proxy given as host:port.
    elif target.proxy is not None and target.tor is None and target.socks is None:
        proxy = {
            "http": "http://"+target.proxy,
            "https": "https://"+target.proxy
        }
        try:
            request = requests.get(target.host, proxies=proxy)
            checkparam(request)
        except requests.ConnectionError as error_message:
            print "============================"
            print "HTTP connection failed with status message:"
            print error_message
            print "============================"
    # Explicit SOCKS4/SOCKS5 proxy given as host:port.
    elif target.socks is not None and target.proxy is None and target.tor is None:
        try:
            session = requesocks.session()
            # NOTE(review): same scheme-vs-proxy-type key concern as above.
            session.proxies = {
                "socks4": "socks4://"+target.socks,
                "socks5": "socks5://"+target.socks
            }
            request = session.get(target.host, auth=('user','pass'))
            checkparam(request)
        except requesocks.exceptions.ConnectionError as error_message:
            print "============================"
            print "HTTP connection failed with status message:"
            print error_message
            print "============================"
开发者ID:weldan,项目名称:httptester,代码行数:53,代码来源:httptester.py
示例3: getUrl
def getUrl(animelist):
s = requesocks.session()
s.proxies = {'http':'socks5://127.0.0.1:1080'}
urllist = []
for item in animelist:
res = s.get('http://www.dilidili.com/anime/'+item).content
soup = BeautifulSoup(res, "lxml")
try:
u = soup.find('div', 'download area').a['href']
except:
continue
if re.search('pan.baidu', u):
urllist.append(u)
print "Get Url:"+u
continue
res = s.get(u).content
soup = BeautifulSoup(res, "lxml")
for it in soup('a', href=True):
if re.search('pan.baidu', it['href']):
urllist.append(it['href'])
print "Get url:"+it['href']
break
for item in urllist:
os.system('echo %s >> urllist.txt' % item)
return urllist
开发者ID:Sunkworld,项目名称:Trash,代码行数:25,代码来源:saveToDisk.py
示例4: __init__
def __init__(self):
    """Initialise the browser wrapper around a requesocks session."""
    # Name -> value cookie store, shared with the session below.
    self.cookies = {}
    self.browser = requesocks.session()
    self.browser.cookies.clear()
    # NOTE(review): this replaces the session's cookie jar with a plain
    # dict so self.cookies and the session always see the same cookies;
    # jar-specific APIs on self.browser.cookies are lost — confirm callers
    # only use dict-style access.
    self.browser.cookies = self.cookies
    # Proxy settings are unset until configured by the caller.
    self.sock5 = None
    self.proxy = None
开发者ID:yeungocanh,项目名称:MultiUpload,代码行数:7,代码来源:mybrowser.py
示例5: get_metadata
def get_metadata(self):
    """Generator: query the perfSONAR esmond archive endpoint and yield a
    ``Metadata`` object per returned JSON record.

    Honours an optional SOCKS5 proxy taken from the ``SOCKS5`` environment
    variable.  On a non-200 or non-JSON reply, ``self.http_alert`` is
    invoked and the generator ends without yielding anything further.
    """
    if self.script_alias:
        archive_url = '{0}/{1}/perfsonar/archive/'.format(self.api_url, self.script_alias)
    else:
        archive_url = '{0}/perfsonar/archive/'.format(self.api_url)
    session = requesocks.session()
    if os.getenv('SOCKS5'):
        session.proxies = {'http': os.getenv('SOCKS5'), 'https': os.getenv('SOCKS5')}
        # TLS verification disabled when tunnelling through the proxy.
        # NOTE(review): original indentation was ambiguous; confirm this
        # line belongs inside the proxy branch.
        session.verify = False
    r = session.get(archive_url,
                    params=dict(self.filters.metadata_filters, **self.filters.time_filters),
                    headers=self.request_headers)
    self.inspect_request(r)
    if r.status_code == 200 and \
            r.headers['content-type'] == 'application/json':
        data = json.loads(r.text)
        for i in data:
            yield Metadata(i, self.api_url, self.filters)
    else:
        self.http_alert(r)
        # A plain ``return`` ends the generator; the original had an
        # unreachable trailing ``yield`` after this return, now dropped.
        return
开发者ID:marian-babik,项目名称:rsv-perfsonar,代码行数:25,代码来源:SocksApiConnect.py
示例6: main
def main(stochastic=False, resolve_classes=False):
    """Scrape time-schedules data through a fixed HTTP proxy and write the
    per-term JSON datasets under /var/www/canigraduate/data/.

    :param stochastic: when True, each term is skipped with probability
        1 - 1/index, so reruns revisit older terms less often.
    :param resolve_classes: when truthy, also collect class-resolution
        entries and write them out (the value is forwarded to
        ``resolver.write`` as the destination).
    """
    session = requesocks.session()
    session.proxies = {'http': '54.183.147.66:3128',
                       'https': '54.183.147.66:3128'}
    resolver = ClassResolver(session, resolve_classes)
    timeschedules = TimeSchedules(session)
    print('loaded')
    for index, term in enumerate(timeschedules):
        print(term)
        has_entries = False
        # ignore with probability 1 - (1 / index)
        if random.random() * index >= 1 and stochastic:
            continue
        dataset = TimeSchedulesDataset()
        for dept in term:
            print(dept)
            dataset.add(dept.get_entries())
            has_entries = True
        if has_entries:
            # (an unused ``base = os.path.dirname(...)`` local was removed;
            # all output paths below are absolute)
            dataset.notify_changes('/var/www/canigraduate/data/timeschedules/%s.json' % str(term))
            dataset.write_timeschedules('/var/www/canigraduate/data/timeschedules/%s.json' % str(term))
            dataset.write_timeschedules_compressed('/var/www/canigraduate/data/timeschedules-compressed/%s.json' % str(term))
            dataset.write_timeschedules_data_compressed('/var/www/canigraduate/data/timeschedules-compressed/%s.data.json' % str(term))
            if resolve_classes:
                for id in dataset.data.keys():
                    resolver.add(id, term, dataset.names[id])
    print('resolving classes')
    if resolve_classes:
        resolver.write(resolve_classes)
开发者ID:architek360,项目名称:canigraduate.uchicago.edu,代码行数:30,代码来源:get_schedules.py
示例7: getip_requesocks
def getip_requesocks(url):
print "(+) Sending request with requesocks..."
session = requesocks.session()
session.proxies = {'http': 'socks5://127.0.0.1:9050',
'https': 'socks5://127.0.0.1:9050'}
r = session.get(url)
print "(+) IP is: " + r.text.replace("\n", "")
开发者ID:gogogo365,项目名称:Anti-Anti-Spider,代码行数:7,代码来源:requesocks.py
示例8: login
def login(self):
    """Log in to tutsplus.com.

    Scrapes the Rails CSRF token from the sign-in page, POSTs the stored
    credentials, then confirms success by reading the account name from
    the courses page.

    :returns: True on success, False when no account name is found.
    """
    self.s = requests.session()
    #self.s.proxies = {'http': 'http://127.0.0.1:8087','https': 'http://127.0.0.1:8087'}
    soup = BeautifulSoup(self.get_source(self.login_url))
    # Hidden form fields required by the Rails login form.
    authenticity_token = soup.find_all(attrs={"name": "authenticity_token"})[0]['value']
    utf8 = soup.find_all(attrs={"name": "utf8"})[0]['value']
    data = {
        'utf8':utf8,
        'session[login]':self.username,
        'session[password]':self.password,
        'authenticity_token' : authenticity_token
    }
    # POST to whatever action URL the sign-in form declares.
    self.s.post(soup.select('.sign-in__form')[0]['action'], data = data)
    # Fetch an authenticated page to verify the login actually took.
    soup = BeautifulSoup(self.get_source('https://tutsplus.com/account/courses'))
    account_name = soup.select('.account-header__name')[0].string
    if not account_name :
        return False
    print 'Logined success, account name: '+account_name
    return True
开发者ID:PizzaLiu,项目名称:tutsplus-downloader,代码行数:25,代码来源:Tutsplus.py
示例9: get_by_rnp
def get_by_rnp(number, TIMEOUT):
    """Query the OSCE RNP provider registry for *number* through Tor and
    append the result (or a timeout marker) to ``out_osce.tsv``.

    :param number: RNP number to look up (unicode string).
    :param TIMEOUT: request timeout in seconds.

    Relies on module-level names ``_headers``, ``_tor_proxies``,
    ``req_socks`` and ``extract_name``.
    """
    payload = {
        #"action": "enviar",  # sent via the query string instead (see url)
        "txtRuc": "",
        "txtRnp": str(number),
        "cmbCapitulo": "",
        "cmbTipoPersona": "",
    }
    kargs = {
        "data": payload,
        "headers": _headers,
        "timeout": TIMEOUT,
    }
    url = "http://www.osce.gob.pe/consultasenlinea/rnp_consulta/ProveedoresInscritos.asp?action=enviar"
    tor_req = req_socks.session()
    tor_req.proxies = _tor_proxies
    try:
        r = tor_req.post(url, **kargs)
        name = extract_name(r.text)
        if name is not None:
            with codecs.open("out_osce.tsv", "a") as myfile:
                myfile.write(number.encode("utf-8") + "\t")
                myfile.write(name.encode("utf-8") + "\n")
    except (req_socks.exceptions.Timeout, socket.timeout):
        # The original had two byte-identical handlers for the two timeout
        # flavours; merged into a single except clause.
        with codecs.open("out_osce.tsv", "a") as myfile:
            out = "Timeout error %s" % number.encode("utf-8")
            myfile.write(out + "\n")
开发者ID:roher123,项目名称:scripts,代码行数:33,代码来源:query_osce.py
示例10: __init__
def __init__(self, args):
    """Build the HTTP scanner: shared requesocks session, optional TOR
    routing (with an IP sanity check), cookie source and user agent.

    :param args: parsed command-line namespace (max_retries, tor,
        check_tor, cookies, user_agent, ...).
    """
    super(HttpScan, self).__init__(args)
    self.session = requesocks.session()
    # NOTE: mutates the library-wide default retry count, affecting every
    # adapter/session in the process, not just this one.
    adapters.DEFAULT_RETRIES = self.args.max_retries
    self.tor = None
    if self.args.tor:
        self.out.log("Enabling TOR")
        self.tor = Torify()
        # Standard local Tor SOCKS5 endpoint.
        self.session.proxies = {'http': 'socks5://127.0.0.1:9050',
                                'https': 'socks5://127.0.0.1:9050'}
        if self.args.check_tor:
            # Check TOR
            self.out.log("Checking IP via TOR")
            rip, tip = self.tor.check_ip(verbose=True)
            if tip is None:
                # Abort the whole run: scanning without Tor would leak our IP.
                self.out.log('TOR is not working properly!', logging.ERROR)
                exit(-1)
    if self.args.cookies is not None:
        if path.exists(self.args.cookies) and path.isfile(self.args.cookies):
            # Cookie argument is a Mozilla-format cookie file on disk.
            self.cookies = MozillaCookieJar(self.args.cookies)
            self.cookies.load()
        else:
            # Otherwise treat the argument as a raw Cookie header string.
            # self.out.log('Could not find cookie file: %s' % self.args.load_cookies, logging.ERROR)
            self.cookies = Cookies.from_request(self.args.cookies)
    else:
        self.cookies = None
    self.ua = UserAgent() if self.args.user_agent is None else self.args.user_agent
开发者ID:0x90,项目名称:futurescan,代码行数:30,代码来源:scan.py
示例11: downloadNewImages
def downloadNewImages(bucket, new_dog_img_dict, d_id):
    """Download one dog image (id *d_id*) through the local Tor SOCKS5
    proxy and upload it to the given S3 bucket as ``<d_id>.jpg``.

    Skips ids whose key already exists in the bucket.  Any failure is
    recorded in the MongoDB ``pet.errs`` collection instead of raised.

    :param bucket: boto S3 bucket object.
    :param new_dog_img_dict: mapping of dog id -> image URL.
    :param d_id: dog id (string) to process.
    """
    try:
        dfile = new_dog_img_dict[d_id]
        fname = d_id + '.jpg'
        if not bucket.get_key(fname):
            session = requesocks.session()
            session.proxies = {'http': 'socks5://127.0.0.1:9050',
                               'https': 'socks5://127.0.0.1:9050'}
            file_object = bucket.new_key(fname)
            # NOTE(review): reading r.raw usually requires a streamed
            # request; confirm requesocks populates .raw for plain get().
            r = session.get(dfile)
            if r.status_code == 200:
                with open(fname, 'wb') as f:
                    r.raw.decode_content = True
                    shutil.copyfileobj(r.raw, f)
                file_object.set_contents_from_filename('./' + fname, policy='public-read')
                os.remove(fname)
            else:
                client = MongoClient()
                db = client.pet
                err_coll = db.errs
                err_coll.insert_one({'id': d_id, 'err': r.status_code})
    except Exception:
        client = MongoClient()
        db = client.pet
        err_coll = db.errs
        e = sys.exc_info()[1]
        # Bug fix: the original logged ``record['id']`` here, but ``record``
        # is undefined in this scope and raised NameError; use d_id.
        err_coll.insert_one({'id': d_id, 'err': str(e)})
开发者ID:atruji,项目名称:yournextdog,代码行数:27,代码来源:PooledDownloader.py
示例12: downloadNewImages
def downloadNewImages(self):
    """Launch a local Tor instance, then download every image in
    ``self.new_dog_img_dict`` through it and upload each to
    ``self.bucket``; successful ids are collected in ``self.success``
    and failures logged to ``self.err_coll``.

    Bug fixes vs the original: the method was missing its ``self``
    parameter even though the body uses ``self`` throughout; the image
    dict was read via an undefined bare ``new_dog_img_dict`` name; and
    errors were logged under an undefined ``record`` name.
    """
    tor_process = stem.process.launch_tor_with_config(config={'SocksPort': str(self.socks_port)})
    self.success = []
    for x in self.new_dog_img_dict.keys():
        try:
            dfile = self.new_dog_img_dict[x]
            fname = x + '.jpg'
            if not self.bucket.get_key(fname):
                session = requesocks.session()
                session.proxies = {'http': 'socks5://127.0.0.1:9050',
                                   'https': 'socks5://127.0.0.1:9050'}
                file_object = self.bucket.new_key(fname)
                r = session.get(dfile)
                if r.status_code == 200:
                    with open(fname, 'wb') as f:
                        r.raw.decode_content = True
                        shutil.copyfileobj(r.raw, f)
                    file_object.set_contents_from_filename('./' + fname, policy='public-read')
                    self.success.append(x)
                    os.remove(fname)
                else:
                    self.err_coll.insert_one({'id': x, 'err': r.status_code})
        except Exception:
            e = sys.exc_info()[1]
            self.err_coll.insert_one({'id': x, 'err': str(e)})
开发者ID:atruji,项目名称:yournextdog,代码行数:25,代码来源:UpdateDB.py
示例13: __init__
def __init__(self, cookies_dict: dict=None):
    """Page downloader: reads URL / user-agent / proxy settings from
    ``config/net.ini`` and builds a requests or requesocks session.

    :param cookies_dict: optional cookie name -> value mapping to preload
        into the session.
    """
    # Defaults, overridden by config/net.ini when present.
    self.xnova_url = 'uni4.xnova.su'
    self.user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:48.0) Gecko/20100101 Firefox/48.0'
    self.error_str = None
    self.proxy = None
    # load user-agent from config/net.ini
    cfg = configparser.ConfigParser()
    cfg.read('config/net.ini', encoding='utf-8')
    if 'net' in cfg:
        self.user_agent = cfg['net']['user_agent']
        self.xnova_url = cfg['net']['xnova_url']
        self.proxy = cfg['net']['proxy']
        # Treat an empty config value the same as "no proxy".
        if self.proxy == '':
            self.proxy = None
    # construct requests HTTP session
    if self.proxy is not None:
        if self.proxy.startswith('socks5://'):
            # for SOCKS5 proxy create requesocks session
            self.sess = requesocks.session()
            logger.info('Using SOCKS5 proxy session (requesocks)')
    else:
        self.sess = requests.Session()  # else normal session
    # NOTE(review): the source's indentation was lost; this nesting makes
    # the no-proxy path work but appears to leave self.sess unset for a
    # non-SOCKS proxy — confirm against the original file.
    if self.proxy is not None:
        self.sess.proxies = {'http': self.proxy, 'https': self.proxy}
        logger.info('Set HTTP/HTTPS proxy to: {0}'.format(self.proxy))
    # Some default headers for a page downloader
    self.sess.headers.update({'User-Agent': self.user_agent})
    self.sess.headers.update({'Referer': 'https://{0}/'.format(self.xnova_url)})
    self.sess.headers.update({'Accept': '*/*'})
    self.sess.headers.update({'Accept-Language': 'ru-RU,ru;q=0.8,en-US;q=0.6,en;q=0.4'})
    self.sess.headers.update({'Accept-Encoding': 'gzip, deflate'})
    if cookies_dict:
        self.set_cookies_from_dict(cookies_dict)
开发者ID:minlexx,项目名称:xnova_galaxy_parser,代码行数:35,代码来源:xn_page_dnl.py
示例14: get_http_client
def get_http_client():
    """Return an HTTP session object, routed through the local Tor SOCKS5
    proxy when ``config['use_tor_proxy']`` is set, otherwise a plain
    requests session."""
    if not config['use_tor_proxy']:
        return requests.session()
    proxy_url = 'socks5://127.0.0.1:%d' % config['tor_proxy_port']
    tor_session = requesocks.session()
    tor_session.proxies = {'http': proxy_url, 'https': proxy_url}
    return tor_session
开发者ID:nladuo,项目名称:taobao_bra_crawler,代码行数:8,代码来源:utils.py
示例15: doGet
def doGet(*args, **kwargs):
    """HTTP GET helper for the screenshot scanner.

    ``args[0]`` is a url triple ``[url_string, scrape_cert_flag, extra]``.
    Scanner-specific kwargs are popped before the rest is forwarded to
    ``session.get``: vhosts, urlQueue, subs, extraHosts, proxy.

    When the URL is https and scrape_cert_flag is True, hostnames found in
    the server certificate (subject CN plus subjectAltName DNS entries,
    including wildcard expansions over *subs*) are queued on *urlQueue* as
    additional scan targets and recorded in *extraHosts*.

    :returns: the requests response object.
    """
    url = args[0]
    doVhosts = kwargs.pop('vhosts' ,None)
    urlQueue = kwargs.pop('urlQueue' ,None)
    subs = kwargs.pop('subs' ,None)
    extraHosts = kwargs.pop('extraHosts',None)
    proxy = kwargs.pop('proxy',None)
    kwargs['allow_redirects'] = False
    session = requests.session()
    if(proxy is not None):
        # Route both schemes through the supplied SOCKS5 proxy (host:port).
        session.proxies={'http':'socks5://'+proxy,'https':'socks5://'+proxy}
    resp = session.get(url[0],**kwargs)
    #If we have an https URL and we are configured to scrape hosts from the cert...
    if(url[0].find('https') != -1 and url[1] == True):
        #Pull hostnames from cert, add as additional URLs and flag as not to pull certs
        host = urlparse(url[0]).hostname
        port = urlparse(url[0]).port
        if(port is None):
            port = 443
        names = []
        try:
            cert = ssl.get_server_certificate((host,port),ssl_version=ssl.PROTOCOL_SSLv23)
            x509 = M2Crypto.X509.load_cert_string(cert.decode('string_escape'))
            subjText = x509.get_subject().as_text()
            names = re.findall("CN=([^\s]+)",subjText)
            altNames = x509.get_ext('subjectAltName').get_value()
            names.extend(re.findall("DNS:([^,]*)",altNames))
        except:
            # Best effort: any certificate-scraping failure is ignored.
            pass
        for name in names:
            if(name.find('*.') != -1):
                # Wildcard entry: expand it over every candidate subdomain.
                for sub in subs:
                    try:
                        sub = sub.strip()
                        hostname = name.replace('*.',sub+'.')
                        if(hostname not in extraHosts):
                            extraHosts[hostname] = 1
                            # Only queue names that actually resolve.
                            address = socket.gethostbyname(hostname)
                            urlQueue.put(['https://'+hostname+':'+str(port),False,url[2]])
                            print '[+] Discovered subdomain '+address
                    except:
                        # Unresolvable candidate: skip silently.
                        pass
                # Also queue the bare domain with the wildcard stripped.
                name = name.replace('*.','')
                if(name not in extraHosts):
                    extraHosts[name] = 1
                    urlQueue.put(['https://'+name+':'+str(port),False,url[2]])
                    print '[+] Added host '+name
            else:
                if (name not in extraHosts):
                    extraHosts[name] = 1
                    urlQueue.put(['https://'+name+':'+str(port),False,url[2]])
                    print '[+] Added host '+name
        return resp
    else:
        return resp
开发者ID:awhitehatter,项目名称:httpscreenshot,代码行数:58,代码来源:httpscreenshot.py
示例16: doGet
def doGet(*args, **kwargs):
    """HTTP GET helper for the screenshot scanner (double-quote variant of
    the same function from the sibling project).

    ``args[0]`` is a url triple ``[url_string, scrape_cert_flag, extra]``.
    Scanner-specific kwargs are popped before the rest is forwarded to
    ``session.get``: vhosts, urlQueue, subs, extraHosts, proxy.

    When the URL is https and scrape_cert_flag is True, hostnames found in
    the server certificate (subject CN plus subjectAltName DNS entries,
    including wildcard expansions over *subs*) are queued on *urlQueue* as
    additional scan targets and recorded in *extraHosts*.

    :returns: the requests response object.
    """
    url = args[0]
    doVhosts = kwargs.pop("vhosts", None)
    urlQueue = kwargs.pop("urlQueue", None)
    subs = kwargs.pop("subs", None)
    extraHosts = kwargs.pop("extraHosts", None)
    proxy = kwargs.pop("proxy", None)
    kwargs["allow_redirects"] = False
    session = requests.session()
    if proxy is not None:
        # Route both schemes through the supplied SOCKS5 proxy (host:port).
        session.proxies = {"http": "socks5://" + proxy, "https": "socks5://" + proxy}
    resp = session.get(url[0], **kwargs)
    # If we have an https URL and we are configured to scrape hosts from the cert...
    if url[0].find("https") != -1 and url[1] == True:
        # Pull hostnames from cert, add as additional URLs and flag as not to pull certs
        host = urlparse(url[0]).hostname
        port = urlparse(url[0]).port
        if port is None:
            port = 443
        names = []
        try:
            cert = ssl.get_server_certificate((host, port), ssl_version=ssl.PROTOCOL_SSLv23)
            x509 = M2Crypto.X509.load_cert_string(cert.decode("string_escape"))
            subjText = x509.get_subject().as_text()
            names = re.findall("CN=([^\s]+)", subjText)
            altNames = x509.get_ext("subjectAltName").get_value()
            names.extend(re.findall("DNS:([^,]*)", altNames))
        except:
            # Best effort: any certificate-scraping failure is ignored.
            pass
        for name in names:
            if name.find("*.") != -1:
                # Wildcard entry: expand it over every candidate subdomain.
                for sub in subs:
                    try:
                        sub = sub.strip()
                        hostname = name.replace("*.", sub + ".")
                        if hostname not in extraHosts:
                            extraHosts[hostname] = 1
                            # Only queue names that actually resolve.
                            address = socket.gethostbyname(hostname)
                            urlQueue.put(["https://" + hostname + ":" + str(port), False, url[2]])
                            print "[+] Discovered subdomain " + address
                    except:
                        # Unresolvable candidate: skip silently.
                        pass
                # Also queue the bare domain with the wildcard stripped.
                name = name.replace("*.", "")
                if name not in extraHosts:
                    extraHosts[name] = 1
                    urlQueue.put(["https://" + name + ":" + str(port), False, url[2]])
                    print "[+] Added host " + name
            else:
                if name not in extraHosts:
                    extraHosts[name] = 1
                    urlQueue.put(["https://" + name + ":" + str(port), False, url[2]])
                    print "[+] Added host " + name
        return resp
    else:
        return resp
开发者ID:olasec,项目名称:httpscreenshot,代码行数:58,代码来源:httpscreenshot.py
示例17: init_tor_session
def init_tor_session():
    """Lazily create the module-level Tor-proxied session.

    Idempotent: a second call is a no-op once ``session`` is set.
    """
    global session
    if session:
        return
    session = requesocks.session()
    tor_endpoint = 'socks5://127.0.0.1:9050'
    session.proxies = {
        'http': tor_endpoint,
        'https': tor_endpoint
    }
    # Present as a desktop Chrome browser.
    session.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2062.120 Safari/537.36'
开发者ID:nguyenhoaibao,项目名称:crawler,代码行数:9,代码来源:request_url.py
示例18: __init__
def __init__(self):
    """Initialise the browser wrapper around a requesocks session."""
    # Name -> value cookie store, shared with the session below.
    self.cookies = {}
    self.browser = requesocks.session()
    self.browser.cookies.clear()
    # NOTE(review): this replaces the session's cookie jar with a plain
    # dict so self.cookies and the session share state; jar-specific APIs
    # on self.browser.cookies are lost — confirm callers use dict access.
    self.browser.cookies = self.cookies
    # Default desktop Chrome user-agent string (applied by callers).
    self.header = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.111 Safari/537.36'
    # Proxy settings disabled until configured by the caller.
    self.sock5 = False
    self.proxy = False
    self.link_host = ''
    self.link_origin = ''
示例19: reset_socks
def reset_socks():
    """Ask the local Tor daemon for a fresh circuit (and hence a new exit
    IP) by sending it SIGHUP; in DEBUG mode, print the resulting external
    IP to verify the change.

    Depends on the module-level flags USE_SOCKS_PROXY and DEBUG.
    """
    if USE_SOCKS_PROXY:
        import os
        os.system("sudo killall -HUP tor")  # force tor to get a new IP
        if DEBUG:
            # NOTE(review): indentation was lost in the source; this block
            # is assumed to be DEBUG-only — confirm against the original.
            # Verify the new exit IP by asking an IP-echo service via Tor.
            import requesocks as requests
            session = requests.session()
            session.proxies = {'http': 'socks5://127.0.0.1:9050', 'https': 'socks5://127.0.0.1:9050'}
            resp = session.get('http://ipv4bot.whatismyipaddress.com')
            print(resp.text)
开发者ID:stratosmacker,项目名称:clscraper,代码行数:10,代码来源:settings.py
示例20: whoami
def whoami(self):
    """
    This is a check to make sure the proxy is working: fetch our apparent
    public IP from ipchicken.com through the configured proxy and return
    it with whitespace stripped.
    """
    ip_check_url = "http://www.ipchicken.com"
    proxied = requesocks.session()
    proxied.proxies = self.session_proxies
    page = proxied.get(ip_check_url, headers=self.request_params)
    markup = BS(page.content, 'html.parser')
    # The IP lives in the bold tag of the page's second paragraph.
    raw_ip = markup.find_all('p')[1].find('b').next
    return raw_ip.replace(' ', '').replace('\n', '')
开发者ID:MichaelAHood,项目名称:real_estate_recommender,代码行数:11,代码来源:web_scraping.py
注:本文中的requesocks.session函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论