import urllib3

# Minimal usage: create a PoolManager and issue a GET request.
http = urllib3.PoolManager()
r = http.request('GET', 'https://www.baidu.com')
print(r.data)
Creating a PoolManager instance
http = urllib3.PoolManager(num_pools=10)
# num_pools defaults to 10 ConnectionPool instances; increasing it can improve performance when requesting many different hosts.
http = urllib3.PoolManager(maxsize=10)
# maxsize is the number of connections kept per host for reuse when repeatedly connecting to the same host.
http = urllib3.HTTPConnectionPool('baidu.com', maxsize=10)  # a pool dedicated to a single host
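A single-host pool like this can also be used directly; requests then take a path relative to the pool's host. A minimal sketch (httpbin.org is just a placeholder host):
pool = urllib3.HTTPConnectionPool('httpbin.org', maxsize=10)
r = pool.request('GET', '/ip')  # path is resolved against the pool's host
print(r.data)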
http = urllib3.PoolManager(timeout=5)  # one timeout, in seconds, applied to every request
# Set the connect timeout and the read timeout separately:
http = urllib3.PoolManager(timeout=urllib3.Timeout(connect=1, read=3))
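The timeout can also be overridden per request rather than at pool construction; a minimal sketch (the httpbin URL is only an example):
r = http.request('GET', 'http://httpbin.org/delay/2', timeout=2.5)
r = http.request('GET', 'http://httpbin.org/delay/2', timeout=urllib3.Timeout(connect=1.0, read=3.0))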
# Route requests through an HTTP proxy:
proxy = urllib3.ProxyManager('http://localhost:3128/')
proxy.request('GET', 'https://www.baidu.com/')
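If the proxy requires authentication, proxy headers can be passed to the ProxyManager; a sketch assuming placeholder credentials user:password:
auth_headers = urllib3.make_headers(proxy_basic_auth='user:password')
proxy = urllib3.ProxyManager('http://localhost:3128/', proxy_headers=auth_headers)
proxy.request('GET', 'https://www.baidu.com/')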
Making requests
r = http.request('GET','http://www.baidu.com')
r = http.request(
    'POST',
    'http://httpbin.org/post',
    fields={'hello': 'world'}
)
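To send a JSON body instead of form fields, encode the data yourself and pass it via body with an explicit Content-Type; a sketch:
import json
encoded_data = json.dumps({'attribute': 'value'}).encode('utf-8')
r = http.request(
    'POST',
    'http://httpbin.org/post',
    body=encoded_data,
    headers={'Content-Type': 'application/json'})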
r = http.request(
    'GET',
    'http://httpbin.org/headers',
    headers={
        'X-Something': 'value'
    })
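For GET requests, the same fields argument is encoded into the URL query string; a small sketch:
r = http.request('GET', 'http://httpbin.org/get', fields={'arg': 'value'})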
with open('example.txt') as fp:
    file_data = fp.read()
r = http.request(
    'POST',
    'http://httpbin.org/post',
    fields={
        'filefield': ('example.txt', file_data, 'text/plain'),
    })
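Raw binary data can be uploaded by passing it as body together with an explicit Content-Type; a sketch assuming a local example.jpg exists:
with open('example.jpg', 'rb') as fp:
    binary_data = fp.read()
r = http.request(
    'POST',
    'http://httpbin.org/post',
    body=binary_data,
    headers={'Content-Type': 'image/jpeg'})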
http.request('GET', 'http://httpbin.org/ip', retries=10)  # retries=False disables retries entirely
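Retry behaviour can be tuned more finely with a Retry object instead of an integer; a sketch (3 total retries, at most 2 of them redirects):
r = http.request(
    'GET',
    'http://httpbin.org/redirect/1',
    retries=urllib3.Retry(3, redirect=2))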
Handling the response
r = http.request('GET', 'https://www.baidu.com', preload_content=False)
for chunk in r.stream(32):
    print(chunk)
r.release_conn()  # the connection must be released when preload_content=False
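With preload_content=False the response can also be read like a file object; a minimal sketch:
r = http.request('GET', 'https://www.baidu.com', preload_content=False)
print(r.read(4))  # read just the first 4 bytes
r.release_conn()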
print(r.status)   # HTTP status code; 200 on success
print(r.data)     # response body
print(r.headers)  # response headers
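When the body is JSON, r.data can be decoded with the standard json module; a sketch using httpbin:
import json
r = http.request('GET', 'http://httpbin.org/ip')
print(json.loads(r.data.decode('utf-8')))  # parsed into a Python dict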
Reference
http://urllib3.readthedocs.io/en/latest/user-guide.html