How to request multiple URLs with aiohttp

This is the official aiohttp example for requesting a single URL. How should it be adapted to request multiple URLs and collect their contents?

import aiohttp
import asyncio

async def fetch(session, url):
    async with session.get(url) as response:
        return await response.text()

async def main():
    async with aiohttp.ClientSession() as session:
        html = await fetch(session, 'http://python.org')
        print(html)

loop = asyncio.get_event_loop()
loop.run_until_complete(main())
2 answers
import aiohttp
import asyncio

# fetch() is the same helper defined in the question above
urls = ['http://baidu.com', 'http://qq.com']

async def get(url):
    async with aiohttp.ClientSession() as session:
        html = await fetch(session, url)
        print(html)

# Schedule one coroutine per URL and run them all concurrently
tasks = [get(x) for x in urls]
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(*tasks))
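
A note on the answer above: it opens a new ClientSession for every URL. The aiohttp documentation recommends reusing a single session across requests so the underlying connections are pooled. A minimal sketch of that variant, reusing the urls list and the fetch() helper from the question (asyncio.run requires Python 3.7+):

import asyncio
import aiohttp

urls = ['http://baidu.com', 'http://qq.com']

async def fetch(session, url):
    async with session.get(url) as response:
        return await response.text()

async def main():
    # One shared session: connections are pooled and reused
    async with aiohttp.ClientSession() as session:
        return await asyncio.gather(*(fetch(session, u) for u in urls))

# gather() returns the pages in the same order as urls
for url, html in zip(urls, asyncio.run(main())):
    print(url, len(html))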

import asyncio
import aiohttp
import time

from my_spider import urls

async def async_craw(url):
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            res = await resp.json()
            print(f'craw_url: {url},', res.get('message'))

# The event loop
loop = asyncio.get_event_loop()

# One task per URL
tasks = [
    loop.create_task(async_craw(url))
    for url in urls
]

start_time = time.time()

# Wait for all tasks to complete
loop.run_until_complete(asyncio.wait(tasks))
end_time = time.time()
print(f'Elapsed time: {end_time - start_time}')
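
For reference, the same timing harness can also be written with asyncio.run (Python 3.7+) instead of managing the loop by hand, and with asyncio.gather so the results come back in order rather than being printed inside the coroutine. This is only a sketch, assuming as above that my_spider exposes a urls list and that each endpoint returns JSON with a message field:

import asyncio
import time
import aiohttp

from my_spider import urls  # same URL list as in the answer above

async def async_craw(session, url):
    async with session.get(url) as resp:
        res = await resp.json()
        return url, res.get('message')

async def main():
    # Share one session across all requests
    async with aiohttp.ClientSession() as session:
        return await asyncio.gather(*(async_craw(session, u) for u in urls))

start_time = time.time()
for url, message in asyncio.run(main()):
    print(f'craw_url: {url},', message)
print(f'Elapsed time: {time.time() - start_time:.2f}s')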
