# File: async_webcrawler_multiple_urls_example.py
import os, sys

# Go up two directory levels from this file and add that path to sys.path
# so crawl4ai can be imported when running the example from the repo
parent_dir = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
)
sys.path.append(parent_dir)

import asyncio
from crawl4ai import AsyncWebCrawler


async def main():
    # Initialize the AsyncWebCrawler
    async with AsyncWebCrawler(verbose=True) as crawler:
        # List of URLs to crawl
        urls = [
            "https://example.com",
            "https://python.org",
            "https://github.com",
            "https://stackoverflow.com",
            "https://news.ycombinator.com",
        ]

        # Set up crawling parameters
        word_count_threshold = 100

        # Run the crawling process for multiple URLs
        results = await crawler.arun_many(
            urls=urls,
            word_count_threshold=word_count_threshold,
            bypass_cache=True,
            verbose=True,
        )

        # Process the results
        for result in results:
            if result.success:
                print(f"Successfully crawled: {result.url}")
                print(f"Title: {result.metadata.get('title', 'N/A')}")
                print(f"Word count: {len(result.markdown.split())}")
                print(
                    f"Number of links: {len(result.links.get('internal', [])) + len(result.links.get('external', []))}"
                )
                print(f"Number of images: {len(result.media.get('images', []))}")
                print("---")
            else:
                print(f"Failed to crawl: {result.url}")
                print(f"Error: {result.error_message}")
                print("---")


if __name__ == "__main__":
    asyncio.run(main())