#!/usr/bin/env python3
import csv
import time
import requests
from bs4 import BeautifulSoup

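# Search result pages to scrape from the IPv6 Ready Logo database:
# the first page of results (lim=25, p=0) for a Cisco-only query and for the full listing.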
queue = {
    "cisco": "https://ipv6ready.org/db/index.php/public/search/?pc=2&vn=Cisco&o=4&do=1&lim=25&p=0",
    "all": "https://ipv6ready.org/db/index.php/public/search/?do=1&lim=25&p=0"
}
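# Mimic a regular Firefox browser request; some sites reject the default
# python-requests User-Agent.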
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; rv:109.0) Gecko/20100101 Firefox/115.0",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
    "Accept-Language": "en-US,en;q=0.5",
    "Accept-Encoding": "gzip, deflate, br",
    "Connection": "keep-alive",
    "Upgrade-Insecure-Requests": "1",
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "none",
    "Sec-Fetch-User": "?1",
    "Pragma": "no-cache",
    "Cache-Control": "no-cache"
}

# Fetch each search page, parse it, and locate the results table.
for name, url in queue.items():
    page = requests.get(url, headers=headers, timeout=30)
    page.raise_for_status()  # fail loudly on HTTP errors instead of parsing an error page
    soup = BeautifulSoup(page.text, 'html.parser')
    table = soup.find('table', attrs={'class': 'application_list'})

    # The table uses <br> tags in place of spaces to split header text;
    # swap them for spaces so cell contents don't run together.
    for br in soup.select("br"):
        br.replace_with(" ")

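    # Collect each row as a list of its header/data cells, stripped of whitespace.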
    data = []
    for row in table.find_all("tr"):
        data.append([ele.text.strip() for ele in row.find_all(["th", "td"])])

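    # Drop empty rows, then write the table out to a dated CSV for this query.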
    data = list(filter(None, data))
    path = time.strftime(f"%Y-%m-%d-ipv6ready-{name}.csv")
    with open(path, "w", newline="", encoding="utf-8") as f:
        writer = csv.writer(f, lineterminator="\n")
        writer.writerows(data)