aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJordan <me@jordan.im>2024-03-19 17:08:06 -0700
committerJordan <me@jordan.im>2024-03-19 17:08:06 -0700
commitff5a1d79413a95e2afca180a8e2e8389656ee0a1 (patch)
treebb67008b96aa7950d9c565a033adc8dfbc8621ae
downloadipv6ready-master.tar.gz
ipv6ready-master.zip
initial commitHEADmaster
-rw-r--r--README5
-rwxr-xr-xscrape.py43
2 files changed, 48 insertions, 0 deletions
diff --git a/README b/README
new file mode 100644
index 0000000..816c3fa
--- /dev/null
+++ b/README
@@ -0,0 +1,5 @@
+ipv6ready-scrape is a minimal script that extracts IPv6Ready's approved products
+table and stores its contents in a CSV; retrieves "all" and "cisco" scoped
+searches by default
+
+https://www.ipv6ready.org/db/index.php/public/?o=4
diff --git a/scrape.py b/scrape.py
new file mode 100755
index 0000000..c451664
--- /dev/null
+++ b/scrape.py
@@ -0,0 +1,43 @@
#!/usr/bin/env python3
"""Scrape IPv6Ready's approved-products table into dated CSV files.

For each named search in ``queue``, fetch the first results page, extract
the ``application_list`` table, and write its rows to
``YYYY-MM-DD-ipv6ready-<name>.csv`` in the current directory.
"""
import csv
import time
import requests
from bs4 import BeautifulSoup

# Named searches to scrape; each key becomes part of its output filename.
# Only the first page (lim=25&p=0) is fetched -- pagination is not followed.
queue = {
    "cisco": "https://ipv6ready.org/db/index.php/public/search/?pc=2&vn=Cisco&o=4&do=1&lim=25&p=0",
    "all": "https://ipv6ready.org/db/index.php/public/search/?do=1&lim=25&p=0"
}
# Browser-like headers. Accept-Encoding is deliberately NOT set: forcing
# "br" makes the server free to reply with brotli, which requests cannot
# decode unless the optional brotli package is installed; left alone,
# requests advertises only encodings it can actually decode.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; rv:109.0) Gecko/20100101 Firefox/115.0",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
    "Accept-Language": "en-US,en;q=0.5",
    "Connection": "keep-alive",
    "Upgrade-Insecure-Requests": "1",
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "none",
    "Sec-Fetch-User": "?1",
    "Pragma": "no-cache",
    "Cache-Control": "no-cache"
}

for name, url in queue.items():
    page = requests.get(url, headers=headers, timeout=30)
    # Fail loudly on HTTP errors instead of parsing an error page as data.
    page.raise_for_status()
    soup = BeautifulSoup(page.text, 'html.parser')
    table = soup.find('table', attrs={'class': 'application_list'})
    if table is None:
        # Site layout changed or the search produced no results table;
        # a clear error beats the AttributeError that would follow.
        raise RuntimeError(f"no application_list table found for search {name!r}")

    # line breaks can be used in place of spaces to partition headers
    for br in table.select("br"):
        br.replace_with(" ")

    data = []
    for row in table.find_all("tr"):
        cells = [ele.text.strip() for ele in row.find_all(["th", "td"])]
        if cells:  # skip structural rows with no th/td content
            data.append(cells)

    # Build the date prefix separately so strftime never sees the search
    # name (a '%' in a queue key would otherwise be mis-expanded).
    path = f"{time.strftime('%Y-%m-%d')}-ipv6ready-{name}.csv"
    with open(path, "w", newline="", encoding="utf-8") as f:
        writer = csv.writer(f, lineterminator="\n")
        writer.writerows(data)