// A restartable crawler that extracts links from HTML pages and
// enqueues them for further crawling. Crawl state is persisted on
// disk, so an interrupted crawl can be restarted where it left off.
package main

import (
	"flag"
	"log"
	"net/http"
	"strings"

	"git.jordan.im/crawl"
	"git.jordan.im/crawl/analysis"
)

var (
	concurrency  = flag.Int("c", 10, "concurrent workers")
	depth        = flag.Int("depth", 10, "maximum link depth")
	validSchemes = flag.String("schemes", "http,https", "comma-separated list of allowed protocols")
)
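
// extractLinks is the handler invoked for each successfully fetched
// page: it parses the response for outbound links and feeds each one
// back into the crawl queue, one level deeper than the page it was
// found on.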
func extractLinks(p crawl.Publisher, u string, tag, depth int, resp *http.Response, _ error) error {
	links, err := analysis.GetLinks(resp)
	if err != nil {
		// Not a fatal error, just a bad web page.
		return nil
	}

	for _, link := range links {
		if err := p.Enqueue(link, depth+1); err != nil {
			return err
		}
	}

	return nil
}

func main() {
	flag.Parse()

	seeds := crawl.MustParseURLs(flag.Args())

	// A URL is crawled only if it satisfies every condition: an
	// allowed scheme, within the maximum link depth, and within the
	// scope of the initial seeds.
	scope := crawl.AND(
		crawl.NewSchemeScope(strings.Split(*validSchemes, ",")),
		crawl.NewDepthScope(*depth),
		crawl.NewSeedScope(seeds),
	)

	// "crawldb" is the on-disk state database that makes the crawl
	// restartable; extractLinks runs behind wrappers that retry
	// failed fetches, follow redirects, and drop error responses.
	crawler, err := crawl.NewCrawler(
		"crawldb",
		seeds,
		scope,
		crawl.FetcherFunc(http.Get),
		crawl.HandleRetries(crawl.FollowRedirects(crawl.FilterErrors(crawl.HandlerFunc(extractLinks)))),
	)
	if err != nil {
		log.Fatal(err)
	}

	crawler.Run(*concurrency)
	crawler.Close()
}
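
// Example invocation (hypothetical binary name; seed URLs are passed
// as positional arguments after the flags):
//
//	links -c 20 -depth 5 https://example.com/
//
// Re-running the same command later resumes from the state saved in
// the crawldb directory instead of starting from scratch.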