// A restartable crawler that extracts links from HTML pages and
// enqueues them for further crawling.
//
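// Example invocation (the seed URL is hypothetical; the flags are the
// ones defined below):
//
//	links -state=crawldb -c=10 -depth=2 https://example.com/
//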

package main

import (
	"flag"
	"log"
	"net/http"
	"strings"

	"git.autistici.org/ale/crawl"
	"git.autistici.org/ale/crawl/analysis"
)

var (
	dbPath       = flag.String("state", "crawldb", "crawl state database path")
	concurrency  = flag.Int("c", 10, "concurrent workers")
	depth        = flag.Int("depth", 10, "maximum link depth")
	validSchemes = flag.String("schemes", "http,https", "comma-separated list of allowed protocols")
)

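// extractLinks is the page handler: it parses each fetched response
// for outgoing links and enqueues them at the next depth.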
func extractLinks(c *crawl.Crawler, u string, depth int, resp *http.Response, err error) error {
	// Do not try to parse the response of a failed fetch.
	if err != nil {
		return err
	}

	links, err := analysis.GetLinks(resp)
	if err != nil {
		return err
	}

	for _, link := range links {
		c.Enqueue(link, depth+1)
	}

	return nil
}

func main() {
	flag.Parse()

	seeds := crawl.MustParseURLs(flag.Args())
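	// Constrain the crawl: allowed URL schemes, maximum link depth,
	// and only pages under the seed URLs.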
	scope := []crawl.Scope{
		crawl.NewSchemeScope(strings.Split(*validSchemes, ",")),
		crawl.NewDepthScope(*depth),
		crawl.NewSeedScope(seeds),
	}

	// Fetch pages with plain http.Get, follow redirects, and hand
	// every fetched page to extractLinks. Use the state database path
	// from the -state flag rather than a hardcoded one.
	crawler, err := crawl.NewCrawler(*dbPath, seeds, scope,
		crawl.FetcherFunc(http.Get),
		crawl.NewRedirectHandler(crawl.HandlerFunc(extractLinks)))
	if err != nil {
		log.Fatal(err)
	}
	crawler.Run(*concurrency)
}