path: root/analysis/links.go

// Extract links from HTML/CSS content.

package analysis

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"regexp"
	"strings"

	"github.com/PuerkitoBio/goquery"

	"git.autistici.org/ale/crawl"
)

var (
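	// urlcssRx matches url(...) references in CSS, both in property
	// values (after a ":") and in @import rules, capturing the URL itself.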
	urlcssRx = regexp.MustCompile(`(?:@import|:).*url\(["']?([^'"\)]+)["']?\)`)

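	// linkMatches lists the HTML tag/attribute pairs to scan for links,
	// along with the crawl tag assigned to outlinks found there.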
	linkMatches = []struct {
		tag     string
		attr    string
		linkTag int
	}{
		{"a", "href", crawl.TagPrimary},
		{"link", "href", crawl.TagRelated},
		{"img", "src", crawl.TagRelated},
		{"script", "src", crawl.TagRelated},
		{"iframe", "src", crawl.TagRelated},
	}
)

// rawOutlink is the unparsed (string URL) version of a crawl.Outlink.
type rawOutlink struct {
	URL string
	Tag int
}

// GetLinks returns all the links found in a document. Currently only
// parses HTML pages and CSS stylesheets.
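//
// A minimal usage sketch (the surrounding fetch and error handling are
// assumed, not part of this package):
//
//	resp, err := http.Get("https://example.com/")
//	if err == nil {
//		defer resp.Body.Close()
//		outlinks, _ := GetLinks(resp)
//		for _, o := range outlinks {
//			fmt.Println(o.URL, o.Tag)
//		}
//	}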
func GetLinks(resp *http.Response) ([]crawl.Outlink, error) {
	// Parse outbound links relative to the request URI, and
	// return unique results.
	var result []crawl.Outlink
	links := make(map[string]crawl.Outlink)
	for _, l := range extractLinks(resp) {
		// Skip data: URLs altogether.
		if strings.HasPrefix(l.URL, "data:") {
			continue
		}
		if linkurl, err := resp.Request.URL.Parse(l.URL); err == nil {
			links[linkurl.String()] = crawl.Outlink{
				URL: linkurl,
				Tag: l.Tag,
			}
		}
	}
	for _, l := range links {
		result = append(result, l)
	}
	return result, nil
}

func extractLinks(resp *http.Response) []rawOutlink {
	ctype := resp.Header.Get("Content-Type")
	switch {
	case strings.HasPrefix(ctype, "text/html"):
		return extractLinksFromHTML(resp)
	case strings.HasPrefix(ctype, "text/css"):
		return extractLinksFromCSS(resp)
	default:
		return nil
	}
}

func extractLinksFromHTML(resp *http.Response) []rawOutlink {
	var outlinks []rawOutlink
	// Use goquery to extract links from the parsed HTML
	// contents (query patterns are described in the
	// linkMatches table).
	doc, err := goquery.NewDocumentFromReader(resp.Body)
	if err != nil {
		return nil
	}

	for _, lm := range linkMatches {
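		// For the {"a", "href"} entry this builds the selector "a[href]",
		// matching every <a> element that has an href attribute.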
		doc.Find(fmt.Sprintf("%s[%s]", lm.tag, lm.attr)).Each(func(i int, s *goquery.Selection) {
			val, _ := s.Attr(lm.attr)
			outlinks = append(outlinks, rawOutlink{URL: val, Tag: lm.linkTag})
		})
	}
	return outlinks
}

func extractLinksFromCSS(resp *http.Response) []rawOutlink {
	// Use a simple (and actually quite bad) regular
	// expression to extract "url()" links from CSS.
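	// For example, it captures "img/bg.png" from
	// `background: url("img/bg.png")`.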
	var outlinks []rawOutlink
	if data, err := ioutil.ReadAll(resp.Body); err == nil {
		for _, val := range urlcssRx.FindAllStringSubmatch(string(data), -1) {
			outlinks = append(outlinks, rawOutlink{URL: val[1], Tag: crawl.TagRelated})
		}
	}
	return outlinks
}