path: root/cmd/crawl/crawl.go

// A restartable crawler that dumps everything to a WARC file.
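//
// Example invocation (illustrative; see the flags defined below):
//
//	crawl -c 20 -depth 3 -exclude '\.iso$' https://example.com/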

package main

import (
	"bufio"
	"bytes"
	"errors"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net"
	"net/http"
	"os"
	"os/signal"
	"regexp"
	"runtime/pprof"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"syscall"
	"time"

	"git.jordan.im/crawl"
	"git.jordan.im/crawl/analysis"
	"git.jordan.im/crawl/warc"
)

var (
	dbPath         = flag.String("state", "crawldb", "crawl state database path")
	keepDb         = flag.Bool("keep", false, "keep the state database when done")
	concurrency    = flag.Int("c", 10, "concurrent workers")
	depth          = flag.Int("depth", -1, "maximum link depth")
	validSchemes   = flag.String("schemes", "http,https", "comma-separated list of allowed protocols")
	excludeRelated = flag.Bool("exclude-related", false, "do not include related resources (css, images, etc) if their URL is not in scope")
	resumeDir      = flag.String("resume", "", "path to directory of previous crawl to resume")
	warcFileSizeMB = flag.Int("output-max-size", 100, "maximum output WARC file size (in MB) when using patterns")
	cpuprofile     = flag.String("cpuprofile", "", "create cpu profile")
	bindIP         = flag.String("bind", "", "IP address from which to make outbound connections")

	dnsMap   = dnsMapFlag(make(map[string]string))
	excludes []*regexp.Regexp

	httpClient = crawl.DefaultClient
)

func init() {
	flag.Var(&excludesFlag{}, "exclude", "exclude regex URL patterns")
	flag.Var(&excludesFileFlag{}, "exclude-from-file", "load exclude regex URL patterns from a file")
	flag.Var(dnsMap, "resolve", "set DNS overrides (in hostname=addr format)")

	stats = &crawlStats{
		states: make(map[int]int),
		start:  time.Now(),
	}

	go func() {
		for range time.Tick(10 * time.Second) {
			stats.Dump()
		}
	}()
}

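// excludesFlag implements flag.Value, compiling each -exclude argument into
// a regexp and appending it to the global excludes list.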
type excludesFlag struct{}

func (f *excludesFlag) String() string { return "" }

func (f *excludesFlag) Set(s string) error {
	rx, err := regexp.Compile(s)
	if err != nil {
		return err
	}
	excludes = append(excludes, rx)
	return nil
}

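// excludesFileFlag implements flag.Value, loading exclude regexp patterns,
// one per line, from the file passed to -exclude-from-file.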
type excludesFileFlag struct{}

func (f *excludesFileFlag) String() string { return "" }

func (f *excludesFileFlag) Set(s string) error {
	ff, err := os.Open(s) // #nosec
	if err != nil {
		return err
	}
	defer ff.Close() // nolint
	var lineNum int
	scanner := bufio.NewScanner(ff)
	for scanner.Scan() {
		lineNum++
		rx, err := regexp.Compile(scanner.Text())
		if err != nil {
			return fmt.Errorf("%s, line %d: %v", s, lineNum, err)
		}
		excludes = append(excludes, rx)
	}
	return nil
}

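// dnsMapFlag implements flag.Value, collecting hostname=addr DNS overrides
// passed via -resolve.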
type dnsMapFlag map[string]string

func (f dnsMapFlag) String() string { return "" }

func (f dnsMapFlag) Set(s string) error {
	parts := strings.Split(s, "=")
	if len(parts) != 2 {
		return errors.New("value not in host=addr format")
	}
	f[parts[0]] = parts[1]
	return nil
}

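// extractLinks parses the response body for outgoing links and enqueues each
// one at depth+1. Pages that fail to parse are silently skipped.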
func extractLinks(p crawl.Publisher, u string, depth int, resp *http.Response, _ error) error {
	links, err := analysis.GetLinks(resp)
	if err != nil {
		// This is not a fatal error, just a bad web page.
		return nil
	}

	for _, link := range links {
		if err := p.Enqueue(link, depth+1); err != nil {
			return err
		}
	}

	return nil
}

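// hdr2str serializes HTTP headers into their on-the-wire representation.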
func hdr2str(h http.Header) []byte {
	var b bytes.Buffer
	h.Write(&b) // nolint
	return b.Bytes()
}

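// warcSaveHandler archives every request/response pair to a WARC writer and
// then feeds the response to the link extractor.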
type warcSaveHandler struct {
	warc       *warc.Writer
	warcInfoID string
	numWritten int
}

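// writeWARCRecord appends a single WARC record of the given type for uri,
// linked to the warcinfo record via WARC-Warcinfo-ID.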
func (h *warcSaveHandler) writeWARCRecord(typ, uri string, data []byte) error {
	hdr := warc.NewHeader()
	hdr.Set("WARC-Type", typ)
	hdr.Set("WARC-Target-URI", uri)
	hdr.Set("WARC-Warcinfo-ID", h.warcInfoID)
	hdr.Set("Content-Length", strconv.Itoa(len(data)))

	w, err := h.warc.NewRecord(hdr)
	if err != nil {
		return err
	}
	if _, err := w.Write(data); err != nil {
		return err
	}
	return w.Close()
}

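// Handle writes the HTTP exchange to the WARC output and extracts links from
// the response for further crawling. Body read failures are retried.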
func (h *warcSaveHandler) Handle(p crawl.Publisher, u string, tag, depth int, resp *http.Response, _ error) error {
	// Read the response body (so we can save it to the WARC
	// output) and replace it with a buffer.
	data, derr := ioutil.ReadAll(resp.Body)
	if derr != nil {
		// Errors at this stage are usually transport-level errors,
		// and as such, retriable.
		return crawl.ErrRetryRequest
	}
	resp.Body = ioutil.NopCloser(bytes.NewReader(data))

	// Dump the request to the WARC output.
	var b bytes.Buffer
	if werr := resp.Request.Write(&b); werr != nil {
		return werr
	}
	if werr := h.writeWARCRecord("request", resp.Request.URL.String(), b.Bytes()); werr != nil {
		return werr
	}

	// Dump the response.
	statusLine := fmt.Sprintf("HTTP/1.1 %s", resp.Status)
	respPayload := bytes.Join(
		[][]byte{[]byte(statusLine), hdr2str(resp.Header), data},
		[]byte{'\r', '\n'},
	)
	if werr := h.writeWARCRecord("response", resp.Request.URL.String(), respPayload); werr != nil {
		return werr
	}

	h.numWritten++

	return extractLinks(p, u, depth, resp, nil)
}

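// newWarcSaveHandler writes the leading warcinfo record and returns a handler
// whose subsequent records reference it.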
func newWarcSaveHandler(w *warc.Writer) (crawl.Handler, error) {
	info := strings.Join([]string{
		"Software: crawl/1.0\r\n",
		"Format: WARC File Format 1.0\r\n",
		"Conformsto: http://bibnum.bnf.fr/WARC/WARC_ISO_28500_version1_latestdraft.pdf\r\n",
	}, "")

	hdr := warc.NewHeader()
	hdr.Set("WARC-Type", "warcinfo")
	hdr.Set("WARC-Warcinfo-ID", hdr.Get("WARC-Record-ID"))
	hdr.Set("Content-Length", strconv.Itoa(len(info)))
	hdrw, err := w.NewRecord(hdr)
	if err != nil {
		return nil, err
	}
	if _, err := io.WriteString(hdrw, info); err != nil {
		return nil, err
	}
	hdrw.Close() // nolint
	return &warcSaveHandler{
		warc:       w,
		warcInfoID: hdr.Get("WARC-Record-ID"),
	}, nil
}

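// crawlStats tracks aggregate crawl progress: URL count, bytes downloaded and
// the distribution of HTTP status codes. It is dumped every 10 seconds.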
type crawlStats struct {
	urls  int64
	bytes int64
	start time.Time

	lock   sync.Mutex
	states map[int]int
}

func (c *crawlStats) Update(resp *http.Response) {
	c.lock.Lock()
	defer c.lock.Unlock()

	c.urls++
	c.states[resp.StatusCode]++
	resp.Body = &byteCounter{resp.Body}
}

func (c *crawlStats) UpdateBytes(n int64) {
	atomic.AddInt64(&c.bytes, n)
}

func (c *crawlStats) Dump() {
	c.lock.Lock()
	defer c.lock.Unlock()
	rate := float64(c.bytes) / time.Since(c.start).Seconds() / 1000
	log.Printf("stats: downloaded %d urls, %d bytes (%.4g KB/s), status: %v", c.urls, c.bytes, rate, c.states) // nolint
}

var stats *crawlStats

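// fetch issues a GET request for urlstr with a browser-like User-Agent and
// records the response in the global stats.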
func fetch(urlstr string) (*http.Response, error) {
	req, err := http.NewRequest("GET", urlstr, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0")
	resp, err := httpClient.Do(req)
	if err == nil {
		stats.Update(resp)
	}
	return resp, err
}

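// byteCounter wraps a response body and accounts for every byte read.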
type byteCounter struct {
	io.ReadCloser
}

func (b *byteCounter) Read(buf []byte) (int, error) {
	n, err := b.ReadCloser.Read(buf)
	if n > 0 {
		stats.UpdateBytes(int64(n))
	}
	return n, err
}

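// warcWriterFromFlags returns a size-rotated WARC writer named after the seed
// host, splitting output files at -output-max-size MB.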
func warcWriterFromFlags(host string) (*warc.Writer, error) {
	return warc.NewMultiWriter(host+"-%s.warc.gz", uint64(*warcFileSizeMB)*1024*1024)
}

func main() {
	log.SetFlags(0)
	flag.Parse()

	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			log.Fatal("could not create CPU profile: ", err)
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			log.Fatal("could not start CPU profile: ", err)
		}
		defer pprof.StopCPUProfile()
	}

	seeds := crawl.MustParseURLs(flag.Args())
	if len(seeds) == 0 {
		log.Fatal("no seed URL provided")
	}
	scope := crawl.AND(
		crawl.NewSchemeScope(strings.Split(*validSchemes, ",")),
		crawl.NewDepthScope(*depth),
		crawl.NewSeedScope(seeds),
		crawl.NewRegexpIgnoreScope(excludes),
	)
	if !*excludeRelated {
		scope = crawl.AND(crawl.OR(scope, crawl.NewIncludeRelatedScope()), crawl.NewRegexpIgnoreScope(excludes))
	}

	// Use first URL's host in crawl directory/file names
	host := seeds[0].Host
	if *resumeDir != "" {
		err := os.Chdir(*resumeDir)
		if err != nil {
			log.Fatal(err)
		}
	} else {
		shortForm := "2006-01-02"
		d := host + "-" + time.Now().Format(shortForm)
		if _, err := os.Stat(d); os.IsNotExist(err) {
			err := os.Mkdir(d, 0700)
			if err != nil {
				log.Fatal(err)
			}
		}
		err := os.Chdir(d)
		if err != nil {
			log.Fatal(err)
		}
	}

	w, err := warcWriterFromFlags(host)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close() // nolint

	saver, err := newWarcSaveHandler(w)
	if err != nil {
		log.Fatal(err)
	}

	httpClient = crawl.NewHTTPClientWithDNSOverride(dnsMap)

	if *bindIP != "" {
		addr, err := net.ResolveIPAddr("ip", *bindIP)
		if err != nil {
			log.Fatal(err)
		}
		httpClient = crawl.NewHTTPClientWithLocalAddrOverride(addr)
	}

	crawler, err := crawl.NewCrawler(
		*dbPath,
		seeds,
		scope,
		crawl.FetcherFunc(fetch),
		crawl.HandleRetries(crawl.FollowRedirects(crawl.FilterErrors(saver))),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Set up signal handlers so we can terminate gently if possible.
	var signaled atomic.Value
	signaled.Store(false)
	sigCh := make(chan os.Signal, 1)
	go func() {
		<-sigCh
		log.Printf("exiting due to signal")
		signaled.Store(true)
		crawler.Stop()
	}()
	signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)

	crawler.Run(*concurrency)
	crawler.Close()

	if signaled.Load().(bool) {
		os.Exit(1)
	}
	if !*keepDb {
		os.RemoveAll(*dbPath) // nolint
	}
}