Diffstat (limited to 'src/cmd/internal')
-rw-r--r--  src/cmd/internal/dwarf/dwarf.go         | 128
-rw-r--r--  src/cmd/internal/goobj/objfile.go       |   2
-rw-r--r--  src/cmd/internal/obj/arm64/a.out.go     |   7
-rw-r--r--  src/cmd/internal/obj/arm64/asm7.go      |  21
-rw-r--r--  src/cmd/internal/obj/dwarf.go           |   4
-rw-r--r--  src/cmd/internal/obj/objfile.go         |   3
-rw-r--r--  src/cmd/internal/obj/ppc64/a.out.go     |  10
-rw-r--r--  src/cmd/internal/obj/ppc64/asm9.go      |  85
-rw-r--r--  src/cmd/internal/obj/ppc64/asm_test.go  |  41
-rw-r--r--  src/cmd/internal/obj/ppc64/obj9.go      |  63
-rw-r--r--  src/cmd/internal/obj/riscv/anames.go    |   2
-rw-r--r--  src/cmd/internal/obj/riscv/cpu.go       |   2
-rw-r--r--  src/cmd/internal/obj/riscv/obj.go       |  18
-rw-r--r--  src/cmd/internal/obj/util.go            |   5
14 files changed, 281 insertions(+), 110 deletions(-)
diff --git a/src/cmd/internal/dwarf/dwarf.go b/src/cmd/internal/dwarf/dwarf.go
index 4e163db020..be37641706 100644
--- a/src/cmd/internal/dwarf/dwarf.go
+++ b/src/cmd/internal/dwarf/dwarf.go
@@ -50,6 +50,7 @@ type Var struct {
Abbrev int // Either DW_ABRV_AUTO[_LOCLIST] or DW_ABRV_PARAM[_LOCLIST]
IsReturnValue bool
IsInlFormal bool
+ DictIndex uint16 // index of the dictionary entry describing the type of this variable
StackOffset int32
// This package can't use the ssa package, so it can't mention ssa.FuncDebug,
// so indirect through a closure.
@@ -97,6 +98,8 @@ type FnState struct {
Scopes []Scope
InlCalls InlCalls
UseBASEntries bool
+
+ dictIndexToOffset []int64
}
func EnableLogging(doit bool) {
@@ -315,6 +318,7 @@ const (
DW_AT_go_runtime_type = 0x2904
DW_AT_go_package_name = 0x2905 // Attribute for DW_TAG_compile_unit
+ DW_AT_go_dict_index = 0x2906 // Attribute for DW_TAG_typedef_type, index of the dictionary entry describing the real type of this type shape
DW_AT_internal_location = 253 // params and locals; not emitted
)
@@ -325,8 +329,10 @@ const (
DW_ABRV_COMPUNIT
DW_ABRV_COMPUNIT_TEXTLESS
DW_ABRV_FUNCTION
+ DW_ABRV_WRAPPER
DW_ABRV_FUNCTION_ABSTRACT
DW_ABRV_FUNCTION_CONCRETE
+ DW_ABRV_WRAPPER_CONCRETE
DW_ABRV_INLINED_SUBROUTINE
DW_ABRV_INLINED_SUBROUTINE_RANGES
DW_ABRV_VARIABLE
@@ -360,6 +366,7 @@ const (
DW_ABRV_STRINGTYPE
DW_ABRV_STRUCTTYPE
DW_ABRV_TYPEDECL
+ DW_ABRV_DICT_INDEX
DW_NABRV
)
@@ -455,6 +462,19 @@ var abbrevs = [DW_NABRV]dwAbbrev{
},
},
+ /* WRAPPER */
+ {
+ DW_TAG_subprogram,
+ DW_CHILDREN_yes,
+ []dwAttrForm{
+ {DW_AT_name, DW_FORM_string},
+ {DW_AT_low_pc, DW_FORM_addr},
+ {DW_AT_high_pc, DW_FORM_addr},
+ {DW_AT_frame_base, DW_FORM_block1},
+ {DW_AT_trampoline, DW_FORM_flag},
+ },
+ },
+
/* FUNCTION_ABSTRACT */
{
DW_TAG_subprogram,
@@ -478,6 +498,19 @@ var abbrevs = [DW_NABRV]dwAbbrev{
},
},
+ /* WRAPPER_CONCRETE */
+ {
+ DW_TAG_subprogram,
+ DW_CHILDREN_yes,
+ []dwAttrForm{
+ {DW_AT_abstract_origin, DW_FORM_ref_addr},
+ {DW_AT_low_pc, DW_FORM_addr},
+ {DW_AT_high_pc, DW_FORM_addr},
+ {DW_AT_frame_base, DW_FORM_block1},
+ {DW_AT_trampoline, DW_FORM_flag},
+ },
+ },
+
/* INLINED_SUBROUTINE */
{
DW_TAG_inlined_subroutine,
@@ -854,6 +887,17 @@ var abbrevs = [DW_NABRV]dwAbbrev{
{DW_AT_type, DW_FORM_ref_addr},
},
},
+
+ /* DICT_INDEX */
+ {
+ DW_TAG_typedef,
+ DW_CHILDREN_no,
+ []dwAttrForm{
+ {DW_AT_name, DW_FORM_string},
+ {DW_AT_type, DW_FORM_ref_addr},
+ {DW_AT_go_dict_index, DW_FORM_udata},
+ },
+ },
}
// GetAbbrev returns the contents of the .debug_abbrev section.
@@ -1168,6 +1212,9 @@ func putPrunedScopes(ctxt Context, s *FnState, fnabbrev int) error {
sort.Sort(byChildIndex(pruned.Vars))
scopes[k] = pruned
}
+
+ s.dictIndexToOffset = putparamtypes(ctxt, s, scopes, fnabbrev)
+
var encbuf [20]byte
if putscope(ctxt, s, scopes, 0, fnabbrev, encbuf[:0]) < int32(len(scopes)) {
return errors.New("multiple toplevel scopes")
@@ -1329,11 +1376,14 @@ func putInlinedFunc(ctxt Context, s *FnState, callIdx int) error {
// for the function (which holds location-independent attributes such
// as name, type), then the remainder of the attributes are specific
// to this instance (location, frame base, etc).
-func PutConcreteFunc(ctxt Context, s *FnState) error {
+func PutConcreteFunc(ctxt Context, s *FnState, isWrapper bool) error {
if logDwarf {
ctxt.Logf("PutConcreteFunc(%v)\n", s.Info)
}
abbrev := DW_ABRV_FUNCTION_CONCRETE
+ if isWrapper {
+ abbrev = DW_ABRV_WRAPPER_CONCRETE
+ }
Uleb128put(ctxt, s.Info, int64(abbrev))
// Abstract origin.
@@ -1346,6 +1396,10 @@ func PutConcreteFunc(ctxt Context, s *FnState) error {
// cfa / frame base
putattr(ctxt, s.Info, abbrev, DW_FORM_block1, DW_CLS_BLOCK, 1, []byte{DW_OP_call_frame_cfa})
+ if isWrapper {
+ putattr(ctxt, s.Info, abbrev, DW_FORM_flag, DW_CLS_FLAG, int64(1), 0)
+ }
+
// Scopes
if err := putPrunedScopes(ctxt, s, abbrev); err != nil {
return err
@@ -1368,11 +1422,14 @@ func PutConcreteFunc(ctxt Context, s *FnState) error {
// when its containing package was compiled (hence there is no need to
// emit an abstract version for it to use as a base for inlined
// routine records).
-func PutDefaultFunc(ctxt Context, s *FnState) error {
+func PutDefaultFunc(ctxt Context, s *FnState, isWrapper bool) error {
if logDwarf {
ctxt.Logf("PutDefaultFunc(%v)\n", s.Info)
}
abbrev := DW_ABRV_FUNCTION
+ if isWrapper {
+ abbrev = DW_ABRV_WRAPPER
+ }
Uleb128put(ctxt, s.Info, int64(abbrev))
// Expand '"".' to import path.
@@ -1385,13 +1442,16 @@ func PutDefaultFunc(ctxt Context, s *FnState) error {
putattr(ctxt, s.Info, abbrev, DW_FORM_addr, DW_CLS_ADDRESS, 0, s.StartPC)
putattr(ctxt, s.Info, abbrev, DW_FORM_addr, DW_CLS_ADDRESS, s.Size, s.StartPC)
putattr(ctxt, s.Info, abbrev, DW_FORM_block1, DW_CLS_BLOCK, 1, []byte{DW_OP_call_frame_cfa})
- ctxt.AddFileRef(s.Info, s.Filesym)
-
- var ev int64
- if s.External {
- ev = 1
+ if isWrapper {
+ putattr(ctxt, s.Info, abbrev, DW_FORM_flag, DW_CLS_FLAG, int64(1), 0)
+ } else {
+ ctxt.AddFileRef(s.Info, s.Filesym)
+ var ev int64
+ if s.External {
+ ev = 1
+ }
+ putattr(ctxt, s.Info, abbrev, DW_FORM_flag, DW_CLS_FLAG, ev, 0)
}
- putattr(ctxt, s.Info, abbrev, DW_FORM_flag, DW_CLS_FLAG, ev, 0)
// Scopes
if err := putPrunedScopes(ctxt, s, abbrev); err != nil {
@@ -1410,6 +1470,47 @@ func PutDefaultFunc(ctxt Context, s *FnState) error {
return nil
}
+// putparamtypes writes typedef DIEs for any parametric types that are used by this function.
+func putparamtypes(ctxt Context, s *FnState, scopes []Scope, fnabbrev int) []int64 {
+ if fnabbrev == DW_ABRV_FUNCTION_CONCRETE {
+ return nil
+ }
+
+ maxDictIndex := uint16(0)
+
+ for i := range scopes {
+ for _, v := range scopes[i].Vars {
+ if v.DictIndex > maxDictIndex {
+ maxDictIndex = v.DictIndex
+ }
+ }
+ }
+
+ if maxDictIndex == 0 {
+ return nil
+ }
+
+ dictIndexToOffset := make([]int64, maxDictIndex)
+
+ for i := range scopes {
+ for _, v := range scopes[i].Vars {
+ if v.DictIndex == 0 || dictIndexToOffset[v.DictIndex-1] != 0 {
+ continue
+ }
+
+ dictIndexToOffset[v.DictIndex-1] = ctxt.CurrentOffset(s.Info)
+
+ Uleb128put(ctxt, s.Info, int64(DW_ABRV_DICT_INDEX))
+ n := fmt.Sprintf(".param%d", v.DictIndex-1)
+ putattr(ctxt, s.Info, DW_ABRV_DICT_INDEX, DW_FORM_string, DW_CLS_STRING, int64(len(n)), n)
+ putattr(ctxt, s.Info, DW_ABRV_DICT_INDEX, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, v.Type)
+ putattr(ctxt, s.Info, DW_ABRV_DICT_INDEX, DW_FORM_udata, DW_CLS_CONSTANT, int64(v.DictIndex-1), nil)
+ }
+ }
+
+ return dictIndexToOffset
+}
+
func putscope(ctxt Context, s *FnState, scopes []Scope, curscope int32, fnabbrev int, encbuf []byte) int32 {
if logDwarf {
@@ -1489,10 +1590,10 @@ func determineVarAbbrev(v *Var, fnabbrev int) (int, bool, bool) {
// Determine whether to use a concrete variable or regular variable DIE.
concrete := true
switch fnabbrev {
- case DW_ABRV_FUNCTION:
+ case DW_ABRV_FUNCTION, DW_ABRV_WRAPPER:
concrete = false
break
- case DW_ABRV_FUNCTION_CONCRETE:
+ case DW_ABRV_FUNCTION_CONCRETE, DW_ABRV_WRAPPER_CONCRETE:
// If we're emitting a concrete subprogram DIE and the variable
// in question is not part of the corresponding abstract function DIE,
// then use the default (non-concrete) abbrev for this param.
@@ -1583,7 +1684,12 @@ func putvar(ctxt Context, s *FnState, v *Var, absfn Sym, fnabbrev, inlIndex int,
putattr(ctxt, s.Info, abbrev, DW_FORM_flag, DW_CLS_FLAG, isReturn, nil)
}
putattr(ctxt, s.Info, abbrev, DW_FORM_udata, DW_CLS_CONSTANT, int64(v.DeclLine), nil)
- putattr(ctxt, s.Info, abbrev, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, v.Type)
+ if v.DictIndex > 0 && s.dictIndexToOffset != nil && s.dictIndexToOffset[v.DictIndex-1] != 0 {
+ // If the type of this variable is parametric use the entry emitted by putparamtypes
+ putattr(ctxt, s.Info, abbrev, DW_FORM_ref_addr, DW_CLS_REFERENCE, s.dictIndexToOffset[v.DictIndex-1], s.Info)
+ } else {
+ putattr(ctxt, s.Info, abbrev, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, v.Type)
+ }
}
if abbrevUsesLoclist(abbrev) {
diff --git a/src/cmd/internal/goobj/objfile.go b/src/cmd/internal/goobj/objfile.go
index e2858bd57d..20bf0eba89 100644
--- a/src/cmd/internal/goobj/objfile.go
+++ b/src/cmd/internal/goobj/objfile.go
@@ -304,6 +304,7 @@ const (
const (
SymFlagUsedInIface = 1 << iota
SymFlagItab
+ SymFlagDict
)
// Returns the length of the name of the symbol.
@@ -333,6 +334,7 @@ func (s *Sym) ReflectMethod() bool { return s.Flag()&SymFlagReflectMethod != 0 }
func (s *Sym) IsGoType() bool { return s.Flag()&SymFlagGoType != 0 }
func (s *Sym) UsedInIface() bool { return s.Flag2()&SymFlagUsedInIface != 0 }
func (s *Sym) IsItab() bool { return s.Flag2()&SymFlagItab != 0 }
+func (s *Sym) IsDict() bool { return s.Flag2()&SymFlagDict != 0 }
func (s *Sym) SetName(x string, w *Writer) {
binary.LittleEndian.PutUint32(s[:], uint32(len(x)))
diff --git a/src/cmd/internal/obj/arm64/a.out.go b/src/cmd/internal/obj/arm64/a.out.go
index bf75bb4a89..aa7c54df9a 100644
--- a/src/cmd/internal/obj/arm64/a.out.go
+++ b/src/cmd/internal/obj/arm64/a.out.go
@@ -1060,9 +1060,10 @@ const (
const (
// shift types
- SHIFT_LL = 0 << 22
- SHIFT_LR = 1 << 22
- SHIFT_AR = 2 << 22
+ SHIFT_LL = 0 << 22
+ SHIFT_LR = 1 << 22
+ SHIFT_AR = 2 << 22
+ SHIFT_ROR = 3 << 22
)
// Arrangement for ARM64 SIMD instructions
diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go
index 8db25cf967..5d6caaed5f 100644
--- a/src/cmd/internal/obj/arm64/asm7.go
+++ b/src/cmd/internal/obj/arm64/asm7.go
@@ -3407,6 +3407,9 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) {
o4 = os[3]
case 13: /* addop $vcon, [R], R (64 bit literal); cmp $lcon,R -> addop $lcon,R, ZR */
+ if p.Reg == REGTMP {
+ c.ctxt.Diag("cannot use REGTMP as source: %v\n", p)
+ }
if p.To.Reg == REG_RSP && isADDSop(p.As) {
c.ctxt.Diag("illegal destination register: %v\n", p)
}
@@ -3724,6 +3727,9 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 |= (uint32(r&31) << 5) | uint32(rt&31)
case 28: /* logop $vcon, [R], R (64 bit literal) */
+ if p.Reg == REGTMP {
+ c.ctxt.Diag("cannot use REGTMP as source: %v\n", p)
+ }
o := uint32(0)
num := uint8(0)
cls := oclass(&p.From)
@@ -4354,6 +4360,9 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) {
/* reloc ops */
case 64: /* movT R,addr -> adrp + add + movT R, (REGTMP) */
+ if p.From.Reg == REGTMP {
+ c.ctxt.Diag("cannot use REGTMP as source: %v\n", p)
+ }
o1 = ADR(1, 0, REGTMP)
o2 = c.opirr(p, AADD) | REGTMP&31<<5 | REGTMP&31
rel := obj.Addrel(c.cursym)
@@ -4585,6 +4594,9 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) {
// add Rtmp, R, Rtmp
// ldp (Rtmp), (R1, R2)
r := int(p.From.Reg)
+ if r == REGTMP {
+ c.ctxt.Diag("REGTMP used in large offset load: %v", p)
+ }
if r == obj.REG_NONE {
r = int(o.param)
}
@@ -4601,6 +4613,9 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) {
case 76:
// add $O, R, Rtmp or sub $O, R, Rtmp
// stp (R1, R2), (Rtmp)
+ if p.From.Reg == REGTMP || p.From.Offset == REGTMP {
+ c.ctxt.Diag("cannot use REGTMP as source: %v", p)
+ }
r := int(p.To.Reg)
if r == obj.REG_NONE {
r = int(o.param)
@@ -4628,6 +4643,9 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) {
// add Rtmp, R, Rtmp
// stp (R1, R2), (Rtmp)
r := int(p.To.Reg)
+ if r == REGTMP || p.From.Reg == REGTMP || p.From.Offset == REGTMP {
+ c.ctxt.Diag("REGTMP used in large offset store: %v", p)
+ }
if r == obj.REG_NONE {
r = int(o.param)
}
@@ -4933,6 +4951,9 @@ func (c *ctxt7) asmout(p *obj.Prog, o *Optab, out []uint32) {
o1 |= (uint32(Q&1) << 30) | (uint32((r>>5)&7) << 16) | (uint32(r&0x1f) << 5) | uint32(rt&31)
case 87: /* stp (r,r), addr(SB) -> adrp + add + stp */
+ if p.From.Reg == REGTMP || p.From.Offset == REGTMP {
+ c.ctxt.Diag("cannot use REGTMP as source: %v", p)
+ }
o1 = ADR(1, 0, REGTMP)
o2 = c.opirr(p, AADD) | REGTMP&31<<5 | REGTMP&31
rel := obj.Addrel(c.cursym)
diff --git a/src/cmd/internal/obj/dwarf.go b/src/cmd/internal/obj/dwarf.go
index 6dd53ffd12..29e367aa4c 100644
--- a/src/cmd/internal/obj/dwarf.go
+++ b/src/cmd/internal/obj/dwarf.go
@@ -378,9 +378,9 @@ func (ctxt *Link) populateDWARF(curfn interface{}, s *LSym, myimportpath string)
if err != nil {
ctxt.Diag("emitting DWARF for %s failed: %v", s.Name, err)
}
- err = dwarf.PutConcreteFunc(dwctxt, fnstate)
+ err = dwarf.PutConcreteFunc(dwctxt, fnstate, s.Wrapper())
} else {
- err = dwarf.PutDefaultFunc(dwctxt, fnstate)
+ err = dwarf.PutDefaultFunc(dwctxt, fnstate, s.Wrapper())
}
if err != nil {
ctxt.Diag("emitting DWARF for %s failed: %v", s.Name, err)
diff --git a/src/cmd/internal/obj/objfile.go b/src/cmd/internal/obj/objfile.go
index 01466ea736..910e6ef0d9 100644
--- a/src/cmd/internal/obj/objfile.go
+++ b/src/cmd/internal/obj/objfile.go
@@ -340,6 +340,9 @@ func (w *writer) Sym(s *LSym) {
if strings.HasPrefix(s.Name, "go.itab.") && s.Type == objabi.SRODATA {
flag2 |= goobj.SymFlagItab
}
+ if strings.HasPrefix(s.Name, w.ctxt.Pkgpath) && strings.HasPrefix(s.Name[len(w.ctxt.Pkgpath):], "..dict") {
+ flag2 |= goobj.SymFlagDict
+ }
name := s.Name
if strings.HasPrefix(name, "gofile..") {
name = filepath.ToSlash(name)
diff --git a/src/cmd/internal/obj/ppc64/a.out.go b/src/cmd/internal/obj/ppc64/a.out.go
index e57beb3276..dda24a0b96 100644
--- a/src/cmd/internal/obj/ppc64/a.out.go
+++ b/src/cmd/internal/obj/ppc64/a.out.go
@@ -79,8 +79,10 @@ const (
REG_R30
REG_R31
- /* F0=4128 ... F31=4159 */
- REG_F0
+ /* Align FPR and VSR vectors such that when masked with 0x3F they produce
+ an equivalent VSX register. */
+ /* F0=4160 ... F31=4191 */
+ REG_F0 = obj.RBasePPC64 + iota + 32
REG_F1
REG_F2
REG_F3
@@ -113,7 +115,7 @@ const (
REG_F30
REG_F31
- /* V0=4160 ... V31=4191 */
+ /* V0=4192 ... V31=4223 */
REG_V0
REG_V1
REG_V2
@@ -147,7 +149,7 @@ const (
REG_V30
REG_V31
- /* VS0=4192 ... VS63=4255 */
+ /* VS0=4224 ... VS63=4287 */
REG_VS0
REG_VS1
REG_VS2
diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go
index e642413590..1d92c4866f 100644
--- a/src/cmd/internal/obj/ppc64/asm9.go
+++ b/src/cmd/internal/obj/ppc64/asm9.go
@@ -428,15 +428,13 @@ var optab = []Optab{
{as: ASTXSIWX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
/* VSX move from VSR */
- {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4}, /* vsx move from vsr, xx1-form */
+ {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
{as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
- {as: AMFVSRD, a1: C_VREG, a6: C_REG, type_: 88, size: 4},
/* VSX move to VSR */
- {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 88, size: 4}, /* vsx move to vsr, xx1-form */
- {as: AMTVSRD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 88, size: 4},
- {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 88, size: 4},
- {as: AMTVSRD, a1: C_REG, a6: C_VREG, type_: 88, size: 4},
+ {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
+ {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
+ {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
/* VSX logical */
{as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
@@ -1036,13 +1034,14 @@ func (c *ctxt9) oplook(p *obj.Prog) *Optab {
// c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
ops := oprange[p.As&obj.AMask]
c1 := &xcmp[a1]
+ c2 := &xcmp[a2]
c3 := &xcmp[a3]
c4 := &xcmp[a4]
c5 := &xcmp[a5]
c6 := &xcmp[a6]
for i := range ops {
op := &ops[i]
- if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
+ if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
return op
}
@@ -1116,6 +1115,12 @@ func cmp(a int, b int) bool {
return r0iszero != 0 /*TypeKind(100016)*/
}
+ case C_VSREG:
+ /* Allow any VR argument as a VSR operand. */
+ if b == C_VREG {
+ return true
+ }
+
case C_ANY:
return true
}
@@ -1594,7 +1599,6 @@ func buildop(ctxt *obj.Link) {
opset(AMTVRD, r0)
opset(AMTVSRWA, r0)
opset(AMTVSRWZ, r0)
- opset(AMTVSRDD, r0)
opset(AMTVSRWS, r0)
case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
@@ -1977,6 +1981,7 @@ func buildop(ctxt *obj.Link) {
ACMPEQB,
AECIWX,
ACLRLSLWI,
+ AMTVSRDD,
obj.ANOP,
obj.ATEXT,
obj.AUNDEF,
@@ -2075,50 +2080,32 @@ func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
}
/* XX1-form 3-register operands, 1 VSR operand */
-func AOP_XX1(op uint32, d uint32, a uint32, b uint32) uint32 {
- /* For the XX-form encodings, we need the VSX register number to be exactly */
- /* between 0-63, so we can properly set the rightmost bits. */
- r := d - REG_VS0
+func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
}
/* XX2-form 3-register operands, 2 VSR operands */
-func AOP_XX2(op uint32, d uint32, a uint32, b uint32) uint32 {
- xt := d - REG_VS0
- xb := b - REG_VS0
+func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
}
/* XX3-form 3 VSR operands */
-func AOP_XX3(op uint32, d uint32, a uint32, b uint32) uint32 {
- xt := d - REG_VS0
- xa := a - REG_VS0
- xb := b - REG_VS0
+func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
}
/* XX3-form 3 VSR operands + immediate */
-func AOP_XX3I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
- xt := d - REG_VS0
- xa := a - REG_VS0
- xb := b - REG_VS0
+func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
}
/* XX4-form, 4 VSR operands */
-func AOP_XX4(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
- xt := d - REG_VS0
- xa := a - REG_VS0
- xb := b - REG_VS0
- xc := c - REG_VS0
+func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
}
/* DQ-form, VSR register, register + offset operands */
-func AOP_DQ(op uint32, d uint32, a uint32, b uint32) uint32 {
- /* For the DQ-form encodings, we need the VSX register number to be exactly */
- /* between 0-63, so we can properly set the SX bit. */
- r := d - REG_VS0
+func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
/* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
/* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
/* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
@@ -2126,7 +2113,7 @@ func AOP_DQ(op uint32, d uint32, a uint32, b uint32) uint32 {
/* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
/* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
dq := b >> 4
- return op | (r&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (r&32)>>2
+ return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
}
/* Z23-form, 3-register operands + CY field */
@@ -3586,33 +3573,8 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
/* 3-register operand order: (RB)(RA*1), XT */
o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
- case 88: /* VSX instructions, XX1-form */
- /* reg reg none OR reg reg reg */
- /* 3-register operand order: RA, RB, XT */
- /* 2-register operand order: XS, RA or RA, XT */
- xt := int32(p.To.Reg)
- xs := int32(p.From.Reg)
- /* We need to treat the special case of extended mnemonics that may have a FREG/VREG as an argument */
- if REG_V0 <= xt && xt <= REG_V31 {
- /* Convert V0-V31 to VS32-VS63 */
- xt = xt + 64
- o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
- } else if REG_F0 <= xt && xt <= REG_F31 {
- /* Convert F0-F31 to VS0-VS31 */
- xt = xt + 64
- o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
- } else if REG_VS0 <= xt && xt <= REG_VS63 {
- o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
- } else if REG_V0 <= xs && xs <= REG_V31 {
- /* Likewise for XS */
- xs = xs + 64
- o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
- } else if REG_F0 <= xs && xs <= REG_F31 {
- xs = xs + 64
- o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
- } else if REG_VS0 <= xs && xs <= REG_VS63 {
- o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
- }
+ case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
+ o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
case 89: /* VSX instructions, XX2-form */
/* reg none reg OR reg imm reg */
@@ -3743,6 +3705,9 @@ func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
mb := uint32(c.regoff(&p.RestArgs[0].Addr))
me := uint32(c.regoff(&p.RestArgs[1].Addr))
o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
+
+ case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
+ o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
}
out[0] = o1
diff --git a/src/cmd/internal/obj/ppc64/asm_test.go b/src/cmd/internal/obj/ppc64/asm_test.go
index 70dabc2017..b851d3c86b 100644
--- a/src/cmd/internal/obj/ppc64/asm_test.go
+++ b/src/cmd/internal/obj/ppc64/asm_test.go
@@ -107,3 +107,44 @@ func TestPCalign(t *testing.T) {
t.Errorf("Invalid alignment not detected for PCALIGN\n")
}
}
+
+// Verify register constants are correctly aligned. Much of the ppc64 assembler assumes masking out significant
+// bits will produce a valid register number:
+// REG_Rx & 31 == x
+// REG_Fx & 31 == x
+// REG_Vx & 31 == x
+// REG_VSx & 63 == x
+// REG_SPRx & 1023 == x
+// REG_CRx & 7 == x
+//
+// VR and FPR disjointly overlap VSR, interpreting as VSR registers should produce the correctly overlapped VSR.
+// REG_FPx & 63 == x
+// REG_Vx & 63 == x + 32
+func TestRegValueAlignment(t *testing.T) {
+ tstFunc := func(rstart, rend, msk, rout int) {
+ for i := rstart; i <= rend; i++ {
+ if i&msk != rout {
+ t.Errorf("%v is not aligned to 0x%X (expected %d, got %d)\n", rconv(i), msk, rout, rstart&msk)
+ }
+ rout++
+ }
+ }
+ var testType = []struct {
+ rstart int
+ rend int
+ msk int
+ rout int
+ }{
+ {REG_VS0, REG_VS63, 63, 0},
+ {REG_R0, REG_R31, 31, 0},
+ {REG_F0, REG_F31, 31, 0},
+ {REG_V0, REG_V31, 31, 0},
+ {REG_V0, REG_V31, 63, 32},
+ {REG_F0, REG_F31, 63, 0},
+ {REG_SPR0, REG_SPR0 + 1023, 1023, 0},
+ {REG_CR0, REG_CR7, 7, 0},
+ }
+ for _, t := range testType {
+ tstFunc(t.rstart, t.rend, t.msk, t.rout)
+ }
+}
diff --git a/src/cmd/internal/obj/ppc64/obj9.go b/src/cmd/internal/obj/ppc64/obj9.go
index c2722b0afb..ee93fe048b 100644
--- a/src/cmd/internal/obj/ppc64/obj9.go
+++ b/src/cmd/internal/obj/ppc64/obj9.go
@@ -294,9 +294,9 @@ func (c *ctxt9) rewriteToUseGot(p *obj.Prog) {
// BL (LR)
var sym *obj.LSym
if p.As == obj.ADUFFZERO {
- sym = c.ctxt.Lookup("runtime.duffzero")
+ sym = c.ctxt.LookupABI("runtime.duffzero", obj.ABIInternal)
} else {
- sym = c.ctxt.Lookup("runtime.duffcopy")
+ sym = c.ctxt.LookupABI("runtime.duffcopy", obj.ABIInternal)
}
offset := p.To.Offset
p.As = AMOVD
@@ -687,7 +687,6 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
q.From.Reg = REG_LR
q.To.Type = obj.TYPE_REG
q.To.Reg = REGTMP
-
prologueEnd = q
q = obj.Appendp(q, c.newprog)
@@ -787,14 +786,14 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
q.From.Reg = REGG
q.From.Offset = 4 * int64(c.ctxt.Arch.PtrSize) // G.panic
q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R3
+ q.To.Reg = REG_R22
q = obj.Appendp(q, c.newprog)
q.As = ACMP
q.From.Type = obj.TYPE_REG
q.From.Reg = REG_R0
q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R3
+ q.To.Reg = REG_R22
q = obj.Appendp(q, c.newprog)
q.As = ABEQ
@@ -804,10 +803,10 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
q = obj.Appendp(q, c.newprog)
q.As = AMOVD
q.From.Type = obj.TYPE_MEM
- q.From.Reg = REG_R3
+ q.From.Reg = REG_R22
q.From.Offset = 0 // Panic.argp
q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R4
+ q.To.Reg = REG_R23
q = obj.Appendp(q, c.newprog)
q.As = AADD
@@ -815,14 +814,14 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
q.From.Offset = int64(autosize) + c.ctxt.FixedFrameSize()
q.Reg = REGSP
q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R5
+ q.To.Reg = REG_R24
q = obj.Appendp(q, c.newprog)
q.As = ACMP
q.From.Type = obj.TYPE_REG
- q.From.Reg = REG_R4
+ q.From.Reg = REG_R23
q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R5
+ q.To.Reg = REG_R24
q = obj.Appendp(q, c.newprog)
q.As = ABNE
@@ -835,14 +834,14 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
q.From.Offset = c.ctxt.FixedFrameSize()
q.Reg = REGSP
q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_R6
+ q.To.Reg = REG_R25
q = obj.Appendp(q, c.newprog)
q.As = AMOVD
q.From.Type = obj.TYPE_REG
- q.From.Reg = REG_R6
+ q.From.Reg = REG_R25
q.To.Type = obj.TYPE_MEM
- q.To.Reg = REG_R3
+ q.To.Reg = REG_R22
q.To.Offset = 0 // Panic.argp
q = obj.Appendp(q, c.newprog)
@@ -1051,7 +1050,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
func (c *ctxt9) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p0 := p // save entry point, but skipping the two instructions setting R2 in shared mode
- // MOVD g_stackguard(g), R3
+ // MOVD g_stackguard(g), R22
p = obj.Appendp(p, c.newprog)
p.As = AMOVD
@@ -1062,7 +1061,7 @@ func (c *ctxt9) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p.From.Offset = 3 * int64(c.ctxt.Arch.PtrSize) // G.stackguard1
}
p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R3
+ p.To.Reg = REG_R22
// Mark the stack bound check and morestack call async nonpreemptible.
// If we get preempted here, when resumed the preemption request is
@@ -1078,7 +1077,7 @@ func (c *ctxt9) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p.As = ACMPU
p.From.Type = obj.TYPE_REG
- p.From.Reg = REG_R3
+ p.From.Reg = REG_R22
p.To.Type = obj.TYPE_REG
p.To.Reg = REGSP
} else {
@@ -1108,14 +1107,14 @@ func (c *ctxt9) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p.From.Type = obj.TYPE_CONST
p.From.Offset = offset
p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R4
+ p.To.Reg = REG_R23
p = obj.Appendp(p, c.newprog)
p.As = ACMPU
p.From.Type = obj.TYPE_REG
p.From.Reg = REGSP
p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R4
+ p.To.Reg = REG_R23
}
p = obj.Appendp(p, c.newprog)
@@ -1134,14 +1133,14 @@ func (c *ctxt9) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p.From.Offset = -offset
p.Reg = REGSP
p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R4
+ p.To.Reg = REG_R23
p = obj.Appendp(p, c.newprog)
p.As = ACMPU
p.From.Type = obj.TYPE_REG
- p.From.Reg = REG_R3
+ p.From.Reg = REG_R22
p.To.Type = obj.TYPE_REG
- p.To.Reg = REG_R4
+ p.To.Reg = REG_R23
}
// q1: BLT done
@@ -1151,17 +1150,25 @@ func (c *ctxt9) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p.As = ABLT
p.To.Type = obj.TYPE_BRANCH
- // MOVD LR, R5
p = obj.Appendp(p, c.newprog)
+ p.As = obj.ANOP // zero-width place holder
+
+ if q != nil {
+ q.To.SetTarget(p)
+ }
+
+ // Spill the register args that could be clobbered by the
+ // morestack code.
+ spill := c.cursym.Func().SpillRegisterArgs(p, c.newprog)
+
+ // MOVD LR, R5
+ p = obj.Appendp(spill, c.newprog)
p.As = AMOVD
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_LR
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R5
- if q != nil {
- q.To.SetTarget(p)
- }
p = c.ctxt.EmitEntryStackMap(c.cursym, p, c.newprog)
@@ -1181,8 +1188,7 @@ func (c *ctxt9) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
// Fortunately, in shared mode, 8(SP) and 16(SP) are reserved in
// the caller's frame, but not used (0(SP) is caller's saved LR,
// 24(SP) is caller's saved R2). Use 8(SP) to save this function's R2.
-
- // MOVD R12, 8(SP)
+ // MOVD R2, 8(SP)
p = obj.Appendp(p, c.newprog)
p.As = AMOVD
p.From.Type = obj.TYPE_REG
@@ -1249,7 +1255,8 @@ func (c *ctxt9) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
p.To.Reg = REG_R2
}
- p = c.ctxt.EndUnsafePoint(p, c.newprog, -1)
+ unspill := c.cursym.Func().UnspillRegisterArgs(p, c.newprog)
+ p = c.ctxt.EndUnsafePoint(unspill, c.newprog, -1)
// BR start
p = obj.Appendp(p, c.newprog)
diff --git a/src/cmd/internal/obj/riscv/anames.go b/src/cmd/internal/obj/riscv/anames.go
index 6581bb3402..d2a3674ebe 100644
--- a/src/cmd/internal/obj/riscv/anames.go
+++ b/src/cmd/internal/obj/riscv/anames.go
@@ -236,6 +236,8 @@ var Anames = []string{
"BLEZ",
"BLTZ",
"BNEZ",
+ "FABSD",
+ "FABSS",
"FNEGD",
"FNEGS",
"FNED",
diff --git a/src/cmd/internal/obj/riscv/cpu.go b/src/cmd/internal/obj/riscv/cpu.go
index 1519dc1a63..a258367ae9 100644
--- a/src/cmd/internal/obj/riscv/cpu.go
+++ b/src/cmd/internal/obj/riscv/cpu.go
@@ -590,6 +590,8 @@ const (
ABLEZ
ABLTZ
ABNEZ
+ AFABSD
+ AFABSS
AFNEGD
AFNEGS
AFNED
diff --git a/src/cmd/internal/obj/riscv/obj.go b/src/cmd/internal/obj/riscv/obj.go
index fafde64062..f0ea21de97 100644
--- a/src/cmd/internal/obj/riscv/obj.go
+++ b/src/cmd/internal/obj/riscv/obj.go
@@ -1003,6 +1003,7 @@ func validateII(ctxt *obj.Link, ins *instruction) {
wantImmI(ctxt, ins.as, ins.imm, 12)
wantIntReg(ctxt, ins.as, "rd", ins.rd)
wantIntReg(ctxt, ins.as, "rs1", ins.rs1)
+ wantNoneReg(ctxt, ins.as, "rs2", ins.rs2)
wantNoneReg(ctxt, ins.as, "rs3", ins.rs3)
}
@@ -1010,6 +1011,7 @@ func validateIF(ctxt *obj.Link, ins *instruction) {
wantImmI(ctxt, ins.as, ins.imm, 12)
wantFloatReg(ctxt, ins.as, "rd", ins.rd)
wantIntReg(ctxt, ins.as, "rs1", ins.rs1)
+ wantNoneReg(ctxt, ins.as, "rs2", ins.rs2)
wantNoneReg(ctxt, ins.as, "rs3", ins.rs3)
}
@@ -1017,6 +1019,7 @@ func validateSI(ctxt *obj.Link, ins *instruction) {
wantImmI(ctxt, ins.as, ins.imm, 12)
wantIntReg(ctxt, ins.as, "rd", ins.rd)
wantIntReg(ctxt, ins.as, "rs1", ins.rs1)
+ wantNoneReg(ctxt, ins.as, "rs2", ins.rs2)
wantNoneReg(ctxt, ins.as, "rs3", ins.rs3)
}
@@ -1024,6 +1027,7 @@ func validateSF(ctxt *obj.Link, ins *instruction) {
wantImmI(ctxt, ins.as, ins.imm, 12)
wantIntReg(ctxt, ins.as, "rd", ins.rd)
wantFloatReg(ctxt, ins.as, "rs1", ins.rs1)
+ wantNoneReg(ctxt, ins.as, "rs2", ins.rs2)
wantNoneReg(ctxt, ins.as, "rs3", ins.rs3)
}
@@ -1567,7 +1571,7 @@ func instructionForProg(p *obj.Prog) *instruction {
func instructionsForOpImmediate(p *obj.Prog, as obj.As, rs int16) []*instruction {
// <opi> $imm, REG, TO
ins := instructionForProg(p)
- ins.as, ins.rs1 = as, uint32(rs)
+ ins.as, ins.rs1, ins.rs2 = as, uint32(rs), obj.REG_NONE
low, high, err := Split32BitImmediate(ins.imm)
if err != nil {
@@ -1990,7 +1994,7 @@ func instructionsForProg(p *obj.Prog) []*instruction {
case ASEQZ:
// SEQZ rs, rd -> SLTIU $1, rs, rd
ins.as = ASLTIU
- ins.rs1 = uint32(p.From.Reg)
+ ins.rs1, ins.rs2 = uint32(p.From.Reg), obj.REG_NONE
ins.imm = 1
case ASNEZ:
@@ -1998,6 +2002,16 @@ func instructionsForProg(p *obj.Prog) []*instruction {
ins.as = ASLTU
ins.rs1 = REG_ZERO
+ case AFABSS:
+ // FABSS rs, rd -> FSGNJXS rs, rs, rd
+ ins.as = AFSGNJXS
+ ins.rs1 = uint32(p.From.Reg)
+
+ case AFABSD:
+ // FABSD rs, rd -> FSGNJXD rs, rs, rd
+ ins.as = AFSGNJXD
+ ins.rs1 = uint32(p.From.Reg)
+
case AFNEGS:
// FNEGS rs, rd -> FSGNJNS rs, rs, rd
ins.as = AFSGNJNS
diff --git a/src/cmd/internal/obj/util.go b/src/cmd/internal/obj/util.go
index e8441a6969..0c9dde7965 100644
--- a/src/cmd/internal/obj/util.go
+++ b/src/cmd/internal/obj/util.go
@@ -7,6 +7,7 @@ package obj
import (
"bytes"
"cmd/internal/objabi"
+ "cmd/internal/src"
"fmt"
"internal/buildcfg"
"io"
@@ -47,6 +48,10 @@ func (p *Prog) InnermostFilename() string {
return pos.Filename()
}
+func (p *Prog) AllPos(result []src.Pos) []src.Pos {
+ return p.Ctxt.AllPos(p.Pos, result)
+}
+
var armCondCode = []string{
".EQ",
".NE",