author    Russ Cox <rsc@golang.org>  2021-04-15 23:05:49 -0400
committer Russ Cox <rsc@golang.org>  2021-04-16 19:20:53 +0000
commit    95ed5c3800a87ddf9b0ec3958eaaa1a969306293 (patch)
tree      cb0c555f10ab706a5c491cbe48dd36da16658a1e /src/cmd/compile/internal/ssa/gen
parent    2fc0ebb623e2859094ad3f41e61325df0c2163f8 (diff)
download  go-95ed5c3800a87ddf9b0ec3958eaaa1a969306293.tar.gz
          go-95ed5c3800a87ddf9b0ec3958eaaa1a969306293.zip
internal/buildcfg: move build configuration out of cmd/internal/objabi

The go/build package needs access to this configuration, so move it
into a new package available to the standard library.

Change-Id: I868a94148b52350c76116451f4ad9191246adcff
Reviewed-on: https://go-review.googlesource.com/c/go/+/310731
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Reviewed-by: Jay Conrod <jayconrod@google.com>
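As a minimal illustration of the new import path (the helper below is hypothetical and not part of this change; GOARM is one of the fields the ARM rules below consult), toolchain code now reads this configuration as:

package example // hypothetical; internal/buildcfg is importable only from std and cmd packages

import "internal/buildcfg"

// hasARMv6Ops illustrates the style of check the rewrite rules below perform:
// GOARM, GOPPC64, GOWASM and the Experiment flags are read from
// internal/buildcfg rather than cmd/internal/objabi.
func hasARMv6Ops() bool {
	return buildcfg.GOARM >= 6
}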
Diffstat (limited to 'src/cmd/compile/internal/ssa/gen')
-rw-r--r--  src/cmd/compile/internal/ssa/gen/AMD64.rules  |  2
-rw-r--r--  src/cmd/compile/internal/ssa/gen/ARM.rules    | 58
-rw-r--r--  src/cmd/compile/internal/ssa/gen/PPC64.rules  | 38
-rw-r--r--  src/cmd/compile/internal/ssa/gen/Wasm.rules   |  6
-rw-r--r--  src/cmd/compile/internal/ssa/gen/rulegen.go   |  2
5 files changed, 53 insertions(+), 53 deletions(-)
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
index 839d4a330e..7a88a488c0 100644
--- a/src/cmd/compile/internal/ssa/gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -460,7 +460,7 @@
(IsInBounds idx len) => (SETB (CMPQ idx len))
(IsSliceInBounds idx len) => (SETBE (CMPQ idx len))
(NilCheck ...) => (LoweredNilCheck ...)
-(GetG mem) && !(objabi.Experiment.RegabiG && v.Block.Func.OwnAux.Fn.ABI() == obj.ABIInternal) => (LoweredGetG mem) // only lower in old ABI. in new ABI we have a G register.
+(GetG mem) && !(buildcfg.Experiment.RegabiG && v.Block.Func.OwnAux.Fn.ABI() == obj.ABIInternal) => (LoweredGetG mem) // only lower in old ABI. in new ABI we have a G register.
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)
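For orientation (not part of the patch): a rule's "&&" clause is an ordinary Go expression in the generated rewriter, which is why this change is a purely textual rename. A hypothetical standalone restatement of the GetG guard above, using the flag and ABI constant that appear in the rule:

package example // hypothetical; these packages are importable only inside cmd

import (
	"cmd/internal/obj"
	"internal/buildcfg"
)

// lowerGetG restates the condition on the GetG rule: lower to LoweredGetG
// only under the old ABI; with the register-ABI G experiment, ABIInternal
// functions keep g in a dedicated register and need no load.
func lowerGetG(fnABI obj.ABI) bool {
	return !(buildcfg.Experiment.RegabiG && fnABI == obj.ABIInternal)
}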
diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules
index 5c6438a986..bcacbafe3a 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM.rules
@@ -66,17 +66,17 @@
// count trailing zero for ARMv5 and ARMv6
// 32 - CLZ(x&-x - 1)
-(Ctz32 <t> x) && objabi.GOARM<=6 =>
+(Ctz32 <t> x) && buildcfg.GOARM<=6 =>
(RSBconst [32] (CLZ <t> (SUBconst <t> (AND <t> x (RSBconst <t> [0] x)) [1])))
-(Ctz16 <t> x) && objabi.GOARM<=6 =>
+(Ctz16 <t> x) && buildcfg.GOARM<=6 =>
(RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x10000] x))) [1])))
-(Ctz8 <t> x) && objabi.GOARM<=6 =>
+(Ctz8 <t> x) && buildcfg.GOARM<=6 =>
(RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x100] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x100] x))) [1])))
// count trailing zero for ARMv7
-(Ctz32 <t> x) && objabi.GOARM==7 => (CLZ <t> (RBIT <t> x))
-(Ctz16 <t> x) && objabi.GOARM==7 => (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
-(Ctz8 <t> x) && objabi.GOARM==7 => (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
+(Ctz32 <t> x) && buildcfg.GOARM==7 => (CLZ <t> (RBIT <t> x))
+(Ctz16 <t> x) && buildcfg.GOARM==7 => (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
+(Ctz8 <t> x) && buildcfg.GOARM==7 => (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
// bit length
(BitLen32 <t> x) => (RSBconst [32] (CLZ <t> x))
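The ARMv5/ARMv6 lowering above relies on the identity ctz(x) = 32 - clz((x & -x) - 1). A small self-contained Go check of that identity (illustration only, not part of the change):

package main

import (
	"fmt"
	"math/bits"
)

// ctz32ARMv5 mirrors the ARMv5/ARMv6 lowering: x & -x isolates the lowest set
// bit, subtracting 1 turns it into a mask of the trailing zeros, and 32 minus
// the leading-zero count of that mask is the trailing-zero count of x.
func ctz32ARMv5(x uint32) int {
	return 32 - bits.LeadingZeros32(x&-x-1)
}

func main() {
	for _, x := range []uint32{0, 1, 8, 0x80000000, 0xfffffff0} {
		fmt.Println(x, ctz32ARMv5(x), bits.TrailingZeros32(x)) // the last two agree
	}
}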
@@ -90,13 +90,13 @@
// t5 = x right rotate 8 bits -- (d, a, b, c )
// result = t4 ^ t5 -- (d, c, b, a )
// using shifted ops this can be done in 4 instructions.
-(Bswap32 <t> x) && objabi.GOARM==5 =>
+(Bswap32 <t> x) && buildcfg.GOARM==5 =>
(XOR <t>
(SRLconst <t> (BICconst <t> (XOR <t> x (SRRconst <t> [16] x)) [0xff0000]) [8])
(SRRconst <t> x [8]))
// byte swap for ARMv6 and above
-(Bswap32 x) && objabi.GOARM>=6 => (REV x)
+(Bswap32 x) && buildcfg.GOARM>=6 => (REV x)
// boolean ops -- booleans are represented with 0=false, 1=true
(AndB ...) => (AND ...)
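The ARMv5 Bswap32 rule above is the classic rotate/xor byte swap sketched out step by step in the comment. A minimal Go check of the identity (illustration only):

package main

import (
	"fmt"
	"math/bits"
)

// bswap32ARMv5 follows the lowering: t = (x ^ ror(x,16)) with byte 2 cleared,
// then result = ror(x,8) ^ (t >> 8), which reverses the four bytes of x.
func bswap32ARMv5(x uint32) uint32 {
	t := (x ^ bits.RotateLeft32(x, -16)) &^ 0x00ff0000
	return bits.RotateLeft32(x, -8) ^ (t >> 8)
}

func main() {
	x := uint32(0x11223344)
	fmt.Printf("%#08x %#08x\n", bswap32ARMv5(x), bits.ReverseBytes32(x)) // both 0x44332211
}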
@@ -737,10 +737,10 @@
(SUBconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) => (ADDconst [-c] x)
(ANDconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) => (BICconst [int32(^uint32(c))] x)
(BICconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) => (ANDconst [int32(^uint32(c))] x)
-(ADDconst [c] x) && objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (SUBconst [-c] x)
-(SUBconst [c] x) && objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (ADDconst [-c] x)
-(ANDconst [c] x) && objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (BICconst [int32(^uint32(c))] x)
-(BICconst [c] x) && objabi.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (ANDconst [int32(^uint32(c))] x)
+(ADDconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (SUBconst [-c] x)
+(SUBconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (ADDconst [-c] x)
+(ANDconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (BICconst [int32(^uint32(c))] x)
+(BICconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (ANDconst [int32(^uint32(c))] x)
(ADDconst [c] (MOVWconst [d])) => (MOVWconst [c+d])
(ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x)
(ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x)
@@ -1144,7 +1144,7 @@
// UBFX instruction is supported by ARMv6T2, ARMv7 and above versions, REV16 is supported by
// ARMv6 and above versions. So for ARMv6, we need to match SLLconst, SRLconst and ORshiftLL.
((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x) => (REV16 x)
-((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x) && objabi.GOARM>=6 => (REV16 x)
+((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x) && buildcfg.GOARM>=6 => (REV16 x)
// use indexed loads and stores
(MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVWloadidx ptr idx mem)
@@ -1214,25 +1214,25 @@
(BIC x x) => (MOVWconst [0])
(ADD (MUL x y) a) => (MULA x y a)
-(SUB a (MUL x y)) && objabi.GOARM == 7 => (MULS x y a)
-(RSB (MUL x y) a) && objabi.GOARM == 7 => (MULS x y a)
+(SUB a (MUL x y)) && buildcfg.GOARM == 7 => (MULS x y a)
+(RSB (MUL x y) a) && buildcfg.GOARM == 7 => (MULS x y a)
-(NEGF (MULF x y)) && objabi.GOARM >= 6 => (NMULF x y)
-(NEGD (MULD x y)) && objabi.GOARM >= 6 => (NMULD x y)
-(MULF (NEGF x) y) && objabi.GOARM >= 6 => (NMULF x y)
-(MULD (NEGD x) y) && objabi.GOARM >= 6 => (NMULD x y)
+(NEGF (MULF x y)) && buildcfg.GOARM >= 6 => (NMULF x y)
+(NEGD (MULD x y)) && buildcfg.GOARM >= 6 => (NMULD x y)
+(MULF (NEGF x) y) && buildcfg.GOARM >= 6 => (NMULF x y)
+(MULD (NEGD x) y) && buildcfg.GOARM >= 6 => (NMULD x y)
(NMULF (NEGF x) y) => (MULF x y)
(NMULD (NEGD x) y) => (MULD x y)
// the result will overwrite the addend, since they are in the same register
-(ADDF a (MULF x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULAF a x y)
-(ADDF a (NMULF x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULSF a x y)
-(ADDD a (MULD x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULAD a x y)
-(ADDD a (NMULD x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULSD a x y)
-(SUBF a (MULF x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULSF a x y)
-(SUBF a (NMULF x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULAF a x y)
-(SUBD a (MULD x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULSD a x y)
-(SUBD a (NMULD x y)) && a.Uses == 1 && objabi.GOARM >= 6 => (MULAD a x y)
+(ADDF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAF a x y)
+(ADDF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSF a x y)
+(ADDD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAD a x y)
+(ADDD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSD a x y)
+(SUBF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSF a x y)
+(SUBF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAF a x y)
+(SUBD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSD a x y)
+(SUBD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAD a x y)
(AND x (MVN y)) => (BIC x y)
@@ -1264,8 +1264,8 @@
(CMPD x (MOVDconst [0])) => (CMPD0 x)
// bit extraction
-(SRAconst (SLLconst x [c]) [d]) && objabi.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFX [(d-c)|(32-d)<<8] x)
-(SRLconst (SLLconst x [c]) [d]) && objabi.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFXU [(d-c)|(32-d)<<8] x)
+(SRAconst (SLLconst x [c]) [d]) && buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFX [(d-c)|(32-d)<<8] x)
+(SRLconst (SLLconst x [c]) [d]) && buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFXU [(d-c)|(32-d)<<8] x)
// comparison simplification
((LT|LE|EQ|NE|GE|GT) (CMP x (RSBconst [0] y))) => ((LT|LE|EQ|NE|GE|GT) (CMN x y)) // sense of carry bit not preserved
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules
index f83ff75761..ce4b324b5e 100644
--- a/src/cmd/compile/internal/ssa/gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules
@@ -12,20 +12,20 @@
(Sub64F ...) => (FSUB ...)
// Combine 64 bit integer multiply and adds
-(ADD l:(MULLD x y) z) && objabi.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD x y z)
+(ADD l:(MULLD x y) z) && buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD x y z)
(Mod16 x y) => (Mod32 (SignExt16to32 x) (SignExt16to32 y))
(Mod16u x y) => (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
(Mod8 x y) => (Mod32 (SignExt8to32 x) (SignExt8to32 y))
(Mod8u x y) => (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
-(Mod64 x y) && objabi.GOPPC64 >=9 => (MODSD x y)
-(Mod64 x y) && objabi.GOPPC64 <=8 => (SUB x (MULLD y (DIVD x y)))
-(Mod64u x y) && objabi.GOPPC64 >= 9 => (MODUD x y)
-(Mod64u x y) && objabi.GOPPC64 <= 8 => (SUB x (MULLD y (DIVDU x y)))
-(Mod32 x y) && objabi.GOPPC64 >= 9 => (MODSW x y)
-(Mod32 x y) && objabi.GOPPC64 <= 8 => (SUB x (MULLW y (DIVW x y)))
-(Mod32u x y) && objabi.GOPPC64 >= 9 => (MODUW x y)
-(Mod32u x y) && objabi.GOPPC64 <= 8 => (SUB x (MULLW y (DIVWU x y)))
+(Mod64 x y) && buildcfg.GOPPC64 >=9 => (MODSD x y)
+(Mod64 x y) && buildcfg.GOPPC64 <=8 => (SUB x (MULLD y (DIVD x y)))
+(Mod64u x y) && buildcfg.GOPPC64 >= 9 => (MODUD x y)
+(Mod64u x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLD y (DIVDU x y)))
+(Mod32 x y) && buildcfg.GOPPC64 >= 9 => (MODSW x y)
+(Mod32 x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLW y (DIVW x y)))
+(Mod32u x y) && buildcfg.GOPPC64 >= 9 => (MODUW x y)
+(Mod32u x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLW y (DIVWU x y)))
// (x + y) / 2 with x>=y => (x - y) / 2 + y
(Avg64u <t> x y) => (ADD (SRDconst <t> (SUB <t> x y) [1]) y)
@@ -351,9 +351,9 @@
(Ctz32NonZero ...) => (Ctz32 ...)
(Ctz64NonZero ...) => (Ctz64 ...)
-(Ctz64 x) && objabi.GOPPC64<=8 => (POPCNTD (ANDN <typ.Int64> (ADDconst <typ.Int64> [-1] x) x))
+(Ctz64 x) && buildcfg.GOPPC64<=8 => (POPCNTD (ANDN <typ.Int64> (ADDconst <typ.Int64> [-1] x) x))
(Ctz64 x) => (CNTTZD x)
-(Ctz32 x) && objabi.GOPPC64<=8 => (POPCNTW (MOVWZreg (ANDN <typ.Int> (ADDconst <typ.Int> [-1] x) x)))
+(Ctz32 x) && buildcfg.GOPPC64<=8 => (POPCNTW (MOVWZreg (ANDN <typ.Int> (ADDconst <typ.Int> [-1] x) x)))
(Ctz32 x) => (CNTTZW (MOVWZreg x))
(Ctz16 x) => (POPCNTW (MOVHZreg (ANDN <typ.Int16> (ADDconst <typ.Int16> [-1] x) x)))
(Ctz8 x) => (POPCNTB (MOVBZreg (ANDN <typ.UInt8> (ADDconst <typ.UInt8> [-1] x) x)))
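On pre-POWER9 targets the Ctz64/Ctz32 rules above count trailing zeros as a population count of (x-1) &^ x (reading ANDN as "and with complement"). A small self-contained Go check of that identity (illustration only):

package main

import (
	"fmt"
	"math/bits"
)

// ctz64Power8 mirrors the pre-POWER9 lowering: (x-1) &^ x keeps exactly the
// bits below the lowest set bit of x, so its population count equals the
// number of trailing zeros (64 when x is zero).
func ctz64Power8(x uint64) int {
	return bits.OnesCount64((x - 1) &^ x)
}

func main() {
	for _, x := range []uint64{0, 1, 40, 1 << 63} {
		fmt.Println(x, ctz64Power8(x), bits.TrailingZeros64(x)) // the last two agree
	}
}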
@@ -627,10 +627,10 @@
// Handle cases not handled above
// Lowered Short cases do not generate loops, and as a result don't clobber
// the address registers or flags.
-(Zero [s] ptr mem) && objabi.GOPPC64 <= 8 && s < 64 => (LoweredZeroShort [s] ptr mem)
-(Zero [s] ptr mem) && objabi.GOPPC64 <= 8 => (LoweredZero [s] ptr mem)
-(Zero [s] ptr mem) && s < 128 && objabi.GOPPC64 >= 9 => (LoweredQuadZeroShort [s] ptr mem)
-(Zero [s] ptr mem) && objabi.GOPPC64 >= 9 => (LoweredQuadZero [s] ptr mem)
+(Zero [s] ptr mem) && buildcfg.GOPPC64 <= 8 && s < 64 => (LoweredZeroShort [s] ptr mem)
+(Zero [s] ptr mem) && buildcfg.GOPPC64 <= 8 => (LoweredZero [s] ptr mem)
+(Zero [s] ptr mem) && s < 128 && buildcfg.GOPPC64 >= 9 => (LoweredQuadZeroShort [s] ptr mem)
+(Zero [s] ptr mem) && buildcfg.GOPPC64 >= 9 => (LoweredQuadZero [s] ptr mem)
// moves
(Move [0] _ _ mem) => mem
@@ -658,11 +658,11 @@
// Large move uses a loop. Since the address is computed and the
// offset is zero, any alignment can be used.
-(Move [s] dst src mem) && s > 8 && objabi.GOPPC64 <= 8 && logLargeCopy(v, s) =>
+(Move [s] dst src mem) && s > 8 && buildcfg.GOPPC64 <= 8 && logLargeCopy(v, s) =>
(LoweredMove [s] dst src mem)
-(Move [s] dst src mem) && s > 8 && s <= 64 && objabi.GOPPC64 >= 9 =>
+(Move [s] dst src mem) && s > 8 && s <= 64 && buildcfg.GOPPC64 >= 9 =>
(LoweredQuadMoveShort [s] dst src mem)
-(Move [s] dst src mem) && s > 8 && objabi.GOPPC64 >= 9 && logLargeCopy(v, s) =>
+(Move [s] dst src mem) && s > 8 && buildcfg.GOPPC64 >= 9 && logLargeCopy(v, s) =>
(LoweredQuadMove [s] dst src mem)
// Calls
@@ -1048,7 +1048,7 @@
(SLWconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
(SLWconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
// special case for power9
-(SL(W|D)const [c] z:(MOVWreg x)) && c < 32 && objabi.GOPPC64 >= 9 => (EXTSWSLconst [c] x)
+(SL(W|D)const [c] z:(MOVWreg x)) && c < 32 && buildcfg.GOPPC64 >= 9 => (EXTSWSLconst [c] x)
// Lose widening ops fed to stores
(MOVBstore [off] {sym} ptr (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) => (MOVBstore [off] {sym} ptr x mem)
diff --git a/src/cmd/compile/internal/ssa/gen/Wasm.rules b/src/cmd/compile/internal/ssa/gen/Wasm.rules
index 7cda16b4b5..7ad3d1c72e 100644
--- a/src/cmd/compile/internal/ssa/gen/Wasm.rules
+++ b/src/cmd/compile/internal/ssa/gen/Wasm.rules
@@ -55,9 +55,9 @@
(ZeroExt32to64 x:(I64Load32U _ _)) => x
(ZeroExt16to(64|32) x:(I64Load16U _ _)) => x
(ZeroExt8to(64|32|16) x:(I64Load8U _ _)) => x
-(SignExt32to64 x) && objabi.GOWASM.SignExt => (I64Extend32S x)
-(SignExt8to(64|32|16) x) && objabi.GOWASM.SignExt => (I64Extend8S x)
-(SignExt16to(64|32) x) && objabi.GOWASM.SignExt => (I64Extend16S x)
+(SignExt32to64 x) && buildcfg.GOWASM.SignExt => (I64Extend32S x)
+(SignExt8to(64|32|16) x) && buildcfg.GOWASM.SignExt => (I64Extend8S x)
+(SignExt16to(64|32) x) && buildcfg.GOWASM.SignExt => (I64Extend16S x)
(SignExt32to64 x) => (I64ShrS (I64Shl x (I64Const [32])) (I64Const [32]))
(SignExt16to(64|32) x) => (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48]))
(SignExt8to(64|32|16) x) => (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56]))
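When GOWASM.SignExt is off, the unconditional rules above fall back to sign extension by a shift pair. A minimal Go illustration of the 8-to-64 case (illustration only):

package main

import "fmt"

// signExt8to64 mirrors the fallback rule: shift the byte into the top of a
// 64-bit word, then shift it back down arithmetically so the sign bit fills
// the upper bits.
func signExt8to64(x uint64) int64 {
	return int64(x<<56) >> 56
}

func main() {
	fmt.Println(signExt8to64(0x7f), signExt8to64(0x80)) // 127 -128
}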
diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go
index fd672b2f74..fe8db4ed1f 100644
--- a/src/cmd/compile/internal/ssa/gen/rulegen.go
+++ b/src/cmd/compile/internal/ssa/gen/rulegen.go
@@ -584,9 +584,9 @@ func fprint(w io.Writer, n Node) {
fmt.Fprintf(w, "\npackage ssa\n")
for _, path := range append([]string{
"fmt",
+ "internal/buildcfg",
"math",
"cmd/internal/obj",
- "cmd/internal/objabi",
"cmd/compile/internal/base",
"cmd/compile/internal/types",
}, n.Arch.imports...) {
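The context above stops at the loop header, but this loop writes the import block at the top of each generated rewrite file. A simplified, self-contained sketch of that output under the updated list (not rulegen's actual code):

package main

import (
	"fmt"
	"os"
)

// main reproduces, in simplified form, the preamble the fprint loop writes
// into a generated rewrite file, with internal/buildcfg replacing
// cmd/internal/objabi in the fixed part of the import list.
func main() {
	paths := []string{
		"fmt",
		"internal/buildcfg",
		"math",
		"cmd/internal/obj",
		"cmd/compile/internal/base",
		"cmd/compile/internal/types",
	}
	fmt.Fprintf(os.Stdout, "\npackage ssa\n")
	for _, path := range paths {
		fmt.Fprintf(os.Stdout, "import %q\n", path)
	}
}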