// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

(Add(Ptr|64|32|16|8) ...) => (ADDV ...)
(Add(32|64)F ...) => (ADD(F|D) ...)

(Sub(Ptr|64|32|16|8) ...) => (SUBV ...)
(Sub(32|64)F ...) => (SUB(F|D) ...)

(Mul(64|32|16|8) x y) => (Select1 (MULVU x y))
(Mul(32|64)F ...) => (MUL(F|D) ...)
(Mul64uhilo ...) => (MULVU ...)
(Select0 (Mul64uover x y)) => (Select1 (MULVU x y))
(Select1 (Mul64uover x y)) => (SGTU (Select0 (MULVU x y)) (MOVVconst [0]))

(Hmul64 x y)  => (Select0 (MULV x y))
(Hmul64u x y) => (Select0 (MULVU x y))
(Hmul32 x y)  => (SRAVconst (Select1 (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
(Hmul32u x y) => (SRLVconst (Select1 (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])

(Div64 x y)  => (Select1 (DIVV x y))
(Div64u x y) => (Select1 (DIVVU x y))
(Div32 x y)  => (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
(Div32u x y) => (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Div16 x y)  => (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
(Div16u x y) => (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Div8 x y)   => (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
(Div8u x y)  => (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Div(32|64)F ...) => (DIV(F|D) ...)

(Mod64 x y)  => (Select0 (DIVV x y))
(Mod64u x y) => (Select0 (DIVVU x y))
(Mod32 x y)  => (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
(Mod32u x y) => (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Mod16 x y)  => (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
(Mod16u x y) => (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Mod8 x y)   => (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
(Mod8u x y)  => (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))

(Select0 (Add64carry x y c)) => (ADDV (ADDV x y) c)
(Select1 (Add64carry x y c)) => (OR (SGTU x s:(ADDV x y)) (SGTU s (ADDV s c)))

(Select0 (Sub64borrow x y c)) => (SUBV (SUBV x y) c)
(Select1 (Sub64borrow x y c)) => (OR (SGTU s:(SUBV x y) x) (SGTU (SUBV s c) s))

// math package intrinsics
(Abs ...) => (ABSD ...)

// (x + y) / 2 with x>=y  =>  (x - y) / 2 + y
(Avg64u x y) => (ADDV (SRLVconst (SUBV x y) [1]) y)

(And(64|32|16|8) ...) => (AND ...)
(Or(64|32|16|8) ...)  => (OR ...)
(Xor(64|32|16|8) ...) => (XOR ...)
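// Illustrative sketch (not part of the rule set): the Add64carry/Sub64borrow
// lowerings above compute the carry/borrow bit with unsigned comparisons,
// since MIPS64 has no flags register. In Go terms, with carry-in c in {0, 1}:
//
//	func add64carry(x, y, c uint64) (sum, carryOut uint64) {
//		s := x + y  // ADDV x y
//		sum = s + c // ADDV s c
//		if x > s || s > sum { // OR (SGTU x s) (SGTU s (ADDV s c))
//			carryOut = 1
//		}
//		return
//	}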
// shifts
// hardware instruction uses only the low 6 bits of the shift
// we compare to 64 to ensure Go semantics for large shifts
(Lsh64x64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y))
(Lsh64x32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y)))
(Lsh64x16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y)))
(Lsh64x8 x y)  => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y)))

(Lsh32x64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y))
(Lsh32x32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y)))
(Lsh32x16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y)))
(Lsh32x8 x y)  => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y)))

(Lsh16x64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y))
(Lsh16x32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y)))
(Lsh16x16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y)))
(Lsh16x8 x y)  => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y)))

(Lsh8x64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y))
(Lsh8x32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y)))
(Lsh8x16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y)))
(Lsh8x8 x y)  => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y)))

(Rsh64Ux64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV x y))
(Rsh64Ux32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV x (ZeroExt32to64 y)))
(Rsh64Ux16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV x (ZeroExt16to64 y)))
(Rsh64Ux8 x y)  => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV x (ZeroExt8to64 y)))

(Rsh32Ux64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt32to64 x) y))
(Rsh32Ux32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Rsh32Ux16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt16to64 y)))
(Rsh32Ux8 x y)  => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt8to64 y)))

(Rsh16Ux64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt16to64 x) y))
(Rsh16Ux32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt32to64 y)))
(Rsh16Ux16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Rsh16Ux8 x y)  => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt8to64 y)))

(Rsh8Ux64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt8to64 x) y))
(Rsh8Ux32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt32to64 y)))
(Rsh8Ux16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt16to64 y)))
(Rsh8Ux8 x y)  => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt8to64 y)))
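// Illustrative sketch (not part of the rule set) of the masking used by the
// shift rules above, in Go terms for a 64-bit left shift:
//
//	func lsh64(x, s uint64) uint64 {
//		mask := uint64(0)
//		if s < 64 { // SGTU (MOVVconst [64]) s
//			mask = ^uint64(0) // NEGV turns the 0/1 result into an all-ones mask
//		}
//		return (x << (s & 63)) & mask // SLLV uses only the low 6 bits of s
//	}
//
// The signed right shifts that follow instead clamp the shift amount to 63
// (the OR with the NEGV/SGTU mask), so shifting by 64 or more still fills
// the result with copies of the sign bit, as Go requires.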
(Rsh64x64 x y) => (SRAV x (OR (NEGV (SGTU y (MOVVconst [63]))) y))
(Rsh64x32 x y) => (SRAV x (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y)))
(Rsh64x16 x y) => (SRAV x (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y)))
(Rsh64x8 x y)  => (SRAV x (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y)))

(Rsh32x64 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y))
(Rsh32x32 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y)))
(Rsh32x16 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y)))
(Rsh32x8 x y)  => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y)))

(Rsh16x64 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y))
(Rsh16x32 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y)))
(Rsh16x16 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y)))
(Rsh16x8 x y)  => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y)))

(Rsh8x64 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y))
(Rsh8x32 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y)))
(Rsh8x16 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y)))
(Rsh8x8 x y)  => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y)))

// rotates
(RotateLeft8 x (MOVVconst [c]))  => (Or8 (Lsh8x64 x (MOVVconst [c&7])) (Rsh8Ux64 x (MOVVconst [-c&7])))
(RotateLeft16 x (MOVVconst [c])) => (Or16 (Lsh16x64 x (MOVVconst [c&15])) (Rsh16Ux64 x (MOVVconst [-c&15])))
(RotateLeft32 x (MOVVconst [c])) => (Or32 (Lsh32x64 x (MOVVconst [c&31])) (Rsh32Ux64 x (MOVVconst [-c&31])))
(RotateLeft64 x (MOVVconst [c])) => (Or64 (Lsh64x64 x (MOVVconst [c&63])) (Rsh64Ux64 x (MOVVconst [-c&63])))

// unary ops
(Neg(64|32|16|8) ...) => (NEGV ...)
(Neg(32|64)F ...) => (NEG(F|D) ...)

(Com(64|32|16|8) x) => (NOR (MOVVconst [0]) x)

(Sqrt ...) => (SQRTD ...)
(Sqrt32 ...) => (SQRTF ...)

// boolean ops -- booleans are represented with 0=false, 1=true
(AndB ...) => (AND ...)
(OrB ...) => (OR ...)
(EqB x y) => (XOR (MOVVconst [1]) (XOR x y))
(NeqB ...) => (XOR ...)
(Not x) => (XORconst [1] x)

// constants
(Const(64|32|16|8) [val]) => (MOVVconst [int64(val)])
(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)])
(ConstNil) => (MOVVconst [0])
(ConstBool [t]) => (MOVVconst [int64(b2i(t))])

(Slicemask x) => (SRAVconst (NEGV x) [63])

// truncations
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8 ...)  => (Copy ...)
(Trunc32to8 ...)  => (Copy ...)
(Trunc32to16 ...) => (Copy ...)
(Trunc64to8 ...)  => (Copy ...)
(Trunc64to16 ...) => (Copy ...)
(Trunc64to32 ...) => (Copy ...)

// Zero-/Sign-extensions
(ZeroExt8to16 ...)  => (MOVBUreg ...)
(ZeroExt8to32 ...)  => (MOVBUreg ...)
(ZeroExt16to32 ...) => (MOVHUreg ...)
(ZeroExt8to64 ...)  => (MOVBUreg ...)
(ZeroExt16to64 ...) => (MOVHUreg ...)
(ZeroExt32to64 ...) => (MOVWUreg ...)

(SignExt8to16 ...)  => (MOVBreg ...)
(SignExt8to32 ...)  => (MOVBreg ...)
(SignExt16to32 ...) => (MOVHreg ...)
(SignExt8to64 ...)  => (MOVBreg ...)
(SignExt16to64 ...) => (MOVHreg ...)
(SignExt32to64 ...) => (MOVWreg ...)

// float <=> int conversion
(Cvt32to32F ...) => (MOVWF ...)
(Cvt32to64F ...) => (MOVWD ...)
(Cvt64to32F ...) => (MOVVF ...)
(Cvt64to64F ...) => (MOVVD ...)
(Cvt32Fto32 ...) => (TRUNCFW ...)
(Cvt64Fto32 ...) => (TRUNCDW ...)
(Cvt32Fto64 ...) => (TRUNCFV ...)
(Cvt64Fto64 ...) => (TRUNCDV ...)
(Cvt32Fto64F ...) => (MOVFD ...)
(Cvt64Fto32F ...) => (MOVDF ...)

(CvtBoolToUint8 ...) => (Copy ...)
(Round(32|64)F ...) => (Copy ...)

// comparisons
(Eq8 x y)  => (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Eq16 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Eq32 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Eq64 x y) => (SGTU (MOVVconst [1]) (XOR x y))
(EqPtr x y) => (SGTU (MOVVconst [1]) (XOR x y))
(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y))

(Neq8 x y)  => (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
(Neq16 x y) => (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)) (MOVVconst [0]))
(Neq32 x y) => (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
(Neq64 x y) => (SGTU (XOR x y) (MOVVconst [0]))
(NeqPtr x y) => (SGTU (XOR x y) (MOVVconst [0]))
(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y))

(Less8 x y)  => (SGT (SignExt8to64 y) (SignExt8to64 x))
(Less16 x y) => (SGT (SignExt16to64 y) (SignExt16to64 x))
(Less32 x y) => (SGT (SignExt32to64 y) (SignExt32to64 x))
(Less64 x y) => (SGT y x)
(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN

(Less8U x y)  => (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x))
(Less16U x y) => (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x))
(Less32U x y) => (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
(Less64U x y) => (SGTU y x)

(Leq8 x y)  => (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y)))
(Leq16 x y) => (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y)))
(Leq32 x y) => (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y)))
(Leq64 x y) => (XOR (MOVVconst [1]) (SGT x y))
(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN

(Leq8U x y)  => (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Leq16U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Leq32U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Leq64U x y) => (XOR (MOVVconst [1]) (SGTU x y))
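// Illustrative sketch (not part of the rule set): MIPS64 has no condition
// codes, so the comparison rules above materialize booleans with SGT/SGTU
// ("set on greater than"). For example, in Go terms:
//
//	func eq64(x, y uint64) uint64 {
//		if 1 > x^y { // SGTU (MOVVconst [1]) (XOR x y): x^y is 0 only when x == y
//			return 1
//		}
//		return 0
//	}
//
// Narrow comparisons sign- or zero-extend both operands to 64 bits first, and
// the Less/Leq forms with no direct instruction are built by swapping operands
// or XORing the SGT/SGTU result with 1.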
(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVVaddr [int32(off)] ptr)
(OffPtr [off] ptr) => (ADDVconst [off] ptr)

(Addr {sym} base) => (MOVVaddr {sym} base)
(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVVaddr {sym} (SPanchored base mem))
(LocalAddr <t> {sym} base _)  && !t.Elem().HasPointers() => (MOVVaddr {sym} base)

// loads
(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && t.IsSigned()) => (MOVWload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem)
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVVload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) => (MOVFload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) => (MOVDload ptr mem)

// stores
(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVVstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (MOVFstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (MOVDstore ptr val mem)

// zeroing
(Zero [0] _ mem) => mem
(Zero [1] ptr mem) => (MOVBstore ptr (MOVVconst [0]) mem)
(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 => (MOVHstore ptr (MOVVconst [0]) mem)
(Zero [2] ptr mem) => (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem))
(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 => (MOVWstore ptr (MOVVconst [0]) mem)
(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [2] ptr (MOVVconst [0])
		(MOVHstore [0] ptr (MOVVconst [0]) mem))
(Zero [4] ptr mem) =>
	(MOVBstore [3] ptr (MOVVconst [0])
		(MOVBstore [2] ptr (MOVVconst [0])
			(MOVBstore [1] ptr (MOVVconst [0])
				(MOVBstore [0] ptr (MOVVconst [0]) mem))))
(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 => (MOVVstore ptr (MOVVconst [0]) mem)
(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [4] ptr (MOVVconst [0])
		(MOVWstore [0] ptr (MOVVconst [0]) mem))
(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [6] ptr (MOVVconst [0])
		(MOVHstore [4] ptr (MOVVconst [0])
			(MOVHstore [2] ptr (MOVVconst [0])
				(MOVHstore [0] ptr (MOVVconst [0]) mem))))

(Zero [3] ptr mem) =>
	(MOVBstore [2] ptr (MOVVconst [0])
		(MOVBstore [1] ptr (MOVVconst [0])
			(MOVBstore [0] ptr (MOVVconst [0]) mem)))
(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [4] ptr (MOVVconst [0])
		(MOVHstore [2] ptr (MOVVconst [0])
			(MOVHstore [0] ptr (MOVVconst [0]) mem)))
(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [8] ptr (MOVVconst [0])
		(MOVWstore [4] ptr (MOVVconst [0])
			(MOVWstore [0] ptr (MOVVconst [0]) mem)))
(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 =>
	(MOVVstore [8] ptr (MOVVconst [0])
		(MOVVstore [0] ptr (MOVVconst [0]) mem))
(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 =>
	(MOVVstore [16] ptr (MOVVconst [0])
		(MOVVstore [8] ptr (MOVVconst [0])
			(MOVVstore [0] ptr (MOVVconst [0]) mem)))

// medium zeroing uses a duff device
// 8 and 128 are magic constants, see runtime/mkduff.go
(Zero [s] {t} ptr mem)
	&& s%8 == 0 && s > 24 && s <= 8*128
	&& t.Alignment()%8 == 0 && !config.noDuffDevice =>
	(DUFFZERO [8 * (128 - s/8)] ptr mem)

// large or unaligned zeroing uses a loop
(Zero [s] {t} ptr mem)
	&& (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0 =>
	(LoweredZero [t.Alignment()]
		ptr
		(ADDVconst ptr [s-moveSize(t.Alignment(), config)])
		mem)
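// Illustrative sketch (not part of the rule set): the DUFFZERO offset above
// (and the DUFFCOPY offset used for moves below) picks an entry point into the
// duff routine so that exactly s/8 eight-byte blocks are executed:
//
//	func duffEntryOffsets(s int64) (zeroOff, copyOff int64) {
//		blocks := s / 8               // s%8 == 0; number of 8-byte units to process
//		zeroOff = 8 * (128 - blocks)  // DUFFZERO: 8 bytes of code per block
//		copyOff = 16 * (128 - blocks) // DUFFCOPY: 16 bytes of code per block
//		return
//	}
//
// Entering (128 - blocks) blocks past the start skips the stores that are not needed.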
// moves
(Move [0] _ _ mem) => mem
(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 => (MOVHstore dst (MOVHload src mem) mem)
(Move [2] dst src mem) =>
	(MOVBstore [1] dst (MOVBload [1] src mem)
		(MOVBstore dst (MOVBload src mem) mem))
(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 => (MOVWstore dst (MOVWload src mem) mem)
(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [2] dst (MOVHload [2] src mem)
		(MOVHstore dst (MOVHload src mem) mem))
(Move [4] dst src mem) =>
	(MOVBstore [3] dst (MOVBload [3] src mem)
		(MOVBstore [2] dst (MOVBload [2] src mem)
			(MOVBstore [1] dst (MOVBload [1] src mem)
				(MOVBstore dst (MOVBload src mem) mem))))
(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 => (MOVVstore dst (MOVVload src mem) mem)
(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [4] dst (MOVWload [4] src mem)
		(MOVWstore dst (MOVWload src mem) mem))
(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [6] dst (MOVHload [6] src mem)
		(MOVHstore [4] dst (MOVHload [4] src mem)
			(MOVHstore [2] dst (MOVHload [2] src mem)
				(MOVHstore dst (MOVHload src mem) mem))))

(Move [3] dst src mem) =>
	(MOVBstore [2] dst (MOVBload [2] src mem)
		(MOVBstore [1] dst (MOVBload [1] src mem)
			(MOVBstore dst (MOVBload src mem) mem)))
(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [4] dst (MOVHload [4] src mem)
		(MOVHstore [2] dst (MOVHload [2] src mem)
			(MOVHstore dst (MOVHload src mem) mem)))
(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [8] dst (MOVWload [8] src mem)
		(MOVWstore [4] dst (MOVWload [4] src mem)
			(MOVWstore dst (MOVWload src mem) mem)))
(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 =>
	(MOVVstore [8] dst (MOVVload [8] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 =>
	(MOVVstore [16] dst (MOVVload [16] src mem)
		(MOVVstore [8] dst (MOVVload [8] src mem)
			(MOVVstore dst (MOVVload src mem) mem)))

// float <=> int register moves, with no conversion.
// These come up when compiling math.{Float64bits, Float64frombits, Float32bits, Float32frombits}.
(MOVVload  [off] {sym} ptr (MOVDstore [off] {sym} ptr val _)) => (MOVVfpgp val)
(MOVDload  [off] {sym} ptr (MOVVstore [off] {sym} ptr val _)) => (MOVVgpfp val)
(MOVWUload [off] {sym} ptr (MOVFstore [off] {sym} ptr val _)) => (ZeroExt32to64 (MOVWfpgp val))
(MOVFload  [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) => (MOVWgpfp val)

// Similarly for stores, if we see a store after FPR <=> GPR move, then redirect store to use the other register set.
(MOVVstore [off] {sym} ptr (MOVVfpgp val) mem) => (MOVDstore [off] {sym} ptr val mem)
(MOVDstore [off] {sym} ptr (MOVVgpfp val) mem) => (MOVVstore [off] {sym} ptr val mem)
(MOVWstore [off] {sym} ptr (MOVWfpgp val) mem) => (MOVFstore [off] {sym} ptr val mem)
(MOVFstore [off] {sym} ptr (MOVWgpfp val) mem) => (MOVWstore [off] {sym} ptr val mem)

// medium move uses a duff device
(Move [s] {t} dst src mem)
	&& s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0
	&& !config.noDuffDevice && logLargeCopy(v, s) =>
	(DUFFCOPY [16 * (128 - s/8)] dst src mem)
// 16 and 128 are magic constants. 16 is the number of bytes to encode:
//	MOVV	(R1), R23
//	ADDV	$8, R1
//	MOVV	R23, (R2)
//	ADDV	$8, R2
// and 128 is the number of such blocks. See runtime/duff_mips64.s:duffcopy.

// large or unaligned move uses a loop
(Move [s] {t} dst src mem)
	&& s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0 =>
	(LoweredMove [t.Alignment()]
		dst
		src
		(ADDVconst src [s-moveSize(t.Alignment(), config)])
		mem)

// calls
(StaticCall ...)  => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...)   => (CALLinter ...)
(TailCall ...)    => (CALLtail ...)

// atomic intrinsics
(AtomicLoad(8|32|64) ...) => (LoweredAtomicLoad(8|32|64) ...)
(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...)

(AtomicStore(8|32|64) ...) => (LoweredAtomicStore(8|32|64) ...)
(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...)

(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)

(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)

(AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)
// AtomicOr8(ptr,val) => LoweredAtomicOr32(ptr&^3, uint32(val) << ((ptr & 3) * 8))
(AtomicOr8 ptr val mem) && !config.BigEndian =>
	(LoweredAtomicOr32 (AND (MOVVconst [^3]) ptr)
		(SLLV (ZeroExt8to32 val) (SLLVconst [3] (ANDconst [3] ptr)))
		mem)

// AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3, (uint32(val) << ((ptr & 3) * 8)) | ^(uint32(0xFF) << ((ptr & 3) * 8)))
(AtomicAnd8 ptr val mem) && !config.BigEndian =>
	(LoweredAtomicAnd32 (AND (MOVVconst [^3]) ptr)
		(OR (SLLV (ZeroExt8to32 val) (SLLVconst [3] (ANDconst [3] ptr)))
			(NORconst [0] (SLLV (MOVVconst [0xff]) (SLLVconst [3] (ANDconst [3] ptr)))))
		mem)

// AtomicOr8(ptr,val) => LoweredAtomicOr32(ptr&^3, uint32(val) << (((ptr^3) & 3) * 8))
(AtomicOr8 ptr val mem) && config.BigEndian =>
	(LoweredAtomicOr32 (AND (MOVVconst [^3]) ptr)
		(SLLV (ZeroExt8to32 val) (SLLVconst [3] (ANDconst [3] (XORconst [3] ptr))))
		mem)

// AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3, (uint32(val) << (((ptr^3) & 3) * 8)) | ^(uint32(0xFF) << (((ptr^3) & 3) * 8)))
(AtomicAnd8 ptr val mem) && config.BigEndian =>
	(LoweredAtomicAnd32 (AND (MOVVconst [^3]) ptr)
		(OR (SLLV (ZeroExt8to32 val) (SLLVconst [3] (ANDconst [3] (XORconst [3] ptr))))
			(NORconst [0] (SLLV (MOVVconst [0xff]) (SLLVconst [3] (ANDconst [3] (XORconst [3] ptr))))))
		mem)

(AtomicAnd32 ...) => (LoweredAtomicAnd32 ...)
(AtomicOr32 ...) => (LoweredAtomicOr32 ...)
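// Illustrative sketch (not part of the rule set): there is no 8-bit atomic
// or/and, so the AtomicOr8/AtomicAnd8 rules above operate on the aligned 32-bit
// word containing the byte. In Go terms for the little-endian case, with
// atomicOr32 standing in for LoweredAtomicOr32 and pointer conversions elided:
//
//	func atomicOr8LE(p uintptr, val uint8) {
//		word := p &^ 3       // AND (MOVVconst [^3]) ptr: aligned word address
//		shift := (p & 3) * 8 // byte position within the word, in bits
//		atomicOr32(word, uint32(val)<<shift)
//	}
//
// On big-endian targets the byte index is flipped with ptr^3, and AtomicAnd8
// additionally ORs in ^(0xff << shift) so the other three bytes are preserved.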
// checks
(NilCheck ...) => (LoweredNilCheck ...)
(IsNonNil ptr) => (SGTU ptr (MOVVconst [0]))
(IsInBounds idx len) => (SGTU len idx)
(IsSliceInBounds idx len) => (XOR (MOVVconst [1]) (SGTU idx len))

// pseudo-ops
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)
(If cond yes no) => (NE cond yes no)

// Write barrier.
(WB ...) => (LoweredWB ...)

(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)

// Optimizations

// Absorb boolean tests into block
(NE (FPFlagTrue cmp) yes no)  => (FPT cmp yes no)
(NE (FPFlagFalse cmp) yes no) => (FPF cmp yes no)
(EQ (FPFlagTrue cmp) yes no)  => (FPF cmp yes no)
(EQ (FPFlagFalse cmp) yes no) => (FPT cmp yes no)
(NE (XORconst [1] cmp:(SGT _ _)) yes no)     => (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTU _ _)) yes no)    => (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTconst _)) yes no)  => (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQ cmp yes no)
(EQ (XORconst [1] cmp:(SGT _ _)) yes no)     => (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTU _ _)) yes no)    => (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTconst _)) yes no)  => (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NE cmp yes no)
(NE (SGTUconst [1] x) yes no) => (EQ x yes no)
(EQ (SGTUconst [1] x) yes no) => (NE x yes no)
(NE (SGTU x (MOVVconst [0])) yes no) => (NE x yes no)
(EQ (SGTU x (MOVVconst [0])) yes no) => (EQ x yes no)
(NE (SGTconst [0] x) yes no) => (LTZ x yes no)
(EQ (SGTconst [0] x) yes no) => (GEZ x yes no)
(NE (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no)
(EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no)

// fold offset into address
(ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) => (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr)

// fold address into load/store
(MOVBload  [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBload  [off1+int32(off2)] {sym} ptr mem)
(MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBUload [off1+int32(off2)] {sym} ptr mem)
(MOVHload  [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHload  [off1+int32(off2)] {sym} ptr mem)
(MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHUload [off1+int32(off2)] {sym} ptr mem)
(MOVWload  [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWload  [off1+int32(off2)] {sym} ptr mem)
(MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWUload [off1+int32(off2)] {sym} ptr mem)
(MOVVload  [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVVload  [off1+int32(off2)] {sym} ptr mem)
(MOVFload  [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVFload  [off1+int32(off2)] {sym} ptr mem)
(MOVDload  [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVDload  [off1+int32(off2)] {sym} ptr mem)

(MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBstore [off1+int32(off2)] {sym} ptr val mem)
(MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHstore [off1+int32(off2)] {sym} ptr val mem)
(MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWstore [off1+int32(off2)] {sym} ptr val mem)
(MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVVstore [off1+int32(off2)] {sym} ptr val mem)
(MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVFstore [off1+int32(off2)] {sym} ptr val mem)
(MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVDstore [off1+int32(off2)] {sym} ptr val mem)
(MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVVstorezero [off1+int32(off2)] {sym} ptr mem)

(MOVBload  [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBload  [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload  [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHload  [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload  [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWload  [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVVload  [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVVload  [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVFload  [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVFload  [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload  [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVDload  [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
(MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
(MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
(MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)

// store zero
(MOVBstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
(MOVVstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVVstorezero [off] {sym} ptr mem)

// don't extend after proper load
(MOVBreg x:(MOVBload _ _))   => (MOVVreg x)
(MOVBUreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVHreg x:(MOVBload _ _))   => (MOVVreg x)
(MOVHreg x:(MOVBUload _ _))  => (MOVVreg x)
(MOVHreg x:(MOVHload _ _))   => (MOVVreg x)
(MOVHUreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVHUreg x:(MOVHUload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVBload _ _))   => (MOVVreg x)
(MOVWreg x:(MOVBUload _ _))  => (MOVVreg x)
(MOVWreg x:(MOVHload _ _))   => (MOVVreg x)
(MOVWreg x:(MOVHUload _ _))  => (MOVVreg x)
(MOVWreg x:(MOVWload _ _))   => (MOVVreg x)
(MOVWUreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVHUload _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVWUload _ _)) => (MOVVreg x)

// fold double extensions
(MOVBreg x:(MOVBreg _)) => (MOVVreg x)
(MOVBUreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVHreg x:(MOVBreg _))   => (MOVVreg x)
(MOVHreg x:(MOVBUreg _))  => (MOVVreg x)
(MOVHreg x:(MOVHreg _))   => (MOVVreg x)
(MOVHUreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVHUreg x:(MOVHUreg _)) => (MOVVreg x)
(MOVWreg x:(MOVBreg _))   => (MOVVreg x)
(MOVWreg x:(MOVBUreg _))  => (MOVVreg x)
(MOVWreg x:(MOVHreg _))   => (MOVVreg x)
(MOVWreg x:(MOVWreg _))   => (MOVVreg x)
(MOVWUreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVWUreg x:(MOVHUreg _)) => (MOVVreg x)
(MOVWUreg x:(MOVWUreg _)) => (MOVVreg x)

// don't extend before store
(MOVBstore [off] {sym} ptr (MOVBreg x) mem)  => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHreg x) mem)  => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWreg x) mem)  => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem)  => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWreg x) mem)  => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg x) mem)  => (MOVWstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
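// Illustrative sketch (not part of the rule set): a narrow store only writes
// the low bits of the source register, so a sign/zero extension feeding it is
// dead. In Go terms, both of these produce the same stored byte:
//
//	var v int32 = -1
//	b1 := uint8(v)       // plain truncation
//	b2 := uint8(int8(v)) // extension (MOVBreg) before the truncating store
//	_ = b1 == b2         // always true: only the low 8 bits reach memory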
// if a register move has only 1 use, just use the same register without emitting instruction
// MOVVnop doesn't emit an instruction; it exists only to carry the type.
(MOVVreg x) && x.Uses == 1 => (MOVVnop x)

// TODO: we should be able to get rid of MOVVnop all together.
// But for now, this is enough to get rid of lots of them.

(MOVVnop (MOVVconst [c])) => (MOVVconst [c])

// fold constant into arithmetic ops
(ADDV x (MOVVconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDVconst [c] x)
(SUBV x (MOVVconst [c])) && is32Bit(c) => (SUBVconst [c] x)
(AND x (MOVVconst [c])) && is32Bit(c) => (ANDconst [c] x)
(OR  x (MOVVconst [c])) && is32Bit(c) => (ORconst  [c] x)
(XOR x (MOVVconst [c])) && is32Bit(c) => (XORconst [c] x)
(NOR x (MOVVconst [c])) && is32Bit(c) => (NORconst [c] x)

(SLLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0])
(SRLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0])
(SRAV x (MOVVconst [c])) && uint64(c)>=64 => (SRAVconst x [63])
(SLLV x (MOVVconst [c])) => (SLLVconst x [c])
(SRLV x (MOVVconst [c])) => (SRLVconst x [c])
(SRAV x (MOVVconst [c])) => (SRAVconst x [c])

(SGT  (MOVVconst [c]) x) && is32Bit(c) => (SGTconst  [c] x)
(SGTU (MOVVconst [c]) x) && is32Bit(c) => (SGTUconst [c] x)

// mul by constant
(Select1 (MULVU x (MOVVconst [-1]))) => (NEGV x)
(Select1 (MULVU _ (MOVVconst [0])))  => (MOVVconst [0])
(Select1 (MULVU x (MOVVconst [1])))  => x
(Select1 (MULVU x (MOVVconst [c]))) && isPowerOfTwo(c) => (SLLVconst [log64(c)] x)

// div by constant
(Select1 (DIVVU x (MOVVconst [1])))  => x
(Select1 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo(c) => (SRLVconst [log64(c)] x)
(Select0 (DIVVU _ (MOVVconst [1])))  => (MOVVconst [0])                      // mod
(Select0 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo(c) => (ANDconst [c-1] x) // mod

// generic simplifications
(ADDV x (NEGV y)) => (SUBV x y)
(SUBV x x) => (MOVVconst [0])
(SUBV (MOVVconst [0]) x) => (NEGV x)
(AND x x) => x
(OR  x x) => x
(XOR x x) => (MOVVconst [0])

// remove redundant *const ops
(ADDVconst [0] x) => x
(SUBVconst [0] x) => x
(ANDconst [0] _)  => (MOVVconst [0])
(ANDconst [-1] x) => x
(ORconst [0] x)   => x
(ORconst [-1] _)  => (MOVVconst [-1])
(XORconst [0] x)  => x
(XORconst [-1] x) => (NORconst [0] x)

// generic constant folding
(ADDVconst [c] (MOVVconst [d])) => (MOVVconst [c+d])
(ADDVconst [c] (ADDVconst [d] x)) && is32Bit(c+d) => (ADDVconst [c+d] x)
(ADDVconst [c] (SUBVconst [d] x)) && is32Bit(c-d) => (ADDVconst [c-d] x)
(SUBVconst [c] (MOVVconst [d])) => (MOVVconst [d-c])
(SUBVconst [c] (SUBVconst [d] x)) && is32Bit(-c-d) => (ADDVconst [-c-d] x)
(SUBVconst [c] (ADDVconst [d] x)) && is32Bit(-c+d) => (ADDVconst [-c+d] x)
(SLLVconst [c] (MOVVconst [d])) => (MOVVconst [d<<uint64(c)])
(SRLVconst [c] (MOVVconst [d])) => (MOVVconst [int64(uint64(d)>>uint64(c))])
(SRAVconst [c] (MOVVconst [d])) => (MOVVconst [d>>uint64(c)])
(Select1 (MULVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c*d])
(Select1 (DIVV  (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [c/d])
(Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [int64(uint64(c)/uint64(d))])
(Select0 (DIVV  (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [c%d])                        // mod
(Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [int64(uint64(c)%uint64(d))]) // mod
(ANDconst [c] (MOVVconst [d])) => (MOVVconst [c&d])
(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
(ORconst [c] (MOVVconst [d])) => (MOVVconst [c|d])
(ORconst [c] (ORconst [d] x)) && is32Bit(c|d) => (ORconst [c|d] x)
(XORconst [c] (MOVVconst [d])) => (MOVVconst [c^d])
(XORconst [c] (XORconst [d] x)) && is32Bit(c^d) => (XORconst [c^d] x)
(NORconst [c] (MOVVconst [d])) => (MOVVconst [^(c|d)])
(NEGV (MOVVconst [c])) => (MOVVconst [-c])
(MOVBreg  (MOVVconst [c])) => (MOVVconst [int64(int8(c))])
(MOVBUreg (MOVVconst [c])) => (MOVVconst [int64(uint8(c))])
(MOVHreg  (MOVVconst [c])) => (MOVVconst [int64(int16(c))])
(MOVHUreg (MOVVconst [c])) => (MOVVconst [int64(uint16(c))])
(MOVWreg  (MOVVconst [c])) => (MOVVconst [int64(int32(c))])
(MOVWUreg (MOVVconst [c])) => (MOVVconst [int64(uint32(c))])
(MOVVreg  (MOVVconst [c])) => (MOVVconst [c])

(LoweredAtomicStore(32|64) ptr (MOVVconst [0]) mem) => (LoweredAtomicStorezero(32|64) ptr mem)
(LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst32 [int32(c)] ptr mem)
(LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst64 [c] ptr mem)

// constant comparisons
(SGTconst [c] (MOVVconst [d])) && c>d => (MOVVconst [1])
(SGTconst [c] (MOVVconst [d])) && c<=d => (MOVVconst [0])
(SGTUconst [c] (MOVVconst [d])) && uint64(c)>uint64(d)  => (MOVVconst [1])
(SGTUconst [c] (MOVVconst [d])) && uint64(c)<=uint64(d) => (MOVVconst [0])

// other known comparisons
(SGTconst [c] (MOVBreg _)) && 0x7f < c => (MOVVconst [1])
(SGTconst [c] (MOVBreg _)) && c <= -0x80 => (MOVVconst [0])
(SGTconst [c] (MOVBUreg _)) && 0xff < c => (MOVVconst [1])
(SGTconst [c] (MOVBUreg _)) && c < 0 => (MOVVconst [0])
(SGTUconst [c] (MOVBUreg _)) && 0xff < uint64(c) => (MOVVconst [1])
(SGTconst [c] (MOVHreg _)) && 0x7fff < c => (MOVVconst [1])
(SGTconst [c] (MOVHreg _)) && c <= -0x8000 => (MOVVconst [0])
(SGTconst [c] (MOVHUreg _)) && 0xffff < c => (MOVVconst [1])
(SGTconst [c] (MOVHUreg _)) && c < 0 => (MOVVconst [0])
(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint64(c) => (MOVVconst [1])
(SGTconst [c] (MOVWUreg _)) && c < 0 => (MOVVconst [0])
(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c => (MOVVconst [1])
(SGTUconst [c] (ANDconst [m] _)) && uint64(m) < uint64(c) => (MOVVconst [1])
(SGTconst [c] (SRLVconst _ [d])) && 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1])
(SGTUconst [c] (SRLVconst _ [d])) && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1])

// absorb constants into branches
(EQ (MOVVconst [0]) yes no) => (First yes no)
(EQ (MOVVconst [c]) yes no) && c != 0 => (First no yes)
(NE (MOVVconst [0]) yes no) => (First no yes)
(NE (MOVVconst [c]) yes no) && c != 0 => (First yes no)
(LTZ (MOVVconst [c]) yes no) && c <  0 => (First yes no)
(LTZ (MOVVconst [c]) yes no) && c >= 0 => (First no yes)
(LEZ (MOVVconst [c]) yes no) && c <= 0 => (First yes no)
(LEZ (MOVVconst [c]) yes no) && c >  0 => (First no yes)
(GTZ (MOVVconst [c]) yes no) && c >  0 => (First yes no)
(GTZ (MOVVconst [c]) yes no) && c <= 0 => (First no yes)
(GEZ (MOVVconst [c]) yes no) && c >= 0 => (First yes no)
(GEZ (MOVVconst [c]) yes no) && c <  0 => (First no yes)

// SGT/SGTU with known outcomes.
(SGT  x x) => (MOVVconst [0])
(SGTU x x) => (MOVVconst [0])

// fold readonly sym load
(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read8(sym, int64(off)))])
(MOVHload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVVload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
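// Illustrative sketch (not part of the rule set): read8/read16/read32/read64
// above read the initialized data of a read-only symbol at compile time, using
// the target's byte order, so a fixed-offset load from such a symbol becomes a
// constant. Roughly, for the 16-bit case (data is a hypothetical byte slice
// holding the symbol's contents, not the actual helper's signature):
//
//	import "encoding/binary"
//
//	func read16Sketch(data []byte, off int64, order binary.ByteOrder) int64 {
//		return int64(order.Uint16(data[off : off+2]))
//	}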