     1  // Copyright 2016 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // GOPPC64 values indicate power8, power9, etc.
     6  // That means the code is compiled for that target,
     7  // and will not run on earlier targets.
     8  //
     9  (Add(Ptr|64|32|16|8) ...) => (ADD ...)
    10  (Add64F ...) => (FADD ...)
    11  (Add32F ...) => (FADDS ...)
    12  
    13  (Sub(Ptr|64|32|16|8) ...) => (SUB ...)
    14  (Sub32F ...) => (FSUBS ...)
    15  (Sub64F ...) => (FSUB ...)
    16  
    17  (Min(32|64)F x y) && buildcfg.GOPPC64 >= 9 => (XSMINJDP x y)
    18  (Max(32|64)F x y) && buildcfg.GOPPC64 >= 9 => (XSMAXJDP x y)
    19  
    20  // Combine 64 bit integer multiply and adds
    21  (ADD l:(MULLD x y) z) && buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD x y z)
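        // For example, a Go expression a*b + c on 64-bit ints compiles to a
        // single maddld on power9 and later, instead of a mulld/add pair.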
    22  
    23  (Mod16 x y) => (Mod32 (SignExt16to32 x) (SignExt16to32 y))
    24  (Mod16u x y) => (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
    25  (Mod8 x y) => (Mod32 (SignExt8to32 x) (SignExt8to32 y))
    26  (Mod8u x y) => (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
    27  (Mod64 x y) && buildcfg.GOPPC64 >= 9 => (MODSD x y)
    28  (Mod64 x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLD y (DIVD x y)))
    29  (Mod64u x y) && buildcfg.GOPPC64 >= 9 => (MODUD x y)
    30  (Mod64u x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLD y (DIVDU x y)))
    31  (Mod32 x y) && buildcfg.GOPPC64 >= 9 => (MODSW x y)
    32  (Mod32 x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLW y (DIVW x y)))
    33  (Mod32u x y) && buildcfg.GOPPC64 >= 9 => (MODUW x y)
    34  (Mod32u x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLW y (DIVWU x y)))
    35  
    36  // (x + y) / 2 with x>=y => (x - y) / 2 + y
    37  (Avg64u <t> x y) => (ADD (SRDconst <t> (SUB <t> x y) [1]) y)
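        // Worked example of why the rewrite is overflow-safe: with
        // x = ^uint64(0) and y = 1 (so x >= y), (x+y)/2 would wrap to 0,
        // while (x-y)>>1 + y = 0x7FFFFFFFFFFFFFFF + 1 = 0x8000000000000000,
        // the true average. The x >= y precondition keeps x-y from wrapping.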
    38  
    39  (Mul64 ...) => (MULLD ...)
    40  (Mul(32|16|8) ...) => (MULLW ...)
    41  (Select0 (Mul64uhilo x y)) => (MULHDU x y)
    42  (Select1 (Mul64uhilo x y)) => (MULLD x y)
    43  (Select0 (Mul64uover x y)) => (MULLD x y)
    44  (Select1 (Mul64uover x y)) => (SETBCR [2] (CMPconst [0] (MULHDU <x.Type> x y)))
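        // An unsigned 64x64 multiply overflows exactly when the high
        // doubleword of the 128-bit product is nonzero, so Select1 becomes
        // MULHDU(x,y) != 0; SETBCR [2] materializes the negated EQ bit.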
    45  
    46  (Div64 [false] x y) => (DIVD x y)
    47  (Div64u ...) => (DIVDU ...)
    48  (Div32 [false] x y) => (DIVW x y)
    49  (Div32u ...) => (DIVWU ...)
    50  (Div16 [false]  x y) => (DIVW  (SignExt16to32 x) (SignExt16to32 y))
    51  (Div16u x y) => (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
    52  (Div8 x y) => (DIVW  (SignExt8to32 x) (SignExt8to32 y))
    53  (Div8u x y) => (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))
    54  
    55  (Hmul(64|64u|32|32u) ...) => (MULH(D|DU|W|WU) ...)
    56  
    57  (Mul(32|64)F ...) => ((FMULS|FMUL) ...)
    58  
    59  (Div(32|64)F ...) => ((FDIVS|FDIV) ...)
    60  
    61  // Lowering float <=> int
    62  (Cvt32to(32|64)F x) => ((FCFIDS|FCFID) (MTVSRD (SignExt32to64 x)))
    63  (Cvt64to(32|64)F x) => ((FCFIDS|FCFID) (MTVSRD x))
    64  
    65  (Cvt32Fto(32|64) x) => (MFVSRD (FCTI(W|D)Z x))
    66  (Cvt64Fto(32|64) x) => (MFVSRD (FCTI(W|D)Z x))
    67  
    68  (Cvt32Fto64F ...) => (Copy ...) // Note v will have the wrong type for patterns dependent on Float32/Float64
    69  (Cvt64Fto32F ...) => (FRSP ...)
    70  
    71  (CvtBoolToUint8 ...) => (Copy ...)
    72  
    73  (Round(32|64)F ...) => (LoweredRound(32|64)F ...)
    74  
    75  (Sqrt ...) => (FSQRT ...)
    76  (Sqrt32 ...) => (FSQRTS ...)
    77  (Floor ...) => (FFLOOR ...)
    78  (Ceil ...) => (FCEIL ...)
    79  (Trunc ...) => (FTRUNC ...)
    80  (Round ...) => (FROUND ...)
    81  (Copysign x y) => (FCPSGN y x)
    82  (Abs ...) => (FABS ...)
    83  (FMA ...) => (FMADD ...)
    84  
    85  // Lowering extension
    86  // Note: we always extend to 64 bits even though some ops don't need that many result bits.
    87  (SignExt8to(16|32|64) ...) => (MOVBreg ...)
    88  (SignExt16to(32|64) ...) => (MOVHreg ...)
    89  (SignExt32to64 ...) => (MOVWreg ...)
    90  
    91  (ZeroExt8to(16|32|64) ...) => (MOVBZreg ...)
    92  (ZeroExt16to(32|64) ...) => (MOVHZreg ...)
    93  (ZeroExt32to64 ...) => (MOVWZreg ...)
    94  
    95  (Trunc(16|32|64)to8 <t> x) && t.IsSigned() => (MOVBreg x)
    96  (Trunc(16|32|64)to8  x) => (MOVBZreg x)
    97  (Trunc(32|64)to16 <t> x) && t.IsSigned() => (MOVHreg x)
    98  (Trunc(32|64)to16 x) => (MOVHZreg x)
    99  (Trunc64to32 <t> x) && t.IsSigned() => (MOVWreg x)
   100  (Trunc64to32 x) => (MOVWZreg x)
   101  
   102  // Lowering constants
   103  (Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
   104  (Const(32|64)F ...) => (FMOV(S|D)const ...)
   105  (ConstNil) => (MOVDconst [0])
   106  (ConstBool [t]) => (MOVDconst [b2i(t)])
   107  
   108  // Carrying addition.
   109  (Select0 (Add64carry x y c)) =>            (Select0 <typ.UInt64> (ADDE x y (Select1 <typ.UInt64> (ADDCconst c [-1]))))
   110  (Select1 (Add64carry x y c)) => (ADDZEzero (Select1 <typ.UInt64> (ADDE x y (Select1 <typ.UInt64> (ADDCconst c [-1])))))
   111  // Fold initial carry bit if 0.
   112  (ADDE x y (Select1 <typ.UInt64> (ADDCconst (MOVDconst [0]) [-1]))) => (ADDC x y)
   113  // Fold transfer of CA -> GPR -> CA. Note 2 uses when feeding into a chained Add64carry.
   114  (Select1 (ADDCconst n:(ADDZEzero x) [-1])) && n.Uses <= 2 => x
   115  (ADDE (MOVDconst [0]) y c) => (ADDZE y c)
   116  (ADDC x (MOVDconst [y])) && is16Bit(y) => (ADDCconst [y] x)
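        // The ADDCconst c [-1] above re-materializes a 0/1 carry value held in
        // a GPR into the CA bit: computing c - 1 carries out (sets CA) exactly
        // when c is nonzero, which is what the chained ADDE then consumes.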
   117  
   118  // Borrowing subtraction.
   119  (Select0 (Sub64borrow x y c)) =>                 (Select0 <typ.UInt64> (SUBE x y (Select1 <typ.UInt64> (SUBCconst c [0]))))
   120  (Select1 (Sub64borrow x y c)) => (NEG (SUBZEzero (Select1 <typ.UInt64> (SUBE x y (Select1 <typ.UInt64> (SUBCconst c [0]))))))
   121  // Fold initial borrow bit if 0.
   122  (SUBE x y (Select1 <typ.UInt64> (SUBCconst (MOVDconst [0]) [0]))) => (SUBC x y)
   123  // Fold transfer of CA -> GPR -> CA. Note 2 uses when feeding into a chained Sub64borrow.
   124  (Select1 (SUBCconst n:(NEG (SUBZEzero x)) [0])) && n.Uses <= 2 => x
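        // Likewise, SUBCconst c [0] computes 0 - c, which borrows exactly when
        // the 0/1 value c is nonzero. On PPC64 CA means "no borrow", so CA is
        // cleared for c != 0, re-materializing the borrow for the chained SUBE.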
   125  
   126  // Constant folding
   127  (FABS (FMOVDconst [x])) => (FMOVDconst [math.Abs(x)])
   128  (FSQRT (FMOVDconst [x])) && x >= 0 => (FMOVDconst [math.Sqrt(x)])
   129  (FFLOOR (FMOVDconst [x])) => (FMOVDconst [math.Floor(x)])
   130  (FCEIL (FMOVDconst [x])) => (FMOVDconst [math.Ceil(x)])
   131  (FTRUNC (FMOVDconst [x])) => (FMOVDconst [math.Trunc(x)])
   132  
   133  // Rotates
   134  (RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
   135  (RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
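        // PPC64 has no 8- or 16-bit rotate instruction, so constant sub-word
        // rotates are decomposed into a shift left, a shift right, and an OR,
        // which other rules in this file then lower further.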
   136  (RotateLeft(32|64) ...) => ((ROTLW|ROTL) ...)
   137  
   138  // Constant rotate generation
   139  (ROTLW  x (MOVDconst [c])) => (ROTLWconst  x [c&31])
   140  (ROTL   x (MOVDconst [c])) => (ROTLconst   x [c&63])
   141  
   142  // Combine rotate and mask operations
   143  (ANDconst [m] (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
   144  (AND (MOVDconst [m]) (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
   145  (ANDconst [m] (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
   146  (AND (MOVDconst [m]) (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
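        // For example, a 32-bit rotate followed by a contiguous mask, such as
        // ((x <<< 8) & 0xFF0), folds into a single rlwinm (rotate-and-mask)
        // instead of separate rotate and mask instructions.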
   147  
   148  // Note, any rotated word bitmask is still a valid word bitmask.
   149  (ROTLWconst [r] (AND (MOVDconst [m]) x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
   150  (ROTLWconst [r] (ANDconst [m] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
   151  
   152  (ANDconst [m] (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
   153  (ANDconst [m] (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
   154  (ANDconst [m] (SRDconst x [s])) && mergePPC64AndSrdi(m,s) != 0 => (RLWINM [mergePPC64AndSrdi(m,s)] x)
   155  (AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
   156  (AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
   157  (AND (MOVDconst [m]) (SRDconst x [s])) && mergePPC64AndSrdi(m,s) != 0 => (RLWINM [mergePPC64AndSrdi(m,s)] x)
   158  
   159  (SRWconst (ANDconst [m] x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
   160  (SRWconst (ANDconst [m] x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
   161  (SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
   162  (SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
   163  
   164  (ANDconst [m] (SLDconst x [s])) && mergePPC64AndSldi(m,s) != 0 => (RLWINM [mergePPC64AndSldi(m,s)] x)
   165  (AND (MOVDconst [m]) (SLDconst x [s])) && mergePPC64AndSldi(m,s) != 0 => (RLWINM [mergePPC64AndSldi(m,s)] x)
   166  
   167  // Merge shift right + shift left and clear left (e.g. for a table lookup)
   168  (CLRLSLDI [c] (SRWconst [s] x)) && mergePPC64ClrlsldiSrw(int64(c),s) != 0 => (RLWINM [mergePPC64ClrlsldiSrw(int64(c),s)] x)
   169  (CLRLSLDI [c] (SRDconst [s] x)) && mergePPC64ClrlsldiSrd(int64(c),s) != 0 => (RLWINM [mergePPC64ClrlsldiSrd(int64(c),s)] x)
   170  (SLDconst [l] (SRWconst [r] x)) && mergePPC64SldiSrw(l,r) != 0 => (RLWINM [mergePPC64SldiSrw(l,r)] x)
   171  // The following reduction shows up frequently too, e.g. b[(x>>14)&0xFF]
   172  (CLRLSLDI [c] i:(RLWINM [s] x)) && mergePPC64ClrlsldiRlwinm(c,s) != 0 => (RLWINM [mergePPC64ClrlsldiRlwinm(c,s)] x)
   173  
   174  // Large constant signed right shift: only the sign bit of the input remains
   175  (Rsh64x64 x (MOVDconst [c])) && uint64(c) >= 64 => (SRADconst x [63])
   176  (Rsh32x64 x (MOVDconst [c])) && uint64(c) >= 32 => (SRAWconst x [63])
   177  (Rsh16x64 x (MOVDconst [c])) && uint64(c) >= 16 => (SRAWconst (SignExt16to32 x) [63])
   178  (Rsh8x64  x (MOVDconst [c])) && uint64(c) >= 8  => (SRAWconst (SignExt8to32  x) [63])
   179  
   180  // constant shifts
   181  ((Lsh64|Rsh64|Rsh64U)x64  x (MOVDconst [c])) && uint64(c) < 64 => (S(L|RA|R)Dconst x [c])
   182  ((Lsh32|Rsh32|Rsh32U)x64  x (MOVDconst [c])) && uint64(c) < 32 => (S(L|RA|R)Wconst x [c])
   183  ((Rsh16|Rsh16U)x64  x (MOVDconst [c])) && uint64(c) < 16 => (SR(AW|W)const ((Sign|Zero)Ext16to32 x) [c])
   184  (Lsh16x64  x (MOVDconst [c])) && uint64(c) < 16 => (SLWconst x [c])
   185  ((Rsh8|Rsh8U)x64  x (MOVDconst [c])) && uint64(c) < 8 => (SR(AW|W)const ((Sign|Zero)Ext8to32 x) [c])
   186  (Lsh8x64  x (MOVDconst [c])) && uint64(c) < 8 => (SLWconst x [c])
   187  
   188  // Lower bounded shifts first. No need to check shift value.
   189  (Lsh64x(64|32|16|8)  x y) && shiftIsBounded(v) => (SLD x y)
   190  (Lsh32x(64|32|16|8)  x y) && shiftIsBounded(v) => (SLW x y)
   191  (Lsh16x(64|32|16|8)  x y) && shiftIsBounded(v) => (SLD x y)
   192  (Lsh8x(64|32|16|8)   x y) && shiftIsBounded(v) => (SLD x y)
   193  (Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRD x y)
   194  (Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW x y)
   195  (Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRD (MOVHZreg x) y)
   196  (Rsh8Ux(64|32|16|8)  x y) && shiftIsBounded(v) => (SRD (MOVBZreg x) y)
   197  (Rsh64x(64|32|16|8)  x y) && shiftIsBounded(v) => (SRAD x y)
   198  (Rsh32x(64|32|16|8)  x y) && shiftIsBounded(v) => (SRAW x y)
   199  (Rsh16x(64|32|16|8)  x y) && shiftIsBounded(v) => (SRAD (MOVHreg x) y)
   200  (Rsh8x(64|32|16|8)   x y) && shiftIsBounded(v) => (SRAD (MOVBreg x) y)
   201  
   202  // Unbounded shifts. Go shifts saturate to 0 or -1 when shifting beyond the number of
   203  // bits in a type; PPC64 shifts do not (see the ISA for details).
   204  //
   205  // Note, y is always non-negative.
   206  //
   207  // Note, ISELZ is intentionally not used in lower. Where possible, ISEL is converted to ISELZ in late lower
   208  // after all the ISEL folding rules have been exercised.
   209  
   210  ((Rsh64U|Lsh64)x64 <t> x y)  => (ISEL [0] (S(R|L)D <t> x y) (MOVDconst [0])        (CMPUconst y [64]))
   211  ((Rsh64U|Lsh64)x32 <t> x y)  => (ISEL [0] (S(R|L)D <t> x y) (MOVDconst [0])        (CMPWUconst y [64]))
   212  ((Rsh64U|Lsh64)x16 <t> x y)  => (ISEL [2] (S(R|L)D <t> x y) (MOVDconst [0])        (CMPconst [0] (ANDconst [0xFFC0] y)))
   213  ((Rsh64U|Lsh64)x8  <t> x y)  => (ISEL [2] (S(R|L)D <t> x y) (MOVDconst [0])        (CMPconst [0] (ANDconst [0x00C0] y)))
   214  (Rsh64x(64|32)     <t> x y)  => (ISEL [0] (SRAD    <t> x y) (SRADconst <t> x [63]) (CMP(U|WU)const y [64]))
   215  (Rsh64x16          <t> x y)  => (ISEL [2] (SRAD    <t> x y) (SRADconst <t> x [63]) (CMPconst [0] (ANDconst [0xFFC0] y)))
   216  (Rsh64x8           <t> x y)  => (ISEL [2] (SRAD    <t> x y) (SRADconst <t> x [63]) (CMPconst [0] (ANDconst [0x00C0] y)))
   217  
   218  ((Rsh32U|Lsh32)x64 <t> x y)  => (ISEL [0] (S(R|L)W <t> x y) (MOVDconst [0])        (CMPUconst y [32]))
   219  ((Rsh32U|Lsh32)x32 <t> x y)  => (ISEL [0] (S(R|L)W <t> x y) (MOVDconst [0])        (CMPWUconst y [32]))
   220  ((Rsh32U|Lsh32)x16 <t> x y)  => (ISEL [2] (S(R|L)W <t> x y) (MOVDconst [0])        (CMPconst [0] (ANDconst [0xFFE0] y)))
   221  ((Rsh32U|Lsh32)x8  <t> x y)  => (ISEL [2] (S(R|L)W <t> x y) (MOVDconst [0])        (CMPconst [0] (ANDconst [0x00E0] y)))
   222  (Rsh32x(64|32)     <t> x y)  => (ISEL [0] (SRAW    <t> x y) (SRAWconst <t> x [31]) (CMP(U|WU)const y [32]))
   223  (Rsh32x16          <t> x y)  => (ISEL [2] (SRAW    <t> x y) (SRAWconst <t> x [31]) (CMPconst [0] (ANDconst [0xFFE0] y)))
   224  (Rsh32x8           <t> x y)  => (ISEL [2] (SRAW    <t> x y) (SRAWconst <t> x [31]) (CMPconst [0] (ANDconst [0x00E0] y)))
   225  
   226  ((Rsh16U|Lsh16)x64 <t> x y) => (ISEL [0] (S(R|L)D  <t> (MOVHZreg x) y) (MOVDconst [0])                   (CMPUconst  y [16]))
   227  ((Rsh16U|Lsh16)x32 <t> x y) => (ISEL [0] (S(R|L)D  <t> (MOVHZreg x) y) (MOVDconst [0])                   (CMPWUconst y [16]))
   228  ((Rsh16U|Lsh16)x16 <t> x y) => (ISEL [2] (S(R|L)D  <t> (MOVHZreg x) y) (MOVDconst [0])                   (CMPconst [0] (ANDconst [0xFFF0] y)))
   229  ((Rsh16U|Lsh16)x8  <t> x y) => (ISEL [2] (S(R|L)D  <t> (MOVHZreg x) y) (MOVDconst [0])                   (CMPconst [0] (ANDconst [0x00F0] y)))
   230  (Rsh16x(64|32)     <t> x y) => (ISEL [0] (SRAD     <t> (MOVHreg  x) y) (SRADconst <t>  (MOVHreg x) [15]) (CMP(U|WU)const  y [16]))
   231  (Rsh16x16          <t> x y) => (ISEL [2] (SRAD     <t> (MOVHreg  x) y) (SRADconst <t>  (MOVHreg x) [15]) (CMPconst [0] (ANDconst [0xFFF0] y)))
   232  (Rsh16x8           <t> x y) => (ISEL [2] (SRAD     <t> (MOVHreg  x) y) (SRADconst <t>  (MOVHreg x) [15]) (CMPconst [0] (ANDconst [0x00F0] y)))
   233  
   234  ((Rsh8U|Lsh8)x64 <t> x y) => (ISEL [0] (S(R|L)D  <t> (MOVBZreg x) y) (MOVDconst [0])                  (CMPUconst  y [8]))
   235  ((Rsh8U|Lsh8)x32 <t> x y) => (ISEL [0] (S(R|L)D  <t> (MOVBZreg x) y) (MOVDconst [0])                  (CMPWUconst y [8]))
   236  ((Rsh8U|Lsh8)x16 <t> x y) => (ISEL [2] (S(R|L)D  <t> (MOVBZreg x) y) (MOVDconst [0])                  (CMPconst [0] (ANDconst [0xFFF8] y)))
   237  ((Rsh8U|Lsh8)x8  <t> x y) => (ISEL [2] (S(R|L)D  <t> (MOVBZreg x) y) (MOVDconst [0])                  (CMPconst [0] (ANDconst [0x00F8] y)))
   238  (Rsh8x(64|32)    <t> x y) => (ISEL [0] (SRAD     <t> (MOVBreg  x) y) (SRADconst <t>  (MOVBreg x) [7]) (CMP(U|WU)const  y [8]))
   239  (Rsh8x16         <t> x y) => (ISEL [2] (SRAD     <t> (MOVBreg  x) y) (SRADconst <t>  (MOVBreg x) [7]) (CMPconst [0] (ANDconst [0xFFF8] y)))
   240  (Rsh8x8          <t> x y) => (ISEL [2] (SRAD     <t> (MOVBreg  x) y) (SRADconst <t>  (MOVBreg x) [7]) (CMPconst [0] (ANDconst [0x00F8] y)))
   241  
   242  // Catch bounded shifts in situations like foo<<uint(shift&63) which might not be caught by the prove pass.
   243  (CMP(U|WU)const [d] (ANDconst z [c])) && uint64(d) > uint64(c) => (FlagLT)
   244  
   245  (ORN x (MOVDconst [-1])) => x
   246  
   247  (S(RAD|RD|LD) x (MOVDconst [c])) => (S(RAD|RD|LD)const [c&63 | (c>>6&1*63)] x)
   248  (S(RAW|RW|LW) x (MOVDconst [c])) => (S(RAW|RW|LW)const [c&31 | (c>>5&1*31)] x)
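        // The auxint expression clamps the shift amount into range:
        // ((c>>6)&1)*63 ORs in 63 whenever bit 6 of c is set, collapsing
        // amounts 64..127 to 63 (the word forms likewise collapse 32..63 to
        // 31). For SRAD/SRAW this is the correct saturation; for the logical
        // shifts, oversized constant amounts only arise under the saturating
        // guards above, which discard the shifted value anyway.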
   249  
   250  (Addr {sym} base) => (MOVDaddr {sym} [0] base)
   251  (LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVDaddr {sym} (SPanchored base mem))
   252  (LocalAddr <t> {sym} base _)  && !t.Elem().HasPointers() => (MOVDaddr {sym} base)
   253  (OffPtr [off] ptr) => (ADD (MOVDconst <typ.Int64> [off]) ptr)
   254  (MOVDaddr {sym} [n] p:(ADD x y)) && sym == nil && n == 0 => p
   255  (MOVDaddr {sym} [n] ptr) && sym == nil && n == 0 && (ptr.Op == OpArgIntReg || ptr.Op == OpPhi) => ptr
   256  
   257  // TODO: optimize these cases?
   258  (Ctz32NonZero ...) => (Ctz32 ...)
   259  (Ctz64NonZero ...) => (Ctz64 ...)
   260  
   261  (Ctz64 x) && buildcfg.GOPPC64<=8 => (POPCNTD (ANDN <typ.Int64> (ADDconst <typ.Int64> [-1] x) x))
   262  (Ctz64 x) => (CNTTZD x)
   263  (Ctz32 x) && buildcfg.GOPPC64<=8 => (POPCNTW (MOVWZreg (ANDN <typ.Int> (ADDconst <typ.Int> [-1] x) x)))
   264  (Ctz32 x) => (CNTTZW (MOVWZreg x))
   265  (Ctz16 x) => (POPCNTW (MOVHZreg (ANDN <typ.Int16> (ADDconst <typ.Int16> [-1] x) x)))
   266  (Ctz8 x)  => (POPCNTB (MOVBZreg (ANDN <typ.UInt8> (ADDconst <typ.UInt8> [-1] x) x)))
   267  
   268  (BitLen64 x) => (SUBFCconst [64] (CNTLZD <typ.Int> x))
   269  (BitLen32 x) => (SUBFCconst [32] (CNTLZW <typ.Int> x))
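        // These use the count-leading-zeros identity BitLen(x) = width - CLZ(x);
        // SUBFCconst [w] y computes w - y (see the "subtract from constant"
        // rules further down).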
   270  
   271  (PopCount64 ...) => (POPCNTD ...)
   272  (PopCount(32|16|8) x) => (POPCNT(W|W|B) (MOV(W|H|B)Zreg x))
   273  
   274  (And(64|32|16|8) ...) => (AND ...)
   275  (Or(64|32|16|8) ...) => (OR ...)
   276  (Xor(64|32|16|8) ...) => (XOR ...)
   277  
   278  (Neg(64|32|16|8) ...) => (NEG ...)
   279  (Neg(64|32)F ...) => (FNEG ...)
   280  
   281  (Com(64|32|16|8) x) => (NOR x x)
   282  
   283  // Lowering boolean ops
   284  (AndB ...) => (AND ...)
   285  (OrB ...) => (OR ...)
   286  (Not x) => (XORconst [1] x)
   287  
   288  // Merge logical operations
   289  (AND x (NOR y y)) => (ANDN x y)
   290  (OR x (NOR y y)) => (ORN x y)
   291  
   292  // Lowering comparisons
   293  (EqB x y)  => (ANDconst [1] (EQV x y))
   294  // Choosing the extension based on operand signedness sets up for sign/zero-extension elision later
   295  (Eq(8|16) x y) && x.Type.IsSigned() && y.Type.IsSigned() => (Equal (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y)))
   296  (Eq(8|16) x y) => (Equal (CMPW (ZeroExt(8|16)to32 x) (ZeroExt(8|16)to32 y)))
   297  (Eq(32|64|Ptr) x y) => (Equal ((CMPW|CMP|CMP) x y))
   298  (Eq(32|64)F x y) => (Equal (FCMPU x y))
   299  
   300  (NeqB ...) => (XOR ...)
   301  // Like Eq8 and Eq16, prefer sign extension when it is likely to enable later elision.
   302  (Neq(8|16) x y) && x.Type.IsSigned() && y.Type.IsSigned() => (NotEqual (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y)))
   303  (Neq(8|16) x y)  => (NotEqual (CMPW (ZeroExt(8|16)to32 x) (ZeroExt(8|16)to32 y)))
   304  (Neq(32|64|Ptr) x y) => (NotEqual ((CMPW|CMP|CMP) x y))
   305  (Neq(32|64)F x y) => (NotEqual (FCMPU x y))
   306  
   307  (Less(8|16) x y)  => (LessThan (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y)))
   308  (Less(32|64) x y) => (LessThan ((CMPW|CMP) x y))
   309  (Less(32|64)F x y) => (FLessThan (FCMPU x y))
   310  
   311  (Less(8|16)U x y)  => (LessThan (CMPWU (ZeroExt(8|16)to32 x) (ZeroExt(8|16)to32 y)))
   312  (Less(32|64)U x y) => (LessThan ((CMPWU|CMPU) x y))
   313  
   314  (Leq(8|16) x y)  => (LessEqual (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y)))
   315  (Leq(32|64) x y) => (LessEqual ((CMPW|CMP) x y))
   316  (Leq(32|64)F x y) => (FLessEqual (FCMPU x y))
   317  
   318  (Leq(8|16)U x y)  => (LessEqual (CMPWU (ZeroExt(8|16)to32 x) (ZeroExt(8|16)to32 y)))
   319  (Leq(32|64)U x y) => (LessEqual (CMP(WU|U) x y))
   320  
   321  // Absorb pseudo-ops into blocks.
   322  (If (Equal cc) yes no) => (EQ cc yes no)
   323  (If (NotEqual cc) yes no) => (NE cc yes no)
   324  (If (LessThan cc) yes no) => (LT cc yes no)
   325  (If (LessEqual cc) yes no) => (LE cc yes no)
   326  (If (GreaterThan cc) yes no) => (GT cc yes no)
   327  (If (GreaterEqual cc) yes no) => (GE cc yes no)
   328  (If (FLessThan cc) yes no) => (FLT cc yes no)
   329  (If (FLessEqual cc) yes no) => (FLE cc yes no)
   330  (If (FGreaterThan cc) yes no) => (FGT cc yes no)
   331  (If (FGreaterEqual cc) yes no) => (FGE cc yes no)
   332  
   333  (If cond yes no) => (NE (CMPconst [0] (ANDconst [1] cond)) yes no)
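        // A generic boolean is a 0/1 value in a register, so the fallback
        // tests its low bit against zero and branches on not-equal.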
   334  
   335  // Absorb boolean tests into block
   336  (NE (CMPconst [0] (ANDconst [1] ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) cc))) yes no) => ((EQ|NE|LT|LE|GT|GE) cc yes no)
   337  (NE (CMPconst [0] (ANDconst [1] ((FLessThan|FLessEqual|FGreaterThan|FGreaterEqual) cc))) yes no) => ((FLT|FLE|FGT|FGE) cc yes no)
   338  
   339  // absorb flag constants into branches
   340  (EQ (FlagEQ) yes no) => (First yes no)
   341  (EQ (FlagLT) yes no) => (First no yes)
   342  (EQ (FlagGT) yes no) => (First no yes)
   343  
   344  (NE (FlagEQ) yes no) => (First no yes)
   345  (NE (FlagLT) yes no) => (First yes no)
   346  (NE (FlagGT) yes no) => (First yes no)
   347  
   348  (LT (FlagEQ) yes no) => (First no yes)
   349  (LT (FlagLT) yes no) => (First yes no)
   350  (LT (FlagGT) yes no) => (First no yes)
   351  
   352  (LE (FlagEQ) yes no) => (First yes no)
   353  (LE (FlagLT) yes no) => (First yes no)
   354  (LE (FlagGT) yes no) => (First no yes)
   355  
   356  (GT (FlagEQ) yes no) => (First no yes)
   357  (GT (FlagLT) yes no) => (First no yes)
   358  (GT (FlagGT) yes no) => (First yes no)
   359  
   360  (GE (FlagEQ) yes no) => (First yes no)
   361  (GE (FlagLT) yes no) => (First no yes)
   362  (GE (FlagGT) yes no) => (First yes no)
   363  
   364  // absorb InvertFlags into branches
   365  (LT (InvertFlags cmp) yes no) => (GT cmp yes no)
   366  (GT (InvertFlags cmp) yes no) => (LT cmp yes no)
   367  (LE (InvertFlags cmp) yes no) => (GE cmp yes no)
   368  (GE (InvertFlags cmp) yes no) => (LE cmp yes no)
   369  (EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
   370  (NE (InvertFlags cmp) yes no) => (NE cmp yes no)
   371  
   372  // constant comparisons
   373  (CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) => (FlagEQ)
   374  (CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y)  => (FlagLT)
   375  (CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y)  => (FlagGT)
   376  
   377  (CMPconst (MOVDconst [x]) [y]) && x==y => (FlagEQ)
   378  (CMPconst (MOVDconst [x]) [y]) && x<y  => (FlagLT)
   379  (CMPconst (MOVDconst [x]) [y]) && x>y  => (FlagGT)
   380  
   381  (CMPWUconst (MOVDconst [x]) [y]) && int32(x)==int32(y)  => (FlagEQ)
   382  (CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) => (FlagLT)
   383  (CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) => (FlagGT)
   384  
   385  (CMPUconst (MOVDconst [x]) [y]) && x==y  => (FlagEQ)
   386  (CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) => (FlagLT)
   387  (CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) => (FlagGT)
   388  
   389  // absorb flag constants into boolean values
   390  (Equal (FlagEQ)) => (MOVDconst [1])
   391  (Equal (FlagLT)) => (MOVDconst [0])
   392  (Equal (FlagGT)) => (MOVDconst [0])
   393  
   394  (NotEqual (FlagEQ)) => (MOVDconst [0])
   395  (NotEqual (FlagLT)) => (MOVDconst [1])
   396  (NotEqual (FlagGT)) => (MOVDconst [1])
   397  
   398  (LessThan (FlagEQ)) => (MOVDconst [0])
   399  (LessThan (FlagLT)) => (MOVDconst [1])
   400  (LessThan (FlagGT)) => (MOVDconst [0])
   401  
   402  (LessEqual (FlagEQ)) => (MOVDconst [1])
   403  (LessEqual (FlagLT)) => (MOVDconst [1])
   404  (LessEqual (FlagGT)) => (MOVDconst [0])
   405  
   406  (GreaterThan (FlagEQ)) => (MOVDconst [0])
   407  (GreaterThan (FlagLT)) => (MOVDconst [0])
   408  (GreaterThan (FlagGT)) => (MOVDconst [1])
   409  
   410  (GreaterEqual (FlagEQ)) => (MOVDconst [1])
   411  (GreaterEqual (FlagLT)) => (MOVDconst [0])
   412  (GreaterEqual (FlagGT)) => (MOVDconst [1])
   413  
   414  // absorb InvertFlags into boolean values
   415  ((Equal|NotEqual|LessThan|GreaterThan|LessEqual|GreaterEqual) (InvertFlags x)) => ((Equal|NotEqual|GreaterThan|LessThan|GreaterEqual|LessEqual) x)
   416  
   417  
   418  // Elide compares of bit tests
   419  ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (ANDCC x y)) yes no)
   420  ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(OR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (ORCC x y)) yes no)
   421  ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (Select1 <types.TypeFlags> (XORCC x y)) yes no)
   422  
   423  (CondSelect x y (SETBC [a] cmp))  => (ISEL [a] x y cmp)
   424  (CondSelect x y (SETBCR [a] cmp))  => (ISEL [a+4] x y cmp)
   425  // Only lower after bool is lowered. It should always lower. This helps ensure the folding below happens reliably.
   426  (CondSelect x y bool) && flagArg(bool) == nil => (ISEL [6] x y (CMPconst [0] (ANDconst [1] bool)))
   427  // Fold any CR -> GPR -> CR transfers when applying the above rule.
   428  (ISEL [6] x y (CMPconst [0] (ANDconst [1] (SETBC [c] cmp)))) => (ISEL [c] x y cmp)
   429  (ISEL [6] x y ((CMP|CMPW)const [0] (SETBC [c] cmp))) => (ISEL [c] x y cmp)
   430  (ISEL [6] x y ((CMP|CMPW)const [0] (SETBCR [c] cmp))) => (ISEL [c+4] x y cmp)
   431  
   432  // Lowering loads
   433  (Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
   434  (Load <t> ptr mem) && is32BitInt(t) &&  t.IsSigned() => (MOVWload ptr mem)
   435  (Load <t> ptr mem) && is32BitInt(t) && !t.IsSigned() => (MOVWZload ptr mem)
   436  (Load <t> ptr mem) && is16BitInt(t) &&  t.IsSigned() => (MOVHload ptr mem)
   437  (Load <t> ptr mem) && is16BitInt(t) && !t.IsSigned() => (MOVHZload ptr mem)
   438  (Load <t> ptr mem) && t.IsBoolean() => (MOVBZload ptr mem)
   439  (Load <t> ptr mem) && is8BitInt(t) &&  t.IsSigned() => (MOVBreg (MOVBZload ptr mem)) // PPC has no signed-byte load.
   440  (Load <t> ptr mem) && is8BitInt(t) && !t.IsSigned() => (MOVBZload ptr mem)
   441  
   442  (Load <t> ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem)
   443  (Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)
   444  
   445  (Store {t} ptr val mem) && t.Size() == 8 &&  t.IsFloat() => (FMOVDstore ptr val mem)
   446  (Store {t} ptr val mem) && t.Size() == 4 &&  t.IsFloat() => (FMOVSstore ptr val mem)
   447  (Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVDstore ptr val mem)
   448  (Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
   449  (Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
   450  (Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
   451  
   452  // Using Zero instead of LoweredZero allows the
   453  // target address to be folded where possible.
   454  (Zero [0] _ mem) => mem
   455  (Zero [1] destptr mem) => (MOVBstorezero destptr mem)
   456  (Zero [2] destptr mem) =>
   457  	(MOVHstorezero destptr mem)
   458  (Zero [3] destptr mem) =>
   459  	(MOVBstorezero [2] destptr
   460  		(MOVHstorezero destptr mem))
   461  (Zero [4] destptr mem) =>
   462  	(MOVWstorezero destptr mem)
   463  (Zero [5] destptr mem) =>
   464  	(MOVBstorezero [4] destptr
   465  		(MOVWstorezero destptr mem))
   466  (Zero [6] destptr mem) =>
   467  	(MOVHstorezero [4] destptr
   468  		(MOVWstorezero destptr mem))
   469  (Zero [7] destptr mem) =>
   470  	(MOVBstorezero [6] destptr
   471  		(MOVHstorezero [4] destptr
   472  			(MOVWstorezero destptr mem)))
   473  
   474  (Zero [8] {t} destptr mem) => (MOVDstorezero destptr mem)
   475  (Zero [12] {t} destptr mem) =>
   476          (MOVWstorezero [8] destptr
   477                  (MOVDstorezero [0] destptr mem))
   478  (Zero [16] {t} destptr mem) =>
   479         (MOVDstorezero [8] destptr
   480                  (MOVDstorezero [0] destptr mem))
   481  (Zero [24] {t} destptr mem) =>
   482         (MOVDstorezero [16] destptr
   483                 (MOVDstorezero [8] destptr
   484                         (MOVDstorezero [0] destptr mem)))
   485  (Zero [32] {t} destptr mem) =>
   486         (MOVDstorezero [24] destptr
   487                 (MOVDstorezero [16] destptr
   488                         (MOVDstorezero [8] destptr
   489                                 (MOVDstorezero [0] destptr mem))))
   490  
   491  // Handle cases not handled above
   492  // Lowered Short cases do not generate loops, and as a result don't clobber
   493  // the address registers or flags.
   494  (Zero [s] ptr mem) && buildcfg.GOPPC64 <= 8 && s < 64 => (LoweredZeroShort [s] ptr mem)
   495  (Zero [s] ptr mem) && buildcfg.GOPPC64 <= 8 => (LoweredZero [s] ptr mem)
   496  (Zero [s] ptr mem) && s < 128 && buildcfg.GOPPC64 >= 9 => (LoweredQuadZeroShort [s] ptr mem)
   497  (Zero [s] ptr mem) && buildcfg.GOPPC64 >= 9 => (LoweredQuadZero [s] ptr mem)
   498  
   499  // moves
   500  (Move [0] _ _ mem) => mem
   501  (Move [1] dst src mem) => (MOVBstore dst (MOVBZload src mem) mem)
   502  (Move [2] dst src mem) =>
   503          (MOVHstore dst (MOVHZload src mem) mem)
   504  (Move [4] dst src mem) =>
   505  	(MOVWstore dst (MOVWZload src mem) mem)
   506  // MOVD for load and store must have offsets that are multiples of 4
   507  (Move [8] {t} dst src mem) =>
   508  	(MOVDstore dst (MOVDload src mem) mem)
   509  (Move [3] dst src mem) =>
   510          (MOVBstore [2] dst (MOVBZload [2] src mem)
   511                  (MOVHstore dst (MOVHload src mem) mem))
   512  (Move [5] dst src mem) =>
   513          (MOVBstore [4] dst (MOVBZload [4] src mem)
   514                  (MOVWstore dst (MOVWZload src mem) mem))
   515  (Move [6] dst src mem) =>
   516          (MOVHstore [4] dst (MOVHZload [4] src mem)
   517                  (MOVWstore dst (MOVWZload src mem) mem))
   518  (Move [7] dst src mem) =>
   519          (MOVBstore [6] dst (MOVBZload [6] src mem)
   520                  (MOVHstore [4] dst (MOVHZload [4] src mem)
   521                          (MOVWstore dst (MOVWZload src mem) mem)))
   522  
   523  // Large move uses a loop. Since the address is computed and the
   524  // offset is zero, any alignment can be used.
   525  (Move [s] dst src mem) && s > 8 && buildcfg.GOPPC64 <= 8 && logLargeCopy(v, s) =>
   526          (LoweredMove [s] dst src mem)
   527  (Move [s] dst src mem) && s > 8 && s <= 64 && buildcfg.GOPPC64 >= 9 =>
   528          (LoweredQuadMoveShort [s] dst src mem)
   529  (Move [s] dst src mem) && s > 8 && buildcfg.GOPPC64 >= 9 && logLargeCopy(v, s) =>
   530          (LoweredQuadMove [s] dst src mem)
   531  
   532  // Calls
   533  // Lowering calls
   534  (StaticCall ...) => (CALLstatic ...)
   535  (ClosureCall ...) => (CALLclosure ...)
   536  (InterCall ...) => (CALLinter ...)
   537  (TailCall ...) => (CALLtail ...)
   538  
   539  // Miscellaneous
   540  (GetClosurePtr ...) => (LoweredGetClosurePtr ...)
   541  (GetCallerSP ...) => (LoweredGetCallerSP ...)
   542  (GetCallerPC ...) => (LoweredGetCallerPC ...)
   543  (IsNonNil ptr) => (NotEqual (CMPconst [0] ptr))
   544  (IsInBounds idx len) => (LessThan (CMPU idx len))
   545  (IsSliceInBounds idx len) => (LessEqual (CMPU idx len))
   546  (NilCheck ...) => (LoweredNilCheck ...)
   547  
   548  // Write barrier.
   549  (WB ...) => (LoweredWB ...)
   550  
   551  // Publication barrier as intrinsic
   552  (PubBarrier ...) => (LoweredPubBarrier ...)
   553  
   554  (PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
   555  (PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
   556  (PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
   557  
   558  // Optimizations
   559  // Note that PPC64 "logical" immediates come in unshifted (bits 0:15) and shifted (bits 16:31)
   560  // unsigned 16-bit forms, so ORconst and XORconst easily expand into a pair.
   561  
   562  // Include very-large constants in the const-const case.
   563  (AND (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&d])
   564  (OR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|d])
   565  (XOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c^d])
   566  (ORN (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|^d])
   567  (ANDN (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&^d])
   568  (NOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [^(c|d)])
   569  
   570  // Discover consts
   571  (AND x (MOVDconst [-1])) => x
   572  (AND x (MOVDconst [c])) && isU16Bit(c) => (ANDconst [c] x)
   573  (XOR x (MOVDconst [c])) && isU32Bit(c) => (XORconst [c] x)
   574  (OR x (MOVDconst [c])) && isU32Bit(c) => (ORconst [c] x)
   575  
   576  // Simplify consts
   577  (ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
   578  (ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
   579  (XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
   580  (ANDconst [-1] x) => x
   581  (ANDconst [0] _) => (MOVDconst [0])
   582  (XORconst [0] x) => x
   583  (ORconst [-1] _) => (MOVDconst [-1])
   584  (ORconst [0] x) => x
   585  
   586  // zero-extend of small and => small and
   587  (MOVBZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFF => y
   588  (MOVHZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF => y
   589  (MOVWZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFFFFFF => y
   590  (MOVWZreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0xFFFFFFFF => y
   591  
   592  // sign extend of small-positive and => small-positive-and
   593  (MOVBreg y:(ANDconst [c] _)) && uint64(c) <= 0x7F => y
   594  (MOVHreg y:(ANDconst [c] _)) && uint64(c) <= 0x7FFF => y
   595  (MOVWreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF => y // 0xFFFF is the largest immediate constant; regarded as 32-bit it is > 0
   596  (MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF => y
   597  
   598  // small and of zero-extend => either zero-extend or small and
   599  (ANDconst [c] y:(MOVBZreg _)) && c&0xFF == 0xFF => y
   600  (ANDconst [0xFF] (MOVBreg x)) => (MOVBZreg x)
   601  (ANDconst [c] y:(MOVHZreg _))  && c&0xFFFF == 0xFFFF => y
   602  (ANDconst [0xFFFF] (MOVHreg x)) => (MOVHZreg x)
   603  
   604  (AND (MOVDconst [c]) y:(MOVWZreg _))  && c&0xFFFFFFFF == 0xFFFFFFFF => y
   605  (AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) => (MOVWZreg x)
   606  // normal case
   607  (ANDconst [c] (MOVBZreg x)) => (ANDconst [c&0xFF] x)
   608  (ANDconst [c] (MOVHZreg x)) => (ANDconst [c&0xFFFF] x)
   609  (ANDconst [c] (MOVWZreg x)) => (ANDconst [c&0xFFFFFFFF] x)
   610  
   611  // Eliminate unnecessary sign/zero extend following right shift
   612  (MOV(B|H|W)Zreg (SRWconst [c] (MOVBZreg x))) => (SRWconst [c] (MOVBZreg x))
   613  (MOV(H|W)Zreg (SRWconst [c] (MOVHZreg x))) => (SRWconst [c] (MOVHZreg x))
   614  (MOVWZreg (SRWconst [c] (MOVWZreg x))) => (SRWconst [c] (MOVWZreg x))
   615  (MOV(B|H|W)reg (SRAWconst [c] (MOVBreg x))) => (SRAWconst [c] (MOVBreg x))
   616  (MOV(H|W)reg (SRAWconst [c] (MOVHreg x))) => (SRAWconst [c] (MOVHreg x))
   617  (MOVWreg (SRAWconst [c] (MOVWreg x))) => (SRAWconst [c] (MOVWreg x))
   618  
   619  (MOV(WZ|W)reg (S(R|RA)Wconst [c] x)) && x.Type.Size() <= 32 => (S(R|RA)Wconst [c] x)
   620  (MOV(HZ|H)reg (S(R|RA)Wconst [c] x)) && x.Type.Size() <= 16 => (S(R|RA)Wconst [c] x)
   621  (MOV(BZ|B)reg (S(R|RA)Wconst [c] x)) && x.Type.Size() == 8 => (S(R|RA)Wconst [c] x)
   622  
   623  // initial right shift will handle sign/zero extend
   624  (MOVBZreg (SRDconst [c] x)) && c>=56 => (SRDconst [c] x)
   625  (MOVBreg (SRDconst [c] x)) && c>56 => (SRDconst [c] x)
   626  (MOVBreg (SRDconst [c] x)) && c==56 => (SRADconst [c] x)
   627  (MOVBreg (SRADconst [c] x)) && c>=56 => (SRADconst [c] x)
   628  (MOVBZreg (SRWconst [c] x)) && c>=24 => (SRWconst [c] x)
   629  (MOVBreg (SRWconst [c] x)) && c>24 => (SRWconst [c] x)
   630  (MOVBreg (SRWconst [c] x)) && c==24 => (SRAWconst [c] x)
   631  (MOVBreg (SRAWconst [c] x)) && c>=24 => (SRAWconst [c] x)
   632  
   633  (MOVHZreg (SRDconst [c] x)) && c>=48 => (SRDconst [c] x)
   634  (MOVHreg (SRDconst [c] x)) && c>48 => (SRDconst [c] x)
   635  (MOVHreg (SRDconst [c] x)) && c==48 => (SRADconst [c] x)
   636  (MOVHreg (SRADconst [c] x)) && c>=48 => (SRADconst [c] x)
   637  (MOVHZreg (SRWconst [c] x)) && c>=16 => (SRWconst [c] x)
   638  (MOVHreg (SRWconst [c] x)) && c>16 => (SRWconst [c] x)
   639  (MOVHreg (SRAWconst [c] x)) && c>=16 => (SRAWconst [c] x)
   640  (MOVHreg (SRWconst [c] x)) && c==16 => (SRAWconst [c] x)
   641  
   642  (MOVWZreg (SRDconst [c] x)) && c>=32 => (SRDconst [c] x)
   643  (MOVWreg (SRDconst [c] x)) && c>32 => (SRDconst [c] x)
   644  (MOVWreg (SRADconst [c] x)) && c>=32 => (SRADconst [c] x)
   645  (MOVWreg (SRDconst [c] x)) && c==32 => (SRADconst [c] x)
   646  
   647  // Various redundant zero/sign extension combinations.
   648  (MOVBZreg y:(MOVBZreg _)) => y  // repeat
   649  (MOVBreg y:(MOVBreg _)) => y // repeat
   650  (MOVBreg (MOVBZreg x)) => (MOVBreg x)
   651  (MOVBZreg (MOVBreg x)) => (MOVBZreg x)
   652  
   653  // Catch any remaining rotate+shift cases
   654  (MOVBZreg (SRWconst x [s])) && mergePPC64AndSrwi(0xFF,s) != 0 => (RLWINM [mergePPC64AndSrwi(0xFF,s)] x)
   655  (MOVBZreg (RLWINM [r] y)) && mergePPC64AndRlwinm(0xFF,r) != 0 => (RLWINM [mergePPC64AndRlwinm(0xFF,r)] y)
   656  (MOVHZreg (RLWINM [r] y)) && mergePPC64AndRlwinm(0xFFFF,r) != 0 => (RLWINM [mergePPC64AndRlwinm(0xFFFF,r)] y)
   657  (MOVWZreg (RLWINM [r] y)) && mergePPC64MovwzregRlwinm(r) != 0 => (RLWINM [mergePPC64MovwzregRlwinm(r)] y)
   658  (ANDconst [m] (RLWINM [r] y)) && mergePPC64AndRlwinm(uint32(m),r) != 0 => (RLWINM [mergePPC64AndRlwinm(uint32(m),r)] y)
   659  (SLDconst [s] (RLWINM [r] y)) && mergePPC64SldiRlwinm(s,r) != 0 => (RLWINM [mergePPC64SldiRlwinm(s,r)] y)
   660  (RLWINM [r] (MOVHZreg u)) && mergePPC64RlwinmAnd(r,0xFFFF) != 0 => (RLWINM [mergePPC64RlwinmAnd(r,0xFFFF)] u)
   661  (RLWINM [r] (ANDconst [a] u)) && mergePPC64RlwinmAnd(r,uint32(a)) != 0 => (RLWINM [mergePPC64RlwinmAnd(r,uint32(a))] u)
   662  // SLWconst is a special case of RLWNM which always zero-extends the result.
   663  (SLWconst [s] (MOVWZreg w)) => (SLWconst [s] w)
   664  (MOVWZreg w:(SLWconst u)) => w
   665  
   666  // H - there are more combinations than these
   667  
   668  (MOVHZreg y:(MOV(H|B)Zreg _)) => y // repeat
   669  (MOVHZreg y:(MOVHBRload _ _)) => y
   670  
   671  (MOVHreg y:(MOV(H|B)reg _)) => y // repeat
   672  
   673  (MOV(H|HZ)reg y:(MOV(HZ|H)reg x)) => (MOV(H|HZ)reg x)
   674  
   675  // W - there are more combinations than these
   676  
   677  (MOV(WZ|WZ|WZ|W|W|W)reg y:(MOV(WZ|HZ|BZ|W|H|B)reg _)) => y // repeat
   678  (MOVWZreg y:(MOV(H|W)BRload _ _)) => y
   679  
   680  (MOV(W|WZ)reg y:(MOV(WZ|W)reg x)) => (MOV(W|WZ)reg x)
   681  
   682  // Truncate, then logical op, then truncate again: the first truncate can be omitted when the final one is of lesser or equal width
   683  (MOVWZreg ((OR|XOR|AND) <t> x (MOVWZreg y))) => (MOVWZreg ((OR|XOR|AND) <t> x y))
   684  (MOVHZreg ((OR|XOR|AND) <t> x (MOVWZreg y))) => (MOVHZreg ((OR|XOR|AND) <t> x y))
   685  (MOVHZreg ((OR|XOR|AND) <t> x (MOVHZreg y))) => (MOVHZreg ((OR|XOR|AND) <t> x y))
   686  (MOVBZreg ((OR|XOR|AND) <t> x (MOVWZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
   687  (MOVBZreg ((OR|XOR|AND) <t> x (MOVHZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
   688  (MOVBZreg ((OR|XOR|AND) <t> x (MOVBZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
   689  
   690  (MOV(B|H|W)Zreg z:(ANDconst [c] (MOVBZload ptr x))) => z
   691  (MOV(B|H|W)Zreg z:(AND y (MOV(B|H|W)Zload ptr x))) => z
   692  (MOV(H|W)Zreg z:(ANDconst [c] (MOVHZload ptr x))) => z
   693  (MOVWZreg z:(ANDconst [c] (MOVWZload ptr x))) => z
   694  
   695  // Arithmetic constant ops
   696  
   697  (ADD x (MOVDconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDconst [c] x)
   698  (ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) => (ADDconst [c+d] x)
   699  (ADDconst [0] x) => x
   700  (SUB x (MOVDconst [c])) && is32Bit(-c) => (ADDconst [-c] x)
   701  
   702  (ADDconst [c] (MOVDaddr [d] {sym} x)) && is32Bit(c+int64(d)) => (MOVDaddr [int32(c+int64(d))] {sym} x)
   703  (ADDconst [c] x:(SP)) && is32Bit(c) => (MOVDaddr [int32(c)] x) // so it is rematerializeable
   704  
   705  (MULL(W|D) x (MOVDconst [c])) && is16Bit(c) => (MULL(W|D)const [int32(c)] x)
   706  
   707  // Subtract from constant (a carry is generated but ignored).
   708  // Note, these clobber the carry bit.
   709  (SUB (MOVDconst [c]) x) && is32Bit(c) => (SUBFCconst [c] x)
   710  (SUBFCconst [c] (NEG x)) => (ADDconst [c] x)
   711  (SUBFCconst [c] (SUBFCconst [d] x)) && is32Bit(c-d) => (ADDconst [c-d] x)
   712  (SUBFCconst [0] x) => (NEG x)
   713  (ADDconst [c] (SUBFCconst [d] x)) && is32Bit(c+d) => (SUBFCconst [c+d] x)
   714  (NEG (ADDconst [c] x)) && is32Bit(-c) => (SUBFCconst [-c] x)
   715  (NEG (SUBFCconst [c] x)) && is32Bit(-c) => (ADDconst [-c] x)
   716  (NEG (SUB x y)) => (SUB y x)
   717  (NEG (NEG x)) => x
   718  
   719  // Use register moves instead of stores and loads to move int<=>float values
   720  // Common with math Float64bits, Float64frombits
   721  (MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _)) => (MFVSRD x)
   722  (FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr x _)) => (MTVSRD x)
   723  
   724  (FMOVDstore [off] {sym} ptr (MTVSRD x) mem) => (MOVDstore [off] {sym} ptr x mem)
   725  (MOVDstore [off] {sym} ptr (MFVSRD x) mem) => (FMOVDstore [off] {sym} ptr x mem)
   726  
   727  (MTVSRD (MOVDconst [c])) && !math.IsNaN(math.Float64frombits(uint64(c))) => (FMOVDconst [math.Float64frombits(uint64(c))])
   728  (MFVSRD (FMOVDconst [c])) => (MOVDconst [int64(math.Float64bits(c))])
   729  
   730  (MTVSRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (FMOVDload [off] {sym} ptr mem)
   731  (MFVSRD x:(FMOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVDload [off] {sym} ptr mem)
   732  
   733  // Rules for MOV* or FMOV* ops determine when indexed (MOV*loadidx or MOV*storeidx)
   734  // or non-indexed (MOV*load or MOV*store) should be used. Indexed instructions
   735  // require an extra instruction and register to load the index so non-indexed is preferred.
   736  // Indexed ops generate indexed load or store instructions for all GOPPC64 values.
   737  // Non-indexed ops generate DS-form loads and stores when the offset fits in 16 bits,
   738  // and on power8 and power9, a multiple of 4 is required for MOVW and MOVD ops.
   739  // On power10, prefixed loads and stores can be used for offsets that need more than 16 bits but fit in 32 bits,
   740  // and support for PC-relative addressing must be available if relocation is needed.
   741  // On power10, the assembler will determine when to use DS-form or prefixed
   742  // instructions for non-indexed ops depending on the value of the offset.
   743  //
   744  // Fold offsets for stores.
   745  (MOV(D|W|H|B)store [off1] {sym} (ADDconst [off2] x) val mem) && (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) => (MOV(D|W|H|B)store [off1+int32(off2)] {sym} x val mem)
   746  
   747  (FMOV(S|D)store [off1] {sym} (ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) => (FMOV(S|D)store [off1+int32(off2)] {sym} ptr val mem)
   748  
   749  // Fold address into load/store.
   750  // If power10 with PCRel is not available, then
   751  // the assembler needs to generate several instructions and use
   752  // a temp register for accessing a global, and it will reload
   753  // the temp register each time. So don't fold the address of a global
   754  // in that case if there is more than one use.
   755  (MOV(B|H|W|D)store [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
   756  	&& ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
   757          (MOV(B|H|W|D)store [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   758  
   759  (FMOV(S|D)store [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
   760  	&& ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
   761          (FMOV(S|D)store [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   762  
   763  (MOV(B|H|W)Zload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   764  	&& ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
   765          (MOV(B|H|W)Zload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   766  (MOV(H|W|D)load [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   767  	&& ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
   768          (MOV(H|W|D)load [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   769  (FMOV(S|D)load [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
   770  	&& ((is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
   771          (FMOV(S|D)load [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   772  
   773  // Fold offsets for loads.
   774  (FMOV(S|D)load [off1] {sym} (ADDconst [off2] ptr) mem) && (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) => (FMOV(S|D)load [off1+int32(off2)] {sym} ptr mem)
   775  
   776  (MOV(D|W|WZ|H|HZ|BZ)load [off1] {sym} (ADDconst [off2] x) mem) && (is16Bit(int64(off1)+off2) || (supportsPPC64PCRel() && is32Bit(int64(off1)+off2))) => (MOV(D|W|WZ|H|HZ|BZ)load [off1+int32(off2)] {sym} x mem)
   777  
   778  // Determine load + addressing that can be done as a register indexed load
   779  (MOV(D|W|WZ|H|HZ|BZ)load [0] {sym} p:(ADD ptr idx) mem) && sym == nil && p.Uses == 1 => (MOV(D|W|WZ|H|HZ|BZ)loadidx ptr idx mem)
   780  
   781  // See comments above concerning selection of indexed vs. non-indexed ops.
   782  // These cases don't have relocation.
   783  (MOV(D|W)loadidx ptr (MOVDconst [c]) mem) && ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(D|W)load [int32(c)] ptr mem)
   784  (MOV(WZ|H|HZ|BZ)loadidx ptr (MOVDconst [c]) mem) && (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem)
   785  (MOV(D|W)loadidx (MOVDconst [c]) ptr mem) && ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(D|W)load [int32(c)] ptr mem)
   786  (MOV(WZ|H|HZ|BZ)loadidx (MOVDconst [c]) ptr mem) && (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem)
   787  
   788  // Store of zero => storezero
   789  (MOV(D|W|H|B)store [off] {sym} ptr (MOVDconst [0]) mem) => (MOV(D|W|H|B)storezero [off] {sym} ptr mem)
   790  
   791  // Fold offsets for storezero
   792  (MOV(D|W|H|B)storezero [off1] {sym} (ADDconst [off2] x) mem) && ((supportsPPC64PCRel() && is32Bit(int64(off1)+off2)) || (is16Bit(int64(off1)+off2))) =>
   793      (MOV(D|W|H|B)storezero [off1+int32(off2)] {sym} x mem)
   794  
   795  // Stores with addressing that can be done as indexed stores
   796  (MOV(D|W|H|B)store [0] {sym} p:(ADD ptr idx) val mem) && sym == nil && p.Uses == 1 => (MOV(D|W|H|B)storeidx ptr idx val mem)
   797  
   798  (MOVDstoreidx ptr (MOVDconst [c]) val mem) && ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOVDstore [int32(c)] ptr val mem)
   799  (MOV(W|H|B)storeidx ptr (MOVDconst [c]) val mem) && (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(W|H|B)store [int32(c)] ptr val mem)
   800  (MOVDstoreidx (MOVDconst [c]) ptr val mem) && ((is16Bit(c) && c%4 == 0) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOVDstore [int32(c)] ptr val mem)
   801  (MOV(W|H|B)storeidx (MOVDconst [c]) ptr val mem) && (is16Bit(c) || (buildcfg.GOPPC64 >= 10 && is32Bit(c))) => (MOV(W|H|B)store [int32(c)] ptr val mem)
   802  
   803  // Fold symbols into storezero
   804  (MOV(D|W|H|B)storezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
   805  	&& ((is16Bit(int64(off1+off2)) && (x.Op != OpSB || p.Uses == 1)) || (supportsPPC64PCRel() && is32Bit(int64(off1+off2)))) =>
   806      (MOV(D|W|H|B)storezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
   807  
   808  // atomic intrinsics
   809  (AtomicLoad(8|32|64|Ptr)  ptr mem) => (LoweredAtomicLoad(8|32|64|Ptr) [1] ptr mem)
   810  (AtomicLoadAcq(32|64)     ptr mem) => (LoweredAtomicLoad(32|64) [0] ptr mem)
   811  
   812  (AtomicStore(8|32|64)    ptr val mem) => (LoweredAtomicStore(8|32|64) [1] ptr val mem)
   813  (AtomicStoreRel(32|64)   ptr val mem) => (LoweredAtomicStore(32|64) [0] ptr val mem)
   814  
   815  (AtomicExchange(8|32|64) ...) => (LoweredAtomicExchange(8|32|64) ...)
   816  
   817  (AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)
   818  
   819  (AtomicCompareAndSwap(32|64) ptr old new_ mem) => (LoweredAtomicCas(32|64) [1] ptr old new_ mem)
   820  (AtomicCompareAndSwapRel32   ptr old new_ mem) => (LoweredAtomicCas32 [0] ptr old new_ mem)
   821  
   822  (AtomicAnd(8|32)  ...) => (LoweredAtomicAnd(8|32)  ...)
   823  (AtomicOr(8|32)   ...) => (LoweredAtomicOr(8|32)   ...)
   824  
   825  (Slicemask <t> x) => (SRADconst (NEG <t> x) [63])
   826  (ANDconst [1] z:(SRADconst [63] x)) && z.Uses == 1  => (SRDconst [63] x)
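        // Slicemask relies on NEG x being negative exactly when x > 0 (x is a
        // length, so non-negative), making the arithmetic shift produce -1 for
        // x > 0 and 0 for x == 0. In the second rule, the arithmetic shift
        // followed by the AND just extracts the sign bit, which a single
        // logical shift right by 63 also does.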
   827  
   828  // Note that MOV??reg returns a 64-bit int; x is not necessarily that wide.
   829  // This may interact with other patterns in the future. (Compare with arm64.)
   830  (MOV(B|H|W)Zreg x:(MOVBZload _ _)) => x
   831  (MOV(B|H|W)Zreg x:(MOVBZloadidx _ _ _)) => x
   832  (MOV(H|W)Zreg x:(MOVHZload _ _)) => x
   833  (MOV(H|W)Zreg x:(MOVHZloadidx _ _ _)) => x
   834  (MOV(H|W)reg x:(MOVHload _ _)) => x
   835  (MOV(H|W)reg x:(MOVHloadidx _ _ _)) => x
   836  (MOV(WZ|W)reg x:(MOV(WZ|W)load _ _)) => x
   837  (MOV(WZ|W)reg x:(MOV(WZ|W)loadidx _ _ _)) => x
   838  (MOV(B|W)Zreg x:(Select0 (LoweredAtomicLoad(8|32) _ _))) => x
   839  
   840  // don't extend if argument is already extended
   841  (MOVBreg x:(Arg <t>)) && is8BitInt(t) && t.IsSigned() => x
   842  (MOVBZreg x:(Arg <t>)) && is8BitInt(t) && !t.IsSigned() => x
   843  (MOVHreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && t.IsSigned() => x
   844  (MOVHZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && !t.IsSigned() => x
   845  (MOVWreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && t.IsSigned() => x
   846  (MOVWZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !t.IsSigned() => x
   847  
   848  (MOVBZreg (MOVDconst [c]))  => (MOVDconst [int64(uint8(c))])
   849  (MOVBreg (MOVDconst [c]))  => (MOVDconst [int64(int8(c))])
   850  (MOVHZreg (MOVDconst [c]))  => (MOVDconst [int64(uint16(c))])
   851  (MOVHreg (MOVDconst [c]))  => (MOVDconst [int64(int16(c))])
   852  (MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
   853  (MOVWZreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
   854  
   855  // Implement clrsldi and clrslwi extended mnemonics as described in
   856  // ISA 3.0 section C.8. The AuxInt field carries the values needed by
   857  // the instruction, packed together since only one field is available.
   858  (SLDconst [c] z:(MOVBZreg x)) && c < 8 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,56,63,64)] x)
   859  (SLDconst [c] z:(MOVHZreg x)) && c < 16 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,48,63,64)] x)
   860  (SLDconst [c] z:(MOVWZreg x)) && c < 32 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,32,63,64)] x)
   861  
   862  (SLDconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
   863  (SLDconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
   864  (SLWconst [c] z:(MOVBZreg x)) && z.Uses == 1 && c < 8 => (CLRLSLWI [newPPC64ShiftAuxInt(c,24,31,32)] x)
   865  (SLWconst [c] z:(MOVHZreg x)) && z.Uses == 1 && c < 16 => (CLRLSLWI [newPPC64ShiftAuxInt(c,16,31,32)] x)
   866  (SLWconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
   867  (SLWconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
   868  // special case for power9
   869  (SL(W|D)const [c] z:(MOVWreg x)) && c < 32 && buildcfg.GOPPC64 >= 9 => (EXTSWSLconst [c] x)
   870  
   871  // Lose widening ops fed to stores
   872  (MOVBstore [off] {sym} ptr (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) => (MOVBstore [off] {sym} ptr x mem)
   873  (MOVHstore [off] {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHstore [off] {sym} ptr x mem)
   874  (MOVWstore [off] {sym} ptr (MOV(W|WZ)reg x) mem) => (MOVWstore [off] {sym} ptr x mem)
   875  (MOVBstore [off] {sym} ptr (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 => (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
   876  (MOVBstore [off] {sym} ptr (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 => (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
   877  (MOVBstoreidx ptr idx (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) => (MOVBstoreidx ptr idx x mem)
   878  (MOVHstoreidx ptr idx (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHstoreidx ptr idx x mem)
   879  (MOVWstoreidx ptr idx (MOV(W|WZ)reg x) mem) => (MOVWstoreidx ptr idx x mem)
   880  (MOVBstoreidx ptr idx (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 => (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
   881  (MOVBstoreidx ptr idx (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 => (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
   882  (MOVHBRstore ptr (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHBRstore ptr x mem)
   883  (MOVWBRstore ptr (MOV(W|WZ)reg x) mem) => (MOVWBRstore ptr x mem)
   884  
   885  // Lose W-widening ops fed to compare-W
   886  (CMP(W|WU) x (MOV(W|WZ)reg y)) => (CMP(W|WU) x y)
   887  (CMP(W|WU) (MOV(W|WZ)reg x) y) => (CMP(W|WU) x y)
   888  
   889  (CMP x (MOVDconst [c])) && is16Bit(c) => (CMPconst x [c])
   890  (CMP (MOVDconst [c]) y) && is16Bit(c) => (InvertFlags (CMPconst y [c]))
   891  (CMPW x (MOVDconst [c])) && is16Bit(c) => (CMPWconst x [int32(c)])
   892  (CMPW (MOVDconst [c]) y) && is16Bit(c) => (InvertFlags (CMPWconst y [int32(c)]))
   893  
   894  (CMPU x (MOVDconst [c])) && isU16Bit(c) => (CMPUconst x [c])
   895  (CMPU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPUconst y [c]))
   896  (CMPWU x (MOVDconst [c])) && isU16Bit(c) => (CMPWUconst x [int32(c)])
   897  (CMPWU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPWUconst y [int32(c)]))
   898  
   899  // Canonicalize the order of arguments to comparisons - helps with CSE.
   900  ((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
   901  
   902  // n is always a zero-extended uint16 value, so n & z is always a non-negative 32 or 64 bit value.
   903  // Rewrite to a cmp int64(0) to lower into ANDCCconst in the latelower pass.
   904  (CMP(W|U|WU)const [0] a:(ANDconst [n] z)) => (CMPconst [0] a)
   905  
   906  // SETBC auxInt values 0=LT 1=GT 2=EQ   Crbit==1 ? 1 : 0
   907  // SETBCR auxInt values 0=LT 1=GT 2=EQ   Crbit==1 ? 0 : 1
   908  (Equal cmp) => (SETBC [2] cmp)
   909  (NotEqual cmp) => (SETBCR [2] cmp)
   910  (LessThan cmp) => (SETBC [0] cmp)
   911  (FLessThan cmp) => (SETBC [0] cmp)
   912  (FLessEqual cmp) => (OR (SETBC [2] cmp) (SETBC [0] cmp))
   913  (GreaterEqual cmp) => (SETBCR [0] cmp)
   914  (GreaterThan cmp)  => (SETBC [1] cmp)
   915  (FGreaterEqual cmp) => (OR (SETBC [2] cmp) (SETBC [1] cmp))
   916  (FGreaterThan cmp)  => (SETBC [1] cmp)
   917  (LessEqual cmp) => (SETBCR [1] cmp)
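        // FCMPU sets exactly one of the LT, GT, EQ, UN condition bits, so the
        // ordered <= and >= predicates need two bits (LT|EQ and GT|EQ), hence
        // the OR of two SETBC results above. A NaN operand sets only UN,
        // leaving the OR correctly zero.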
   918  
   919  (SETBC [0] (FlagLT)) => (MOVDconst [1])
   920  (SETBC [0] (Flag(GT|EQ))) => (MOVDconst [0])
   921  (SETBC [1] (FlagGT)) => (MOVDconst [1])
   922  (SETBC [1] (Flag(LT|EQ))) => (MOVDconst [0])
   923  (SETBC [2] (FlagEQ)) => (MOVDconst [1])
   924  (SETBC [2] (Flag(LT|GT))) => (MOVDconst [0])
   925  
   926  (SETBCR [0] (FlagLT)) => (MOVDconst [0])
   927  (SETBCR [0] (Flag(GT|EQ))) => (MOVDconst [1])
   928  (SETBCR [1] (FlagGT)) => (MOVDconst [0])
   929  (SETBCR [1] (Flag(LT|EQ))) => (MOVDconst [1])
   930  (SETBCR [2] (FlagEQ)) => (MOVDconst [0])
   931  (SETBCR [2] (Flag(LT|GT))) => (MOVDconst [1])
   932  
   933  (SETBC [0] (InvertFlags bool)) => (SETBC [1] bool)
   934  (SETBC [1] (InvertFlags bool)) => (SETBC [0] bool)
   935  (SETBC [2] (InvertFlags bool)) => (SETBC [2] bool)
   936  
   937  (SETBCR [0] (InvertFlags bool)) => (SETBCR [1] bool)
   938  (SETBCR [1] (InvertFlags bool)) => (SETBCR [0] bool)
   939  (SETBCR [2] (InvertFlags bool)) => (SETBCR [2] bool)
   940  
   941  // ISEL auxInt values 0=LT 1=GT 2=EQ   arg2 ? arg0 : arg1
   942  // ISEL auxInt values 4=GE 5=LE 6=NE   !arg2 ? arg1 : arg0
   943  
   944  (ISEL [2] x _ (FlagEQ)) => x
   945  (ISEL [2] _ y (Flag(LT|GT))) => y
   946  
   947  (ISEL [6] _ y (FlagEQ)) => y
   948  (ISEL [6] x _ (Flag(LT|GT))) => x
   949  
   950  (ISEL [0] _ y (Flag(EQ|GT))) => y
   951  (ISEL [0] x _ (FlagLT)) => x
   952  
   953  (ISEL [5] _ x (Flag(EQ|LT))) => x
   954  (ISEL [5] y _ (FlagGT)) => y
   955  
   956  (ISEL [1] _ y (Flag(EQ|LT))) => y
   957  (ISEL [1] x _ (FlagGT)) => x
   958  
   959  (ISEL [4] x _ (Flag(EQ|GT))) => x
   960  (ISEL [4] _ y (FlagLT)) => y
   961  
   962  (SETBC [n] (InvertFlags bool)) => (SETBCR [n] bool)
   963  (SETBCR [n] (InvertFlags bool)) => (SETBC [n] bool)
   964  
   965  (ISEL [n] x y (InvertFlags bool)) && n%4 == 0 => (ISEL [n+1] x y bool)
   966  (ISEL [n] x y (InvertFlags bool)) && n%4 == 1 => (ISEL [n-1] x y bool)
   967  (ISEL [n] x y (InvertFlags bool)) && n%4 == 2 => (ISEL [n] x y bool)
   968  (XORconst [1] (SETBCR [n] cmp)) => (SETBC [n] cmp)
   969  (XORconst [1] (SETBC [n] cmp)) => (SETBCR [n] cmp)
   970  
   971  (SETBC  [2] (CMPconst [0] a:(ANDconst [1] _))) => (XORconst [1] a)
   972  (SETBCR [2] (CMPconst [0] a:(ANDconst [1] _))) => a
   973  
   974  // Only CMPconst for these in case AND|OR|XOR result is > 32 bits
   975  (SETBC [2] (CMPconst [0] a:(AND y z))) && a.Uses == 1 => (SETBC [2] (Select1 <types.TypeFlags> (ANDCC y z )))
   976  (SETBCR [2] (CMPconst [0] a:(AND y z))) && a.Uses == 1 => (SETBCR [2] (Select1 <types.TypeFlags> (ANDCC y z )))
   977  
   978  (SETBC [2] (CMPconst [0] o:(OR y z))) && o.Uses == 1 => (SETBC [2] (Select1 <types.TypeFlags> (ORCC y z )))
   979  (SETBCR [2] (CMPconst [0] o:(OR y z))) && o.Uses == 1 => (SETBCR [2] (Select1 <types.TypeFlags> (ORCC y z )))
   980  
   981  (SETBC [2] (CMPconst [0] a:(XOR y z))) && a.Uses == 1 => (SETBC [2] (Select1 <types.TypeFlags> (XORCC y z )))
   982  (SETBCR [2] (CMPconst [0] a:(XOR y z))) && a.Uses == 1 => (SETBCR [2] (Select1 <types.TypeFlags> (XORCC y z )))
   983  
   984  // A particular pattern seen in cgo code:
   985  (AND (MOVDconst [c]) x:(MOVBZload _ _)) => (ANDconst [c&0xFF] x)
   986  
   987  // floating point negative abs
   988  (FNEG (F(ABS|NABS) x)) => (F(NABS|ABS) x)
   989  
   990  // floating-point fused multiply-add/sub
   991  (F(ADD|SUB) (FMUL x y) z) && x.Block.Func.useFMA(v) => (FM(ADD|SUB) x y z)
   992  (F(ADDS|SUBS) (FMULS x y) z) && x.Block.Func.useFMA(v) => (FM(ADDS|SUBS) x y z)
   993  
   994  // Arch-specific inlining for small or disjoint runtime.memmove
   995  (SelectN [0] call:(CALLstatic {sym} s1:(MOVDstore _ (MOVDconst [sz]) s2:(MOVDstore  _ src s3:(MOVDstore {t} _ dst mem)))))
   996          && sz >= 0
   997          && isSameCall(sym, "runtime.memmove")
   998          && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
   999          && isInlinableMemmove(dst, src, sz, config)
  1000          && clobber(s1, s2, s3, call)
  1001          => (Move [sz] dst src mem)
  1002  
  1003  // Match post-lowering calls, register version.
  1004  (SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem))
  1005          && sz >= 0
  1006          && isSameCall(sym, "runtime.memmove")
  1007          && call.Uses == 1
  1008          && isInlinableMemmove(dst, src, sz, config)
  1009          && clobber(call)
  1010          => (Move [sz] dst src mem)
  1011  
  1012  // Prefetch instructions (TH specified using aux field)
  1013  // For DCBT Ra,Rb,TH, a value of TH indicates:
  1014  //     0, hint this cache line will be used soon. (PrefetchCache)
  1015  //     16, hint this cache line will not be used for long. (PrefetchCacheStreamed)
  1016  // See ISA 3.0 Book II 4.3.2 for more detail. https://openpower.foundation/specifications/isa/
  1017  (PrefetchCache ptr mem)          => (DCBT ptr mem [0])
  1018  (PrefetchCacheStreamed ptr mem)  => (DCBT ptr mem [16])
  1019  
  1020  // Use byte reverse instructions on Power10
  1021  (Bswap(16|32|64) x) && buildcfg.GOPPC64>=10 => (BR(H|W|D) x)
  1022  
  1023  // Fold bit reversal into loads.
  1024  (BR(W|H) x:(MOV(W|H)Zload [off] {sym} ptr mem)) && x.Uses == 1 => @x.Block (MOV(W|H)BRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
  1025  (BR(W|H) x:(MOV(W|H)Zloadidx ptr idx      mem)) && x.Uses == 1 => @x.Block (MOV(W|H)BRloadidx ptr idx mem)
  1026  (BRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 => @x.Block (MOVDBRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
  1027  (BRD x:(MOVDloadidx ptr idx      mem)) && x.Uses == 1 => @x.Block (MOVDBRloadidx ptr idx mem)
  1028  
  1029  // Fold bit reversal into stores.
  1030  (MOV(D|W|H)store [off] {sym} ptr r:(BR(D|W|H) val) mem) && r.Uses == 1 => (MOV(D|W|H)BRstore (MOVDaddr <ptr.Type> [off] {sym} ptr) val mem)
  1031  (MOV(D|W|H)storeidx ptr idx      r:(BR(D|W|H) val) mem) && r.Uses == 1 => (MOV(D|W|H)BRstoreidx ptr idx val mem)
  1032  
  1033  // GOPPC64<10 rules.
  1034  // These Bswap operations should only be introduced by the memcombine pass in places where they can be folded into loads or stores.
  1035  (Bswap(32|16) x:(MOV(W|H)Zload [off] {sym} ptr mem)) => @x.Block (MOV(W|H)BRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
  1036  (Bswap(32|16) x:(MOV(W|H)Zloadidx ptr idx      mem)) => @x.Block (MOV(W|H)BRloadidx ptr idx mem)
  1037  (Bswap64 x:(MOVDload [off] {sym} ptr mem)) => @x.Block (MOVDBRload (MOVDaddr <ptr.Type> [off] {sym} ptr) mem)
  1038  (Bswap64 x:(MOVDloadidx ptr idx      mem)) => @x.Block (MOVDBRloadidx ptr idx mem)
  1039  (MOV(D|W|H)store [off] {sym} ptr (Bswap(64|32|16) val) mem) => (MOV(D|W|H)BRstore (MOVDaddr <ptr.Type> [off] {sym} ptr) val mem)
  1040  (MOV(D|W|H)storeidx ptr idx      (Bswap(64|32|16) val) mem) => (MOV(D|W|H)BRstoreidx ptr idx val mem)
  1041  
