Text file src/cmd/compile/internal/ssa/_gen/RISCV64.rules

     1  // Copyright 2016 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Lowering arithmetic
     6  (Add(Ptr|64|32|16|8) ...) => (ADD ...)
     7  (Add(64|32)F ...) => (FADD(D|S) ...)
     8  
     9  (Sub(Ptr|64|32|16|8) ...) => (SUB ...)
    10  (Sub(64|32)F ...) => (FSUB(D|S) ...)
    11  
    12  (Mul64 ...) => (MUL  ...)
    13  (Mul64uhilo ...) => (LoweredMuluhilo ...)
    14  (Mul64uover ...) => (LoweredMuluover ...)
    15  (Mul(32|16|8) ...) => (MULW ...)
    16  (Mul(64|32)F ...) => (FMUL(D|S) ...)
    17  
    18  (Div(64|32)F ...) => (FDIV(D|S) ...)
    19  
    20  (Div64 x y [false])  => (DIV x y)
    21  (Div64u ...) => (DIVU ...)
    22  (Div32 x y [false])  => (DIVW x y)
    23  (Div32u ...) => (DIVUW ...)
    24  (Div16 x y [false])  => (DIVW  (SignExt16to32 x) (SignExt16to32 y))
    25  (Div16u x y) => (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y))
    26  (Div8 x y)   => (DIVW  (SignExt8to32 x)  (SignExt8to32 y))
    27  (Div8u x y)  => (DIVUW (ZeroExt8to32 x)  (ZeroExt8to32 y))
    28  
    29  (Hmul64 ...)  => (MULH  ...)
    30  (Hmul64u ...) => (MULHU ...)
    31  (Hmul32 x y)  => (SRAI [32] (MUL  (SignExt32to64 x) (SignExt32to64 y)))
    32  (Hmul32u x y) => (SRLI [32] (MUL  (ZeroExt32to64 x) (ZeroExt32to64 y)))
    33  
    34  (Select0 (Add64carry x y c)) => (ADD (ADD <typ.UInt64> x y) c)
    35  (Select1 (Add64carry x y c)) =>
    36  	(OR (SLTU <typ.UInt64> s:(ADD <typ.UInt64> x y) x) (SLTU <typ.UInt64> (ADD <typ.UInt64> s c) s))
    37  
    38  (Select0 (Sub64borrow x y c)) => (SUB (SUB <typ.UInt64> x y) c)
    39  (Select1 (Sub64borrow x y c)) =>
    40  	(OR (SLTU <typ.UInt64> x s:(SUB <typ.UInt64> x y)) (SLTU <typ.UInt64> s (SUB <typ.UInt64> s c)))
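        // The carry-out of x+y+c is recovered with unsigned comparisons: s = x+y
        // overflows iff s < x, and s+c overflows iff s+c < s (at most one of the two
        // can hold), so OR-ing the SLTU results yields the carry bit. The borrow rules
        // mirror this: x-y borrows iff x < x-y, and s-c borrows iff s < s-c.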
    41  
    42  // (x + y) / 2 => (x / 2) + (y / 2) + (x & y & 1)
    43  (Avg64u <t> x y) => (ADD (ADD <t> (SRLI <t> [1] x) (SRLI <t> [1] y)) (ANDI <t> [1] (AND <t> x y)))
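        // For example, x=7, y=9: (7>>1) + (9>>1) + (7&9&1) = 3 + 4 + 1 = 8 = (7+9)/2.
        // Unlike the naive form, this expression cannot overflow 64 bits.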
    44  
    45  (Mod64 x y [false])  => (REM x y)
    46  (Mod64u ...) => (REMU  ...)
    47  (Mod32 x y [false])  => (REMW x y)
    48  (Mod32u ...) => (REMUW ...)
    49  (Mod16 x y [false])  => (REMW  (SignExt16to32 x) (SignExt16to32 y))
    50  (Mod16u x y) => (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y))
    51  (Mod8 x y)   => (REMW  (SignExt8to32  x) (SignExt8to32  y))
    52  (Mod8u x y)  => (REMUW (ZeroExt8to32  x) (ZeroExt8to32  y))
    53  
    54  (And(64|32|16|8) ...) => (AND ...)
    55  (Or(64|32|16|8) ...) => (OR ...)
    56  (Xor(64|32|16|8) ...) => (XOR ...)
    57  
    58  (Neg(64|32|16|8) ...) => (NEG ...)
    59  (Neg(64|32)F ...) => (FNEG(D|S) ...)
    60  
    61  (Com(64|32|16|8) ...) => (NOT ...)
    62  
    63  (Sqrt ...) => (FSQRTD ...)
    64  (Sqrt32 ...) => (FSQRTS ...)
    65  
    66  (Copysign ...) => (FSGNJD ...)
    67  
    68  (Abs ...) => (FABSD ...)
    69  
    70  (FMA ...) => (FMADDD ...)
    71  
    72  (Min(64|32)F ...) => (LoweredFMIN(D|S) ...)
    73  (Max(64|32)F ...) => (LoweredFMAX(D|S) ...)
    74  
    75  // Sign and zero extension.
    76  
    77  (SignExt8to16  ...) => (MOVBreg ...)
    78  (SignExt8to32  ...) => (MOVBreg ...)
    79  (SignExt8to64  ...) => (MOVBreg ...)
    80  (SignExt16to32 ...) => (MOVHreg ...)
    81  (SignExt16to64 ...) => (MOVHreg ...)
    82  (SignExt32to64 ...) => (MOVWreg ...)
    83  
    84  (ZeroExt8to16  ...) => (MOVBUreg ...)
    85  (ZeroExt8to32  ...) => (MOVBUreg ...)
    86  (ZeroExt8to64  ...) => (MOVBUreg ...)
    87  (ZeroExt16to32 ...) => (MOVHUreg ...)
    88  (ZeroExt16to64 ...) => (MOVHUreg ...)
    89  (ZeroExt32to64 ...) => (MOVWUreg ...)
    90  
    91  (Cvt32to32F ...) => (FCVTSW ...)
    92  (Cvt32to64F ...) => (FCVTDW ...)
    93  (Cvt64to32F ...) => (FCVTSL ...)
    94  (Cvt64to64F ...) => (FCVTDL ...)
    95  
    96  (Cvt32Fto32 ...) => (FCVTWS ...)
    97  (Cvt32Fto64 ...) => (FCVTLS ...)
    98  (Cvt64Fto32 ...) => (FCVTWD ...)
    99  (Cvt64Fto64 ...) => (FCVTLD ...)
   100  
   101  (Cvt32Fto64F ...) => (FCVTDS ...)
   102  (Cvt64Fto32F ...) => (FCVTSD ...)
   103  
   104  (CvtBoolToUint8 ...) => (Copy ...)
   105  
   106  (Round(32|64)F ...) => (LoweredRound(32|64)F ...)
   107  
   108  (Slicemask <t> x) => (SRAI [63] (NEG <t> x))
   109  
   110  // Truncations
   111  // We ignore the unused high parts of registers, so truncates are just copies.
   112  (Trunc16to8  ...) => (Copy ...)
   113  (Trunc32to8  ...) => (Copy ...)
   114  (Trunc32to16 ...) => (Copy ...)
   115  (Trunc64to8  ...) => (Copy ...)
   116  (Trunc64to16 ...) => (Copy ...)
   117  (Trunc64to32 ...) => (Copy ...)
   118  
   119  // Shifts
   120  
   121  // SLL only considers the bottom 6 bits of y. If y >= 64, the result should
   122  // always be 0.
   123  //
   124  // Breaking down the operation:
   125  //
   126  // (SLL x y) generates x << (y & 63).
   127  //
   128  // If y < 64, this is the value we want. Otherwise, we want zero.
   129  //
   130  // So, we AND with -1 * uint64(y < 64), which is 0xfffff... if y < 64 and 0 otherwise.
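        // Worked example (Lsh64x64, y=70): SLL alone computes x << (70&63) = x << 6,
        // but SLTIU [64] 70 = 0, so the negation is 0 and the AND forces the result to 0.
        // For y=5: SLTIU [64] 5 = 1, the negation is all ones, and the AND keeps x << 5.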
   131  (Lsh8x8   <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8  <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
   132  (Lsh8x16  <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8  <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
   133  (Lsh8x32  <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8  <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
   134  (Lsh8x64  <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8  <t> (SLTIU <t> [64] y)))
   135  (Lsh16x8  <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
   136  (Lsh16x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
   137  (Lsh16x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
   138  (Lsh16x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] y)))
   139  (Lsh32x8  <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
   140  (Lsh32x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
   141  (Lsh32x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
   142  (Lsh32x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] y)))
   143  (Lsh64x8  <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
   144  (Lsh64x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
   145  (Lsh64x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
   146  (Lsh64x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
   147  
   148  (Lsh8x(64|32|16|8)  x y) && shiftIsBounded(v) => (SLL x y)
   149  (Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
   150  (Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
   151  (Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
   152  
   153  // SRL only considers the bottom 6 bits of y, similarly SRLW only considers the
   154  // bottom 5 bits of y. Ensure that the result is always zero if the shift amount
   155  // is at least the operand width (64 for SRL, 32 for SRLW). See Lsh above for a detailed description.
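        // The 32-bit cases use SRLW, which masks the shift amount to 5 bits, so the
        // guard compares against 32 instead of 64. For example (Rsh32Ux64, y=33):
        // SRLW alone computes x >> (33&31) = x >> 1, but SLTIU [32] 33 = 0 zeroes the result.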
   156  (Rsh8Ux8   <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt8to64  x) y) (Neg8  <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
   157  (Rsh8Ux16  <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt8to64  x) y) (Neg8  <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
   158  (Rsh8Ux32  <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt8to64  x) y) (Neg8  <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
   159  (Rsh8Ux64  <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt8to64  x) y) (Neg8  <t> (SLTIU <t> [64] y)))
   160  (Rsh16Ux8  <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
   161  (Rsh16Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
   162  (Rsh16Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
   163  (Rsh16Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
   164  (Rsh32Ux8  <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t>  x                y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt8to64  y))))
   165  (Rsh32Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t>  x                y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt16to64 y))))
   166  (Rsh32Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t>  x                y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt32to64 y))))
   167  (Rsh32Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t>  x                y) (Neg32 <t> (SLTIU <t> [32] y)))
   168  (Rsh64Ux8  <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t>  x                y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
   169  (Rsh64Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t>  x                y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
   170  (Rsh64Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t>  x                y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
   171  (Rsh64Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t>  x                y) (Neg64 <t> (SLTIU <t> [64] y)))
   172  
   173  (Rsh8Ux(64|32|16|8)  x y) && shiftIsBounded(v) => (SRL  (ZeroExt8to64  x) y)
   174  (Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL  (ZeroExt16to64 x) y)
   175  (Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRLW x                 y)
   176  (Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL  x                 y)
   177  
   178  // SRA only considers the bottom 6 bits of y, similarly SRAW only considers the
   179  // bottom 5 bits. If y is greater than the maximum value (either 63 or 31
   180  // depending on the instruction), the result of the shift should be either 0
   181  // or -1 based on the sign bit of x.
   182  //
   183  // We implement this by forcing the shift amount to -1 (which masks to the maximum shift) if y exceeds the maximum value.
   184  //
   185  // We OR (uint64(y < 64) - 1) into y before passing it to SRA. This leaves
   186  // us with -1 (0xffff...) if y >= 64.  Similarly, we OR (uint64(y < 32) - 1) into y
   187  // before passing it to SRAW.
   188  //
   189  // We don't need to sign-extend the OR result, as it will be at minimum 8 bits,
   190  // more than the 5 or 6 bits SRAW and SRA care about.
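        // Worked example (Rsh64x64, y=100): SLTIU [64] 100 = 0, ADDI [-1] gives -1 and
        // y|-1 = -1, so SRA shifts by -1&63 = 63, yielding 0 or -1 depending on the sign of x.
        // For y=5: SLTIU [64] 5 = 1, ADDI [-1] gives 0 and y|0 = y, a normal shift.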
   191  (Rsh8x8   <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt8to64  x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64  y)))))
   192  (Rsh8x16  <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt8to64  x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
   193  (Rsh8x32  <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt8to64  x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
   194  (Rsh8x64  <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt8to64  x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
   195  (Rsh16x8  <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64  y)))))
   196  (Rsh16x16 <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
   197  (Rsh16x32 <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
   198  (Rsh16x64 <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
   199  (Rsh32x8  <t> x y) && !shiftIsBounded(v) => (SRAW <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt8to64  y)))))
   200  (Rsh32x16 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt16to64 y)))))
   201  (Rsh32x32 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt32to64 y)))))
   202  (Rsh32x64 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] y))))
   203  (Rsh64x8  <t> x y) && !shiftIsBounded(v) => (SRA  <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64  y)))))
   204  (Rsh64x16 <t> x y) && !shiftIsBounded(v) => (SRA  <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
   205  (Rsh64x32 <t> x y) && !shiftIsBounded(v) => (SRA  <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
   206  (Rsh64x64 <t> x y) && !shiftIsBounded(v) => (SRA  <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
   207  
   208  (Rsh8x(64|32|16|8)  x y) && shiftIsBounded(v) => (SRA  (SignExt8to64  x) y)
   209  (Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA  (SignExt16to64 x) y)
   210  (Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW  x                y)
   211  (Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA   x                y)
   212  
   213  // Rotates.
   214  (RotateLeft8  <t> x y) => (OR (SLL  <t> x (ANDI [7]  <y.Type> y)) (SRL <t> (ZeroExt8to64  x) (ANDI [7]  <y.Type> (NEG <y.Type> y))))
   215  (RotateLeft16 <t> x y) => (OR (SLL  <t> x (ANDI [15] <y.Type> y)) (SRL <t> (ZeroExt16to64 x) (ANDI [15] <y.Type> (NEG <y.Type> y))))
   216  (RotateLeft32 ...) => (ROLW ...)
   217  (RotateLeft64 ...) => (ROL  ...)
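        // RotateLeft8 computes (x << (y&7)) | (zext8(x) >> ((-y)&7)); zero extending x
        // keeps stale high register bits out of the right shift, and only the low 8 bits
        // of the OR are significant. For example, x=0b10110011, y=3:
        // (x<<3)&0xff = 0b10011000, zext8(x)>>5 = 0b00000101, OR = 0b10011101.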
   218  
   219  // Count trailing zeros (note that these will only be emitted for rva22u64 and above).
   220  (Ctz(64|32|16|8)NonZero ...) => (Ctz64 ...)
   221  (Ctz64 ...) => (CTZ  ...)
   222  (Ctz32 ...) => (CTZW ...)
   223  (Ctz16 x) => (CTZW (ORI <typ.UInt32> [1<<16] x))
   224  (Ctz8  x) => (CTZW (ORI <typ.UInt32> [1<<8]  x))
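        // ORI with 1<<16 (1<<8) leaves the low bits unchanged but guarantees a set bit,
        // so CTZW returns 16 (8) for a zero input, matching bits.TrailingZeros16/8.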
   225  
   226  // Bit length (note that these will only be emitted for rva22u64 and above).
   227  (BitLen64 <t> x) => (SUB (MOVDconst [64]) (CLZ  <t> x))
   228  (BitLen32 <t> x) => (SUB (MOVDconst [32]) (CLZW <t> x))
   229  (BitLen16 x) => (BitLen64 (ZeroExt16to64 x))
   230  (BitLen8  x) => (BitLen64 (ZeroExt8to64 x))
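        // bits.Len64(x) = 64 - LeadingZeros64(x); for example x=8: CLZ = 60 and 64-60 = 4.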
   231  
   232  // Byte swap (note that these will only be emitted for rva22u64 and above).
   233  (Bswap64 ...) => (REV8 ...)
   234  (Bswap32 <t> x) => (SRLI [32] (REV8 <t> x))
   235  (Bswap16 <t> x) => (SRLI [48] (REV8 <t> x))
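        // REV8 byte-swaps the whole 64-bit register, leaving the swapped 32-bit (16-bit)
        // value in the top bits; SRLI [32] (SRLI [48]) shifts it back down.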
   236  
   237  // Population count (note that these will be emitted with guards for rva20u64).
   238  (PopCount64 ...) => (CPOP  ...)
   239  (PopCount32 ...) => (CPOPW ...)
   240  (PopCount16 x) => (CPOP (ZeroExt16to64 x))
   241  (PopCount8  x) => (CPOP (ZeroExt8to64  x))
   242  
   243  (Less64  ...) => (SLT  ...)
   244  (Less32  x y) => (SLT  (SignExt32to64 x) (SignExt32to64 y))
   245  (Less16  x y) => (SLT  (SignExt16to64 x) (SignExt16to64 y))
   246  (Less8   x y) => (SLT  (SignExt8to64  x) (SignExt8to64  y))
   247  (Less64U ...) => (SLTU ...)
   248  (Less32U x y) => (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y))
   249  (Less16U x y) => (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y))
   250  (Less8U  x y) => (SLTU (ZeroExt8to64  x) (ZeroExt8to64  y))
   251  (Less(64|32)F ...) => (FLT(D|S) ...)
   252  
   253  // Convert x <= y to !(y > x).
   254  (Leq(64|32|16|8)  x y) => (Not (Less(64|32|16|8)  y x))
   255  (Leq(64|32|16|8)U x y) => (Not (Less(64|32|16|8)U y x))
   256  (Leq(64|32)F ...) => (FLE(D|S) ...)
   257  
   258  (EqPtr x y) => (SEQZ (SUB <typ.Uintptr> x y))
   259  (Eq64  x y) => (SEQZ (SUB <x.Type> x y))
   260  (Eq32  x y) &&  x.Type.IsSigned() => (SEQZ (SUB <x.Type> (SignExt32to64 x) (SignExt32to64 y)))
   261  (Eq32  x y) && !x.Type.IsSigned() => (SEQZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
   262  (Eq16  x y) => (SEQZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
   263  (Eq8   x y) => (SEQZ (SUB <x.Type> (ZeroExt8to64  x) (ZeroExt8to64  y)))
   264  (Eq(64|32)F ...) => (FEQ(D|S) ...)
   265  
   266  (NeqPtr x y) => (Not (EqPtr x y))
   267  (Neq64  x y) => (Not (Eq64  x y))
   268  (Neq32  x y) => (Not (Eq32  x y))
   269  (Neq16  x y) => (Not (Eq16  x y))
   270  (Neq8   x y) => (Not (Eq8   x y))
   271  (Neq(64|32)F ...) => (FNE(D|S) ...)
   272  
   273  // Loads
   274  (Load <t> ptr mem) &&  t.IsBoolean()                   => (MOVBUload ptr mem)
   275  (Load <t> ptr mem) && ( is8BitInt(t) &&  t.IsSigned()) => (MOVBload  ptr mem)
   276  (Load <t> ptr mem) && ( is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem)
   277  (Load <t> ptr mem) && (is16BitInt(t) &&  t.IsSigned()) => (MOVHload  ptr mem)
   278  (Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
   279  (Load <t> ptr mem) && (is32BitInt(t) &&  t.IsSigned()) => (MOVWload  ptr mem)
   280  (Load <t> ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem)
   281  (Load <t> ptr mem) && (is64BitInt(t) || isPtr(t))      => (MOVDload  ptr mem)
   282  (Load <t> ptr mem) &&  is32BitFloat(t)                 => (FMOVWload ptr mem)
   283  (Load <t> ptr mem) &&  is64BitFloat(t)                 => (FMOVDload ptr mem)
   284  
   285  // Stores
   286  (Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
   287  (Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
   288  (Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
   289  (Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVDstore ptr val mem)
   290  (Store {t} ptr val mem) && t.Size() == 4 &&  t.IsFloat() => (FMOVWstore ptr val mem)
   291  (Store {t} ptr val mem) && t.Size() == 8 &&  t.IsFloat() => (FMOVDstore ptr val mem)
   292  
   293  // We need to fold MOVaddr into the load/store ops so that the live variable analysis
   294  // knows what variables are being read/written by the ops.
   295  (MOV(B|BU|H|HU|W|WU|D)load [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) &&
   296  	is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) &&
   297  	(base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   298  	(MOV(B|BU|H|HU|W|WU|D)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
   299  
   300  (FMOV(W|D)load [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) &&
   301  	is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) &&
   302  	(base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   303  	(FMOV(W|D)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
   304  
   305  (MOV(B|H|W|D)store [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) &&
   306  	is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) &&
   307  	(base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   308  	(MOV(B|H|W|D)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   309  
   310  (MOV(B|H|W|D)storezero [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) &&
   311  	canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) &&
   312  	(base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   313  	(MOV(B|H|W|D)storezero [off1+off2] {mergeSym(sym1,sym2)} base mem)
   314  
   315  (FMOV(W|D)store [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) &&
   316  	is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) &&
   317  	(base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   318  	(FMOV(W|D)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   319  
   320  (MOV(B|BU|H|HU|W|WU|D)load [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
   321  	(MOV(B|BU|H|HU|W|WU|D)load [off1+int32(off2)] {sym} base mem)
   322  
   323  (FMOV(W|D)load [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
   324  	(FMOV(W|D)load [off1+int32(off2)] {sym} base mem)
   325  
   326  (MOV(B|H|W|D)store [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
   327  	(MOV(B|H|W|D)store [off1+int32(off2)] {sym} base val mem)
   328  
   329  (MOV(B|H|W|D)storezero [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
   330  	(MOV(B|H|W|D)storezero [off1+int32(off2)] {sym} base mem)
   331  
   332  (FMOV(W|D)store [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
   333  	(FMOV(W|D)store [off1+int32(off2)] {sym} base val mem)
   334  
   335  // Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis
   336  // with OffPtr -> ADDI.
   337  (ADDI [c] (MOVaddr [d] {s} x)) && is32Bit(c+int64(d)) => (MOVaddr [int32(c)+d] {s} x)
   338  
   339  // Small zeroing
   340  (Zero [0] _ mem) => mem
   341  (Zero [1] ptr mem) => (MOVBstore ptr (MOVDconst [0]) mem)
   342  (Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
   343  	(MOVHstore ptr (MOVDconst [0]) mem)
   344  (Zero [2] ptr mem) =>
   345  	(MOVBstore [1] ptr (MOVDconst [0])
   346  		(MOVBstore ptr (MOVDconst [0]) mem))
   347  (Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
   348  	(MOVWstore ptr (MOVDconst [0]) mem)
   349  (Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
   350  	(MOVHstore [2] ptr (MOVDconst [0])
   351  		(MOVHstore ptr (MOVDconst [0]) mem))
   352  (Zero [4] ptr mem) =>
   353  	(MOVBstore [3] ptr (MOVDconst [0])
   354  		(MOVBstore [2] ptr (MOVDconst [0])
   355  			(MOVBstore [1] ptr (MOVDconst [0])
   356  				(MOVBstore ptr (MOVDconst [0]) mem))))
   357  (Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 =>
   358  	(MOVDstore ptr (MOVDconst [0]) mem)
   359  (Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
   360  	(MOVWstore [4] ptr (MOVDconst [0])
   361  		(MOVWstore ptr (MOVDconst [0]) mem))
   362  (Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 =>
   363  	(MOVHstore [6] ptr (MOVDconst [0])
   364  		(MOVHstore [4] ptr (MOVDconst [0])
   365  			(MOVHstore [2] ptr (MOVDconst [0])
   366  				(MOVHstore ptr (MOVDconst [0]) mem))))
   367  
   368  (Zero [3] ptr mem) =>
   369  	(MOVBstore [2] ptr (MOVDconst [0])
   370  		(MOVBstore [1] ptr (MOVDconst [0])
   371  			(MOVBstore ptr (MOVDconst [0]) mem)))
   372  (Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
   373  	(MOVHstore [4] ptr (MOVDconst [0])
   374  		(MOVHstore [2] ptr (MOVDconst [0])
   375  			(MOVHstore ptr (MOVDconst [0]) mem)))
   376  
   377  // Unroll zeroing for medium sizes (at most 192 bytes, i.e. 3 cachelines)
   378  (Zero [s] {t} ptr mem) && s <= 24*moveSize(t.Alignment(), config) =>
   379  	(LoweredZero [makeValAndOff(int32(s),int32(t.Alignment()))] ptr mem)
   380  
   381  // Generic zeroing uses a loop
   382  (Zero [s] {t} ptr mem) && s > 24*moveSize(t.Alignment(), config) =>
   383  	(LoweredZeroLoop [makeValAndOff(int32(s),int32(t.Alignment()))] ptr mem)
   384  
   385  // Checks
   386  (IsNonNil ...) => (SNEZ ...)
   387  (IsInBounds ...) => (Less64U ...)
   388  (IsSliceInBounds ...) => (Leq64U ...)
   389  
   390  // Trivial lowering
   391  (NilCheck ...) => (LoweredNilCheck ...)
   392  (GetClosurePtr ...) => (LoweredGetClosurePtr ...)
   393  (GetCallerSP ...) => (LoweredGetCallerSP ...)
   394  (GetCallerPC ...) => (LoweredGetCallerPC ...)
   395  
   396  // Write barrier.
   397  (WB ...) => (LoweredWB ...)
   398  
   399  // Publication barrier as intrinsic
   400  (PubBarrier ...) => (LoweredPubBarrier ...)
   401  
   402  (PanicBounds ...) => (LoweredPanicBoundsRR ...)
   403  (LoweredPanicBoundsRR [kind] x (MOVDconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
   404  (LoweredPanicBoundsRR [kind] (MOVDconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
   405  (LoweredPanicBoundsRC [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
   406  (LoweredPanicBoundsCR [kind] {p} (MOVDconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)
   407  
   408  // Small moves
   409  (Move [0] _ _ mem) => mem
   410  (Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
   411  (Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
   412  	(MOVHstore dst (MOVHload src mem) mem)
   413  (Move [2] dst src mem) =>
   414  	(MOVBstore [1] dst (MOVBload [1] src mem)
   415  		(MOVBstore dst (MOVBload src mem) mem))
   416  (Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
   417  	(MOVWstore dst (MOVWload src mem) mem)
   418  (Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
   419  	(MOVHstore [2] dst (MOVHload [2] src mem)
   420  		(MOVHstore dst (MOVHload src mem) mem))
   421  (Move [4] dst src mem) =>
   422  	(MOVBstore [3] dst (MOVBload [3] src mem)
   423  		(MOVBstore [2] dst (MOVBload [2] src mem)
   424  			(MOVBstore [1] dst (MOVBload [1] src mem)
   425  				(MOVBstore dst (MOVBload src mem) mem))))
   426  (Move [8] {t} dst src mem) && t.Alignment()%8 == 0 =>
   427  	(MOVDstore dst (MOVDload src mem) mem)
   428  (Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
   429  	(MOVWstore [4] dst (MOVWload [4] src mem)
   430  		(MOVWstore dst (MOVWload src mem) mem))
   431  (Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
   432  	(MOVHstore [6] dst (MOVHload [6] src mem)
   433  		(MOVHstore [4] dst (MOVHload [4] src mem)
   434  			(MOVHstore [2] dst (MOVHload [2] src mem)
   435  				(MOVHstore dst (MOVHload src mem) mem))))
   436  
   437  (Move [3] dst src mem) =>
   438  	(MOVBstore [2] dst (MOVBload [2] src mem)
   439  		(MOVBstore [1] dst (MOVBload [1] src mem)
   440  			(MOVBstore dst (MOVBload src mem) mem)))
   441  (Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
   442  	(MOVHstore [4] dst (MOVHload [4] src mem)
   443  		(MOVHstore [2] dst (MOVHload [2] src mem)
   444  			(MOVHstore dst (MOVHload src mem) mem)))
   445  
   446  // Generic move
   447  (Move [s] {t} dst src mem) && s > 0 && s <= 3*8*moveSize(t.Alignment(), config)
   448  	&& logLargeCopy(v, s) =>
   449  	(LoweredMove [makeValAndOff(int32(s),int32(t.Alignment()))] dst src mem)
   450  
   451  // Generic move uses a loop
   452  (Move [s] {t} dst src mem) && s > 3*8*moveSize(t.Alignment(), config)
   453  	&& logLargeCopy(v, s) =>
   454  	(LoweredMoveLoop [makeValAndOff(int32(s),int32(t.Alignment()))] dst src mem)
   455  
   456  // Boolean ops; 0=false, 1=true
   457  (AndB ...) => (AND ...)
   458  (OrB  ...) => (OR  ...)
   459  (EqB  x y) => (SEQZ (SUB <typ.Bool> x y))
   460  (NeqB x y) => (SNEZ (SUB <typ.Bool> x y))
   461  (Not  ...) => (SEQZ ...)
   462  
   463  // Lowering pointer arithmetic
   464  // TODO: Special handling for SP offsets, like ARM
   465  (OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVaddr [int32(off)] ptr)
   466  (OffPtr [off] ptr) && is32Bit(off) => (ADDI [off] ptr)
   467  (OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr)
   468  
   469  (Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
   470  (Const32F [val]) => (FMVSX (MOVDconst [int64(math.Float32bits(val))]))
   471  (Const64F [val]) => (FMVDX (MOVDconst [int64(math.Float64bits(val))]))
   472  (ConstNil) => (MOVDconst [0])
   473  (ConstBool [val]) => (MOVDconst [int64(b2i(val))])
   474  
   475  (Addr {sym} base) => (MOVaddr {sym} [0] base)
   476  (LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVaddr {sym} (SPanchored base mem))
   477  (LocalAddr <t> {sym} base _)  && !t.Elem().HasPointers() => (MOVaddr {sym} base)
   478  
   479  // Calls
   480  (StaticCall  ...) => (CALLstatic  ...)
   481  (ClosureCall ...) => (CALLclosure ...)
   482  (InterCall   ...) => (CALLinter   ...)
   483  (TailCall ...) => (CALLtail ...)
   484  
   485  // Atomic Intrinsics
   486  (AtomicLoad(Ptr|64|32|8)  ...) => (LoweredAtomicLoad(64|64|32|8) ...)
   487  (AtomicStore(PtrNoWB|64|32|8) ...) => (LoweredAtomicStore(64|64|32|8) ...)
   488  (AtomicAdd(64|32) ...) => (LoweredAtomicAdd(64|32) ...)
   489  
   490  // AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3, ^((uint8(val) ^ 0xff) << ((ptr & 3) * 8)))
   491  (AtomicAnd8 ptr val mem) =>
   492  	(LoweredAtomicAnd32 (ANDI <typ.Uintptr> [^3] ptr)
   493  		(NOT <typ.UInt32> (SLL <typ.UInt32> (XORI <typ.UInt32> [0xff] (ZeroExt8to32 val))
   494  			(SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr)))) mem)
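        // ptr&^3 addresses the containing 32-bit word and (ptr&3)*8 is the byte's bit
        // offset within it. ^((val^0xff) << off) is all ones outside the target byte, so
        // the word-sized AND leaves the other three bytes untouched. For example,
        // off=8, val=0xF0: ^(0x0F << 8) = 0xFFFFF0FF.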
   495  
   496  (AtomicAnd32 ...) => (LoweredAtomicAnd32 ...)
   497  
   498  (AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
   499  (AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)
   500  
   501  (AtomicExchange(64|32) ...) => (LoweredAtomicExchange(64|32) ...)
   502  
   503  // AtomicOr8(ptr,val)  => LoweredAtomicOr32(ptr&^3, uint32(val)<<((ptr&3)*8))
   504  (AtomicOr8 ptr val mem) =>
   505  	(LoweredAtomicOr32 (ANDI <typ.Uintptr> [^3] ptr)
   506  		(SLL <typ.UInt32> (ZeroExt8to32 val)
   507  			(SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr))) mem)
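        // Here the mask is uint32(val) << ((ptr&3)*8): the other byte lanes are zero,
        // so OR-ing leaves them unchanged.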
   508  
   509  (AtomicOr32  ...) => (LoweredAtomicOr32  ...)
   510  
   511  // Conditional branches
   512  (If cond yes no) => (BNEZ (MOVBUreg <typ.UInt64> cond) yes no)
   513  
   514  // Optimizations
   515  
   516  // Absorb SEQZ/SNEZ into branch.
   517  (BEQZ (SEQZ x) yes no) => (BNEZ x yes no)
   518  (BEQZ (SNEZ x) yes no) => (BEQZ x yes no)
   519  (BNEZ (SEQZ x) yes no) => (BEQZ x yes no)
   520  (BNEZ (SNEZ x) yes no) => (BNEZ x yes no)
   521  
   522  // Remove redundant NEG from BEQZ/BNEZ.
   523  (BEQZ (NEG x) yes no) => (BEQZ x yes no)
   524  (BNEZ (NEG x) yes no) => (BNEZ x yes no)
   525  
   526  // Rewrite FNES/FNED comparisons as negated FEQS/FEQD comparisons.
   527  (BEQZ (FNES <t> x y) yes no) => (BNEZ (FEQS <t> x y) yes no)
   528  (BNEZ (FNES <t> x y) yes no) => (BEQZ (FEQS <t> x y) yes no)
   529  (BEQZ (FNED <t> x y) yes no) => (BNEZ (FEQD <t> x y) yes no)
   530  (BNEZ (FNED <t> x y) yes no) => (BEQZ (FEQD <t> x y) yes no)
   531  
   532  // Convert BEQZ/BNEZ into more optimal branch conditions.
   533  (BEQZ (SUB x y) yes no) => (BEQ x y yes no)
   534  (BNEZ (SUB x y) yes no) => (BNE x y yes no)
   535  (BEQZ (SLT x y) yes no) => (BGE x y yes no)
   536  (BNEZ (SLT x y) yes no) => (BLT x y yes no)
   537  (BEQZ (SLTU x y) yes no) => (BGEU x y yes no)
   538  (BNEZ (SLTU x y) yes no) => (BLTU x y yes no)
   539  (BEQZ (SLTI [x] y) yes no) => (BGE y (MOVDconst [x]) yes no)
   540  (BNEZ (SLTI [x] y) yes no) => (BLT y (MOVDconst [x]) yes no)
   541  (BEQZ (SLTIU [x] y) yes no) => (BGEU y (MOVDconst [x]) yes no)
   542  (BNEZ (SLTIU [x] y) yes no) => (BLTU y (MOVDconst [x]) yes no)
   543  
   544  // Convert branches that compare against zero into the more optimal branch-on-zero forms.
   545  (BEQ  (MOVDconst [0]) cond yes no) => (BEQZ cond yes no)
   546  (BEQ  cond (MOVDconst [0]) yes no) => (BEQZ cond yes no)
   547  (BNE  (MOVDconst [0]) cond yes no) => (BNEZ cond yes no)
   548  (BNE  cond (MOVDconst [0]) yes no) => (BNEZ cond yes no)
   549  (BLT  (MOVDconst [0]) cond yes no) => (BGTZ cond yes no)
   550  (BLT  cond (MOVDconst [0]) yes no) => (BLTZ cond yes no)
   551  (BLTU (MOVDconst [0]) cond yes no) => (BNEZ cond yes no)
   552  (BGE  (MOVDconst [0]) cond yes no) => (BLEZ cond yes no)
   553  (BGE  cond (MOVDconst [0]) yes no) => (BGEZ cond yes no)
   554  (BGEU (MOVDconst [0]) cond yes no) => (BEQZ cond yes no)
   555  
   556  // Remove redundant NEG from SEQZ/SNEZ.
   557  (SEQZ (NEG x)) => (SEQZ x)
   558  (SNEZ (NEG x)) => (SNEZ x)
   559  
   560  // Remove redundant SEQZ/SNEZ.
   561  (SEQZ (SEQZ x)) => (SNEZ x)
   562  (SEQZ (SNEZ x)) => (SEQZ x)
   563  (SNEZ (SEQZ x)) => (SEQZ x)
   564  (SNEZ (SNEZ x)) => (SNEZ x)
   565  
   566  // Store zero.
   567  (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
   568  (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
   569  (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
   570  (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem)
   571  
   572  // Boolean ops are already extended.
   573  (MOVBUreg x:((FLES|FLTS|FEQS|FNES) _ _)) => x
   574  (MOVBUreg x:((FLED|FLTD|FEQD|FNED) _ _)) => x
   575  (MOVBUreg x:((SEQZ|SNEZ) _)) => x
   576  (MOVBUreg x:((SLT|SLTU) _ _)) => x
   577  
   578  // Avoid extending when already sufficiently masked.
   579  (MOVBreg  x:(ANDI [c] y)) && c >= 0 && int64(int8(c)) == c => x
   580  (MOVHreg  x:(ANDI [c] y)) && c >= 0 && int64(int16(c)) == c => x
   581  (MOVWreg  x:(ANDI [c] y)) && c >= 0 && int64(int32(c)) == c => x
   582  (MOVBUreg x:(ANDI [c] y)) && c >= 0 && int64(uint8(c)) == c => x
   583  (MOVHUreg x:(ANDI [c] y)) && c >= 0 && int64(uint16(c)) == c => x
   584  (MOVWUreg x:(ANDI [c] y)) && c >= 0 && int64(uint32(c)) == c => x
   585  
   586  // Combine masking and zero extension.
   587  (MOVBUreg (ANDI [c] x)) && c < 0 => (ANDI [int64(uint8(c))] x)
   588  (MOVHUreg (ANDI [c] x)) && c < 0 => (ANDI [int64(uint16(c))] x)
   589  (MOVWUreg (ANDI [c] x)) && c < 0 => (AND (MOVDconst [int64(uint32(c))]) x)
   590  
   591  // Combine negation and sign extension.
   592  (MOVWreg (NEG x)) => (NEGW x)
   593  
   594  // Avoid sign/zero extension for consts.
   595  (MOVBreg  (MOVDconst [c])) => (MOVDconst [int64(int8(c))])
   596  (MOVHreg  (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
   597  (MOVWreg  (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
   598  (MOVBUreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))])
   599  (MOVHUreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
   600  (MOVWUreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
   601  
   602  // Avoid sign/zero extension after properly typed load.
   603  (MOVBreg  x:(MOVBload  _ _)) => (MOVDreg x)
   604  (MOVHreg  x:(MOVBload  _ _)) => (MOVDreg x)
   605  (MOVHreg  x:(MOVBUload _ _)) => (MOVDreg x)
   606  (MOVHreg  x:(MOVHload  _ _)) => (MOVDreg x)
   607  (MOVWreg  x:(MOVBload  _ _)) => (MOVDreg x)
   608  (MOVWreg  x:(MOVBUload _ _)) => (MOVDreg x)
   609  (MOVWreg  x:(MOVHload  _ _)) => (MOVDreg x)
   610  (MOVWreg  x:(MOVHUload _ _)) => (MOVDreg x)
   611  (MOVWreg  x:(MOVWload  _ _)) => (MOVDreg x)
   612  (MOVBUreg x:(MOVBUload _ _)) => (MOVDreg x)
   613  (MOVHUreg x:(MOVBUload _ _)) => (MOVDreg x)
   614  (MOVHUreg x:(MOVHUload _ _)) => (MOVDreg x)
   615  (MOVWUreg x:(MOVBUload _ _)) => (MOVDreg x)
   616  (MOVWUreg x:(MOVHUload _ _)) => (MOVDreg x)
   617  (MOVWUreg x:(MOVWUload _ _)) => (MOVDreg x)
   618  
   619  // Avoid zero extension after properly typed atomic operation.
   620  (MOVBUreg x:(Select0 (LoweredAtomicLoad8 _ _))) => (MOVDreg x)
   621  (MOVBUreg x:(Select0 (LoweredAtomicCas32 _ _ _ _))) => (MOVDreg x)
   622  (MOVBUreg x:(Select0 (LoweredAtomicCas64 _ _ _ _))) => (MOVDreg x)
   623  
   624  // Avoid sign extension after word arithmetic.
   625  (MOVWreg x:(ADDIW   _)) => (MOVDreg x)
   626  (MOVWreg x:(SUBW  _ _)) => (MOVDreg x)
   627  (MOVWreg x:(NEGW    _)) => (MOVDreg x)
   628  (MOVWreg x:(MULW  _ _)) => (MOVDreg x)
   629  (MOVWreg x:(DIVW  _ _)) => (MOVDreg x)
   630  (MOVWreg x:(DIVUW _ _)) => (MOVDreg x)
   631  (MOVWreg x:(REMW  _ _)) => (MOVDreg x)
   632  (MOVWreg x:(REMUW _ _)) => (MOVDreg x)
   633  (MOVWreg x:(ROLW  _ _)) => (MOVDreg x)
   634  (MOVWreg x:(RORW  _ _)) => (MOVDreg x)
   635  (MOVWreg x:(RORIW   _)) => (MOVDreg x)
   636  
   637  // Fold double extensions.
   638  (MOVBreg  x:(MOVBreg  _)) => (MOVDreg x)
   639  (MOVHreg  x:(MOVBreg  _)) => (MOVDreg x)
   640  (MOVHreg  x:(MOVBUreg _)) => (MOVDreg x)
   641  (MOVHreg  x:(MOVHreg  _)) => (MOVDreg x)
   642  (MOVWreg  x:(MOVBreg  _)) => (MOVDreg x)
   643  (MOVWreg  x:(MOVBUreg _)) => (MOVDreg x)
   644  (MOVWreg  x:(MOVHreg  _)) => (MOVDreg x)
   645  (MOVWreg  x:(MOVWreg  _)) => (MOVDreg x)
   646  (MOVBUreg x:(MOVBUreg _)) => (MOVDreg x)
   647  (MOVHUreg x:(MOVBUreg _)) => (MOVDreg x)
   648  (MOVHUreg x:(MOVHUreg _)) => (MOVDreg x)
   649  (MOVWUreg x:(MOVBUreg _)) => (MOVDreg x)
   650  (MOVWUreg x:(MOVHUreg _)) => (MOVDreg x)
   651  (MOVWUreg x:(MOVWUreg _)) => (MOVDreg x)
   652  
   653  // Do not extend before store.
   654  (MOVBstore [off] {sym} ptr (MOVBreg  x) mem) => (MOVBstore [off] {sym} ptr x mem)
   655  (MOVBstore [off] {sym} ptr (MOVHreg  x) mem) => (MOVBstore [off] {sym} ptr x mem)
   656  (MOVBstore [off] {sym} ptr (MOVWreg  x) mem) => (MOVBstore [off] {sym} ptr x mem)
   657  (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
   658  (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
   659  (MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
   660  (MOVHstore [off] {sym} ptr (MOVHreg  x) mem) => (MOVHstore [off] {sym} ptr x mem)
   661  (MOVHstore [off] {sym} ptr (MOVWreg  x) mem) => (MOVHstore [off] {sym} ptr x mem)
   662  (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
   663  (MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
   664  (MOVWstore [off] {sym} ptr (MOVWreg  x) mem) => (MOVWstore [off] {sym} ptr x mem)
   665  (MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
   666  
   667  // Replace extend after load with alternate load where possible.
   668  (MOVBreg  <t> x:(MOVBUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload  <t> [off] {sym} ptr mem)
   669  (MOVHreg  <t> x:(MOVHUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHload  <t> [off] {sym} ptr mem)
   670  (MOVWreg  <t> x:(MOVWUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload  <t> [off] {sym} ptr mem)
   671  (MOVBUreg <t> x:(MOVBload  [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBUload <t> [off] {sym} ptr mem)
   672  (MOVHUreg <t> x:(MOVHload  [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHUload <t> [off] {sym} ptr mem)
   673  (MOVWUreg <t> x:(MOVWload  [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWUload <t> [off] {sym} ptr mem)
   674  
   675  // Replace load from same location as preceding store with copy.
   676  (MOV(D|W|H|B)load [off] {sym} ptr1 (MOV(D|W|H|B)store [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOV(D|W|H|B)reg x)
   677  (MOV(W|H|B)Uload  [off] {sym} ptr1 (MOV(W|H|B)store   [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOV(W|H|B)Ureg  x)
   678  (MOVDload  [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVXD x)
   679  (FMOVDload [off] {sym} ptr1 (MOVDstore  [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVDX x)
   680  (MOVWload  [off] {sym} ptr1 (FMOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVXS x)
   681  (MOVWUload [off] {sym} ptr1 (FMOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVWUreg (FMVXS x))
   682  (FMOVWload [off] {sym} ptr1 (MOVWstore  [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (FMVSX x)
   683  
   684  // If a register move has only 1 use, just use the same register without emitting an instruction.
   685  // MOVDnop does not emit an instruction; it only ensures the type.
   686  (MOVDreg x) && x.Uses == 1 => (MOVDnop x)
   687  
   688  // TODO: we should be able to get rid of MOVDnop altogether.
   689  // But for now, this is enough to get rid of lots of them.
   690  (MOVDnop (MOVDconst [c])) => (MOVDconst [c])
   691  
   692  // Avoid unnecessary zero and sign extension when right shifting.
   693  (SRAI <t> [x] (MOVWreg  y)) && x >= 0 && x <= 31 => (SRAIW <t> [int64(x)] y)
   694  (SRLI <t> [x] (MOVWUreg y)) && x >= 0 && x <= 31 => (SRLIW <t> [int64(x)] y)
   695  
   696  // Replace right shifts that equal or exceed the size of the signed type.
   697  (SRAI <t> [x] (MOVBreg y)) && x >=  8 => (SRAI  [63] (SLLI <t> [56] y))
   698  (SRAI <t> [x] (MOVHreg y)) && x >= 16 => (SRAI  [63] (SLLI <t> [48] y))
   699  (SRAI <t> [x] (MOVWreg y)) && x >= 32 => (SRAIW [31] y)
   700  
   701  // Eliminate right shifts that equal or exceed the size of the unsigned type.
   702  (SRLI <t> [x] (MOVBUreg y)) && x >=  8 => (MOVDconst <t> [0])
   703  (SRLI <t> [x] (MOVHUreg y)) && x >= 16 => (MOVDconst <t> [0])
   704  (SRLI <t> [x] (MOVWUreg y)) && x >= 32 => (MOVDconst <t> [0])
   705  
   706  // Fold constant into immediate instructions where possible.
   707  (ADD (MOVDconst <t> [val]) x) && is32Bit(val) && !t.IsPtr() => (ADDI [val] x)
   708  (AND (MOVDconst [val]) x) && is32Bit(val) => (ANDI [val] x)
   709  (OR  (MOVDconst [val]) x) && is32Bit(val) => (ORI  [val] x)
   710  (XOR (MOVDconst [val]) x) && is32Bit(val) => (XORI [val] x)
   711  (ROL  x (MOVDconst [val])) => (RORI  [int64(int8(-val)&63)] x)
   712  (ROLW x (MOVDconst [val])) => (RORIW [int64(int8(-val)&31)] x)
   713  (ROR  x (MOVDconst [val])) => (RORI  [int64(val&63)] x)
   714  (RORW x (MOVDconst [val])) => (RORIW [int64(val&31)] x)
   715  (SLL  x (MOVDconst [val])) => (SLLI [int64(val&63)] x)
   716  (SRL  x (MOVDconst [val])) => (SRLI [int64(val&63)] x)
   717  (SLLW x (MOVDconst [val])) => (SLLIW [int64(val&31)] x)
   718  (SRLW x (MOVDconst [val])) => (SRLIW [int64(val&31)] x)
   719  (SRA  x (MOVDconst [val])) => (SRAI [int64(val&63)] x)
   720  (SRAW x (MOVDconst [val])) => (SRAIW [int64(val&31)] x)
   721  (SLT  x (MOVDconst [val])) && val >= -2048 && val <= 2047 => (SLTI  [val] x)
   722  (SLTU x (MOVDconst [val])) && val >= -2048 && val <= 2047 => (SLTIU [val] x)
   723  
   724  // Replace negated left rotation with right rotation.
   725  (ROL  x (NEG y)) => (ROR  x y)
   726  (ROLW x (NEG y)) => (RORW x y)
   727  
   728  // Generic simplifications
   729  (ADD x (NEG y)) => (SUB x y)
   730  (SUB x (NEG y)) => (ADD x y)
   731  (SUB x x) => (MOVDconst [0])
   732  (AND x x) => x
   733  (OR  x x) => x
   734  (ORN x x) => (MOVDconst [-1])
   735  (XOR x x) => (MOVDconst [0])
   736  
   737  // Convert const subtraction into ADDI with negative immediate, where possible.
   738  (SUB x (MOVDconst [val])) && is32Bit(-val) => (ADDI [-val] x)
   739  (SUB <t> (MOVDconst [val]) y) && is32Bit(-val) => (NEG (ADDI <t> [-val] y))
   740  
   741  // Subtraction of zero.
   742  (SUB  x (MOVDconst [0])) => x
   743  (SUBW x (MOVDconst [0])) => (ADDIW [0] x)
   744  
   745  // Subtraction from zero.
   746  (SUB  (MOVDconst [0]) x) => (NEG x)
   747  (SUBW (MOVDconst [0]) x) => (NEGW x)
   748  
   749  // Fold negation into subtraction.
   750  (NEG (SUB x y)) => (SUB y x)
   751  (NEG <t> s:(ADDI [val] (SUB x y))) && s.Uses == 1 && is32Bit(-val) => (ADDI [-val] (SUB <t> y x))
   752  
   753  // Double negation.
   754  (NEG (NEG x)) => x
   755  (NEG <t> s:(ADDI [val] (NEG x))) && s.Uses == 1 && is32Bit(-val) => (ADDI [-val] x)
   756  
   757  // Addition of zero or two constants.
   758  (ADDI [0] x) => x
   759  (ADDI [x] (MOVDconst [y])) && is32Bit(x + y) => (MOVDconst [x + y])
   760  
   761  // ANDI with all zeros, all ones or two constants.
   762  (ANDI [0]  x) => (MOVDconst [0])
   763  (ANDI [-1] x) => x
   764  (ANDI [x] (MOVDconst [y])) => (MOVDconst [x & y])
   765  
   766  // ORI with all zeros, all ones or two constants.
   767  (ORI [0]  x) => x
   768  (ORI [-1] x) => (MOVDconst [-1])
   769  (ORI [x] (MOVDconst [y])) => (MOVDconst [x | y])
   770  
   771  // Combine operations with immediate.
   772  (ADDI [x] (ADDI [y] z)) && is32Bit(x + y) => (ADDI [x + y] z)
   773  (ANDI [x] (ANDI [y] z)) => (ANDI [x & y] z)
   774  (ORI  [x] (ORI  [y] z)) => (ORI  [x | y] z)
   775  
   776  // Negation of a constant.
   777  (NEG  (MOVDconst [x])) => (MOVDconst [-x])
   778  (NEGW (MOVDconst [x])) => (MOVDconst [int64(int32(-x))])
   779  
   780  // Shift of a constant.
   781  (SLLI [x] (MOVDconst [y])) && is32Bit(y << uint32(x)) => (MOVDconst [y << uint32(x)])
   782  (SRLI [x] (MOVDconst [y])) => (MOVDconst [int64(uint64(y) >> uint32(x))])
   783  (SRAI [x] (MOVDconst [y])) => (MOVDconst [int64(y) >> uint32(x)])
   784  
   785  // SLTI/SLTIU with constants.
   786  (SLTI  [x] (MOVDconst [y])) => (MOVDconst [b2i(int64(y) < int64(x))])
   787  (SLTIU [x] (MOVDconst [y])) => (MOVDconst [b2i(uint64(y) < uint64(x))])
   788  
   789  // SLTI/SLTIU with known outcomes.
   790  (SLTI  [x] (ANDI [y] _)) && y >= 0 && int64(y) < int64(x) => (MOVDconst [1])
   791  (SLTIU [x] (ANDI [y] _)) && y >= 0 && uint64(y) < uint64(x) => (MOVDconst [1])
   792  (SLTI  [x] (ORI  [y] _)) && y >= 0 && int64(y) >= int64(x) => (MOVDconst [0])
   793  (SLTIU [x] (ORI  [y] _)) && y >= 0 && uint64(y) >= uint64(x) => (MOVDconst [0])
   794  
   795  // SLT/SLTU with known outcomes.
   796  (SLT  x x) => (MOVDconst [0])
   797  (SLTU x x) => (MOVDconst [0])
   798  
   799  // If only one result of LoweredMuluhilo is used, compute just that result directly.
   800  (Select0 m:(LoweredMuluhilo x y)) && m.Uses == 1 => (MULHU x y)
   801  (Select1 m:(LoweredMuluhilo x y)) && m.Uses == 1 => (MUL x y)
   802  
   803  (FADD(S|D) a (FMUL(S|D) x y)) && a.Block.Func.useFMA(v) => (FMADD(S|D) x y a)
   804  (FSUB(S|D) a (FMUL(S|D) x y)) && a.Block.Func.useFMA(v) => (FNMSUB(S|D) x y a)
   805  (FSUB(S|D) (FMUL(S|D) x y) a) && a.Block.Func.useFMA(v) => (FMSUB(S|D) x y a)
   806  
   807  // Merge negation into fused multiply-add and multiply-subtract.
   808  //
   809  // Key:
   810  //
   811  //   [+ -](x * y [+ -] z).
   812  //    _ N         A S
   813  //                D U
   814  //                D B
   815  //
   816  // Note: multiplication commutativity handled by rule generator.
   817  (F(MADD|NMADD|MSUB|NMSUB)S neg:(FNEGS x) y z) && neg.Uses == 1 => (F(NMSUB|MSUB|NMADD|MADD)S x y z)
   818  (F(MADD|NMADD|MSUB|NMSUB)S x y neg:(FNEGS z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)S x y z)
   819  (F(MADD|NMADD|MSUB|NMSUB)D neg:(FNEGD x) y z) && neg.Uses == 1 => (F(NMSUB|MSUB|NMADD|MADD)D x y z)
   820  (F(MADD|NMADD|MSUB|NMSUB)D x y neg:(FNEGD z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)D x y z)
   821  
   822  // Test for -∞ (bit 0) using the 64-bit classify instruction.
   823  (FLTD x (FMVDX (MOVDconst [int64(math.Float64bits(-math.MaxFloat64))]))) => (ANDI [1] (FCLASSD x))
   824  (FLED (FMVDX (MOVDconst [int64(math.Float64bits(-math.MaxFloat64))])) x) => (SNEZ (ANDI <typ.Int64> [0xff &^ 1] (FCLASSD x)))
   825  (FEQD x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(-1)))])))  => (ANDI [1] (FCLASSD x))
   826  (FNED x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(-1)))])))  => (SEQZ (ANDI <typ.Int64> [1] (FCLASSD x)))
   827  
   828  // Test for +∞ (bit 7) using the 64-bit classify instruction.
   829  (FLTD (FMVDX (MOVDconst [int64(math.Float64bits(math.MaxFloat64))])) x) => (SNEZ (ANDI <typ.Int64> [1<<7] (FCLASSD x)))
   830  (FLED x (FMVDX (MOVDconst [int64(math.Float64bits(math.MaxFloat64))]))) => (SNEZ (ANDI <typ.Int64> [0xff &^ (1<<7)] (FCLASSD x)))
   831  (FEQD x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(1)))]))) => (SNEZ (ANDI <typ.Int64> [1<<7] (FCLASSD x)))
   832  (FNED x (FMVDX (MOVDconst [int64(math.Float64bits(math.Inf(1)))]))) => (SEQZ (ANDI <typ.Int64> [1<<7] (FCLASSD x)))
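        // Per the RISC-V FCLASS encoding, exactly one class bit is set: 0 is -Inf, 1-6
        // cover the finite classes, 7 is +Inf, and 8/9 are signaling/quiet NaN. ANDI [1]
        // therefore tests -Inf, ANDI [1<<7] tests +Inf, and the 0xff&^... masks accept
        // every non-NaN class except the excluded infinity.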
   833  
   834  //
   835  // Optimisations for rva22u64 and above.
   836  //
   837  
   838  // Combine left shift and addition.
   839  (ADD (SLLI [1] x) y) && buildcfg.GORISCV64 >= 22 => (SH1ADD x y)
   840  (ADD (SLLI [2] x) y) && buildcfg.GORISCV64 >= 22 => (SH2ADD x y)
   841  (ADD (SLLI [3] x) y) && buildcfg.GORISCV64 >= 22 => (SH3ADD x y)
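        // SH1ADD/SH2ADD/SH3ADD (Zba) compute (x<<n)+y in a single instruction, which is
        // the common pattern for scaled array indexing.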
   842  
   843  // Integer minimum and maximum.
   844  (Min64  x y) && buildcfg.GORISCV64 >= 22 => (MIN  x y)
   845  (Max64  x y) && buildcfg.GORISCV64 >= 22 => (MAX  x y)
   846  (Min64u x y) && buildcfg.GORISCV64 >= 22 => (MINU x y)
   847  (Max64u x y) && buildcfg.GORISCV64 >= 22 => (MAXU x y)
   848  
