// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Lowering arithmetic
(Add(Ptr|64|32|16|8) ...) => (ADD ...)
(Add(64|32)F ...) => (FADD(D|S) ...)

(Sub(Ptr|64|32|16|8) ...) => (SUB ...)
(Sub(64|32)F ...) => (FSUB(D|S) ...)

(Mul64 ...) => (MUL ...)
(Mul64uhilo ...) => (LoweredMuluhilo ...)
(Mul64uover ...) => (LoweredMuluover ...)
(Mul32 ...) => (MULW ...)
(Mul16 x y) => (MULW (SignExt16to32 x) (SignExt16to32 y))
(Mul8 x y) => (MULW (SignExt8to32 x) (SignExt8to32 y))
(Mul(64|32)F ...) => (FMUL(D|S) ...)

(Div(64|32)F ...) => (FDIV(D|S) ...)

(Div64 x y [false]) => (DIV x y)
(Div64u ...) => (DIVU ...)
(Div32 x y [false]) => (DIVW x y)
(Div32u ...) => (DIVUW ...)
(Div16 x y [false]) => (DIVW (SignExt16to32 x) (SignExt16to32 y))
(Div16u x y) => (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y))
(Div8 x y) => (DIVW (SignExt8to32 x) (SignExt8to32 y))
(Div8u x y) => (DIVUW (ZeroExt8to32 x) (ZeroExt8to32 y))

(Hmul64 ...) => (MULH ...)
(Hmul64u ...) => (MULHU ...)
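// There is no 32-bit high-multiply instruction, so widen the operands,
// form the full 64-bit product and take its upper 32 bits.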
(Hmul32 x y) => (SRAI [32] (MUL (SignExt32to64 x) (SignExt32to64 y)))
(Hmul32u x y) => (SRLI [32] (MUL (ZeroExt32to64 x) (ZeroExt32to64 y)))

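// The low word of Add64carry is x + y + c. The carry out is set when either
// addition wraps: with s = x + y, the first addition wraps exactly when
// s < x, and adding the carry-in then wraps exactly when s + c < s.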
(Select0 (Add64carry x y c)) => (ADD (ADD <typ.UInt64> x y) c)
(Select1 (Add64carry x y c)) =>
	(OR (SLTU <typ.UInt64> s:(ADD <typ.UInt64> x y) x) (SLTU <typ.UInt64> (ADD <typ.UInt64> s c) s))

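// The low word of Sub64borrow is x - y - c. The borrow out is set when either
// subtraction wraps: with s = x - y, the first subtraction wraps exactly when
// x < s, and subtracting the borrow-in then wraps exactly when s < s - c.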
(Select0 (Sub64borrow x y c)) => (SUB (SUB <typ.UInt64> x y) c)
(Select1 (Sub64borrow x y c)) =>
	(OR (SLTU <typ.UInt64> x s:(SUB <typ.UInt64> x y)) (SLTU <typ.UInt64> s (SUB <typ.UInt64> s c)))

// (x + y) / 2 => (x / 2) + (y / 2) + (x & y & 1)
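// Computing x + y directly could overflow; halving each operand first and
// adding back the carried bit from the two low bits (x & y & 1) avoids that.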
(Avg64u <t> x y) => (ADD (ADD <t> (SRLI <t> [1] x) (SRLI <t> [1] y)) (ANDI <t> [1] (AND <t> x y)))

(Mod64 x y [false]) => (REM x y)
(Mod64u ...) => (REMU ...)
(Mod32 x y [false]) => (REMW x y)
(Mod32u ...) => (REMUW ...)
(Mod16 x y [false]) => (REMW (SignExt16to32 x) (SignExt16to32 y))
(Mod16u x y) => (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y))
(Mod8 x y) => (REMW (SignExt8to32 x) (SignExt8to32 y))
(Mod8u x y) => (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y))

(And(64|32|16|8) ...) => (AND ...)
(Or(64|32|16|8) ...) => (OR ...)
(Xor(64|32|16|8) ...) => (XOR ...)

(Neg(64|32|16|8) ...) => (NEG ...)
(Neg(64|32)F ...) => (FNEG(D|S) ...)

(Com(64|32|16|8) ...) => (NOT ...)

(Sqrt ...) => (FSQRTD ...)
(Sqrt32 ...) => (FSQRTS ...)

(Copysign ...) => (FSGNJD ...)

(Abs ...) => (FABSD ...)

(FMA ...) => (FMADDD ...)

(Min(64|32)F ...) => (LoweredFMIN(D|S) ...)
(Max(64|32)F ...) => (LoweredFMAX(D|S) ...)

// Sign and zero extension.

(SignExt8to16 ...) => (MOVBreg ...)
(SignExt8to32 ...) => (MOVBreg ...)
(SignExt8to64 ...) => (MOVBreg ...)
(SignExt16to32 ...) => (MOVHreg ...)
(SignExt16to64 ...) => (MOVHreg ...)
(SignExt32to64 ...) => (MOVWreg ...)

(ZeroExt8to16 ...) => (MOVBUreg ...)
(ZeroExt8to32 ...) => (MOVBUreg ...)
(ZeroExt8to64 ...) => (MOVBUreg ...)
(ZeroExt16to32 ...) => (MOVHUreg ...)
(ZeroExt16to64 ...) => (MOVHUreg ...)
(ZeroExt32to64 ...) => (MOVWUreg ...)

(Cvt32to32F ...) => (FCVTSW ...)
(Cvt32to64F ...) => (FCVTDW ...)
(Cvt64to32F ...) => (FCVTSL ...)
(Cvt64to64F ...) => (FCVTDL ...)

(Cvt32Fto32 ...) => (FCVTWS ...)
(Cvt32Fto64 ...) => (FCVTLS ...)
(Cvt64Fto32 ...) => (FCVTWD ...)
(Cvt64Fto64 ...) => (FCVTLD ...)

(Cvt32Fto64F ...) => (FCVTDS ...)
(Cvt64Fto32F ...) => (FCVTSD ...)

(CvtBoolToUint8 ...) => (Copy ...)

(Round(32|64)F ...) => (LoweredRound(32|64)F ...)

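// Slicemask is 0 when the length x is 0 and all ones otherwise: NEG makes any
// positive length negative and the arithmetic shift broadcasts the sign bit.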
(Slicemask <t> x) => (SRAI [63] (NEG <t> x))

// Truncations
// We ignore the unused high parts of registers, so truncates are just copies.
(Trunc16to8 ...) => (Copy ...)
(Trunc32to8 ...) => (Copy ...)
(Trunc32to16 ...) => (Copy ...)
(Trunc64to8 ...) => (Copy ...)
(Trunc64to16 ...) => (Copy ...)
(Trunc64to32 ...) => (Copy ...)

// Shifts

// SLL only considers the bottom 6 bits of y. If y >= 64, the result should
// always be 0.
//
// Breaking down the operation:
//
// (SLL x y) generates x << (y & 63).
//
// If y < 64, this is the value we want. Otherwise, we want zero.
//
// So, we AND with -1 * uint64(y < 64), which is 0xfffff... if y < 64 and 0 otherwise.
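// For example, with y == 3 the SLTIU produces 1 and its negation is an all
// ones mask, so the shifted value passes through unchanged; with y == 70 the
// SLTIU produces 0 and the AND forces the result to zero.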
(Lsh8x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Lsh8x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Lsh8x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Lsh8x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] y)))
(Lsh16x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Lsh16x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Lsh16x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Lsh16x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] y)))
(Lsh32x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Lsh32x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Lsh32x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Lsh32x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] y)))
(Lsh64x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Lsh64x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Lsh64x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Lsh64x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))

(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)

// SRL only considers the bottom 6 bits of y, similarly SRLW only considers the
// bottom 5 bits of y. Ensure that the result is always zero if the shift exceeds
// the maximum value. See Lsh above for a detailed description.
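// The 8- and 16-bit cases zero extend x first so that bits from the unused
// upper part of the register cannot be shifted into the result; the 32-bit
// cases use SRLW and therefore compare the shift amount against 32.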
(Rsh8Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Rsh8Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Rsh8Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Rsh8Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] y)))
(Rsh16Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Rsh16Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Rsh16Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Rsh16Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
(Rsh32Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt8to64 y))))
(Rsh32Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt16to64 y))))
(Rsh32Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt32to64 y))))
(Rsh32Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] y)))
(Rsh64Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Rsh64Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Rsh64Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Rsh64Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))

(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt8to64 x) y)
(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt16to64 x) y)
(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRLW x y)
(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL x y)

// SRA only considers the bottom 6 bits of y, similarly SRAW only considers the
// bottom 5 bits. If y is greater than the maximum value (either 63 or 31
// depending on the instruction), the result of the shift should be either 0
// or -1 based on the sign bit of x.
//
// We implement this by forcing the maximum shift (ORing in -1) if y exceeds the maximum value.
//
// We OR (uint64(y < 64) - 1) into y before passing it to SRA. This leaves
// us with -1 (0xffff...) if y >= 64. Similarly, we OR (uint64(y < 32) - 1) into y
// before passing it to SRAW.
//
// We don't need to sign-extend the OR result, as it will be at minimum 8 bits,
// more than the 5 or 6 bits SRAW and SRA care about.
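// For example, a 64-bit shift with y == 70: uint64(y < 64) - 1 is all ones,
// so y | -1 == -1 and the masked shift amount becomes 63, filling the result
// with copies of the sign bit of x.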
(Rsh8x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
(Rsh8x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
(Rsh8x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
(Rsh8x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
(Rsh16x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
(Rsh16x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
(Rsh16x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
(Rsh16x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
(Rsh32x8 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt8to64 y)))))
(Rsh32x16 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt16to64 y)))))
(Rsh32x32 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt32to64 y)))))
(Rsh32x64 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] y))))
(Rsh64x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
(Rsh64x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
(Rsh64x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
(Rsh64x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))

(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt8to64 x) y)
(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt16to64 x) y)
(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW x y)
(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA x y)

// Rotates.
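// There are no 8- or 16-bit rotate instructions, so build them from shifts:
// the left shift provides the low bits and the right shift of the zero
// extended value by (-y) & (width-1) supplies the bits that wrap around.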
(RotateLeft8 <t> x y) => (OR (SLL <t> x (ANDI [7] <y.Type> y)) (SRL <t> (ZeroExt8to64 x) (ANDI [7] <y.Type> (NEG <y.Type> y))))
(RotateLeft16 <t> x y) => (OR (SLL <t> x (ANDI [15] <y.Type> y)) (SRL <t> (ZeroExt16to64 x) (ANDI [15] <y.Type> (NEG <y.Type> y))))
(RotateLeft32 ...) => (ROLW ...)
(RotateLeft64 ...) => (ROL ...)

(Less64 ...) => (SLT ...)
(Less32 x y) => (SLT (SignExt32to64 x) (SignExt32to64 y))
(Less16 x y) => (SLT (SignExt16to64 x) (SignExt16to64 y))
(Less8 x y) => (SLT (SignExt8to64 x) (SignExt8to64 y))
(Less64U ...) => (SLTU ...)
(Less32U x y) => (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y))
(Less16U x y) => (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y))
(Less8U x y) => (SLTU (ZeroExt8to64 x) (ZeroExt8to64 y))
(Less(64|32)F ...) => (FLT(D|S) ...)

// Convert x <= y to !(y < x).
(Leq(64|32|16|8) x y) => (Not (Less(64|32|16|8) y x))
(Leq(64|32|16|8)U x y) => (Not (Less(64|32|16|8)U y x))
(Leq(64|32)F ...) => (FLE(D|S) ...)

(EqPtr x y) => (SEQZ (SUB <typ.Uintptr> x y))
(Eq64 x y) => (SEQZ (SUB <x.Type> x y))
(Eq32 x y) && x.Type.IsSigned() => (SEQZ (SUB <x.Type> (SignExt32to64 x) (SignExt32to64 y)))
(Eq32 x y) && !x.Type.IsSigned() => (SEQZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Eq16 x y) => (SEQZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Eq8 x y) => (SEQZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Eq(64|32)F ...) => (FEQ(D|S) ...)

(NeqPtr x y) => (Not (EqPtr x y))
(Neq64 x y) => (Not (Eq64 x y))
(Neq32 x y) => (Not (Eq32 x y))
(Neq16 x y) => (Not (Eq16 x y))
(Neq8 x y) => (Not (Eq8 x y))
(Neq(64|32)F ...) => (FNE(D|S) ...)

// Loads
(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
(Load <t> ptr mem) && ( is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem)
(Load <t> ptr mem) && ( is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && t.IsSigned()) => (MOVWload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem)
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) => (FMOVWload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)

// Stores
(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVDstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (FMOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (FMOVDstore ptr val mem)

// We need to fold MOVaddr into the LD/MOVDstore ops so that the live variable analysis
// knows what variables are being read/written by the ops.
(MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)

(MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
	(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
	(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
	(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
	(MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

(MOVBUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
	(MOVBUload [off1+int32(off2)] {sym} base mem)
(MOVBload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
	(MOVBload [off1+int32(off2)] {sym} base mem)
(MOVHUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
	(MOVHUload [off1+int32(off2)] {sym} base mem)
(MOVHload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
	(MOVHload [off1+int32(off2)] {sym} base mem)
(MOVWUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
	(MOVWUload [off1+int32(off2)] {sym} base mem)
(MOVWload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
	(MOVWload [off1+int32(off2)] {sym} base mem)
(MOVDload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
	(MOVDload [off1+int32(off2)] {sym} base mem)

(MOVBstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
	(MOVBstore [off1+int32(off2)] {sym} base val mem)
(MOVHstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
	(MOVHstore [off1+int32(off2)] {sym} base val mem)
(MOVWstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
	(MOVWstore [off1+int32(off2)] {sym} base val mem)
(MOVDstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
	(MOVDstore [off1+int32(off2)] {sym} base val mem)
(MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVDstorezero [off1+int32(off2)] {sym} ptr mem)

// Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis
// with OffPtr -> ADDI.
(ADDI [c] (MOVaddr [d] {s} x)) && is32Bit(c+int64(d)) => (MOVaddr [int32(c)+d] {s} x)

// Small zeroing
(Zero [0] _ mem) => mem
(Zero [1] ptr mem) => (MOVBstore ptr (MOVDconst [0]) mem)
(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore ptr (MOVDconst [0]) mem)
(Zero [2] ptr mem) =>
	(MOVBstore [1] ptr (MOVDconst [0])
		(MOVBstore ptr (MOVDconst [0]) mem))
(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
	(MOVWstore ptr (MOVDconst [0]) mem)
(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [2] ptr (MOVDconst [0])
		(MOVHstore ptr (MOVDconst [0]) mem))
(Zero [4] ptr mem) =>
	(MOVBstore [3] ptr (MOVDconst [0])
		(MOVBstore [2] ptr (MOVDconst [0])
			(MOVBstore [1] ptr (MOVDconst [0])
				(MOVBstore ptr (MOVDconst [0]) mem))))
(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 =>
	(MOVDstore ptr (MOVDconst [0]) mem)
(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [4] ptr (MOVDconst [0])
		(MOVWstore ptr (MOVDconst [0]) mem))
(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [6] ptr (MOVDconst [0])
		(MOVHstore [4] ptr (MOVDconst [0])
			(MOVHstore [2] ptr (MOVDconst [0])
				(MOVHstore ptr (MOVDconst [0]) mem))))

(Zero [3] ptr mem) =>
	(MOVBstore [2] ptr (MOVDconst [0])
		(MOVBstore [1] ptr (MOVDconst [0])
			(MOVBstore ptr (MOVDconst [0]) mem)))
(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [4] ptr (MOVDconst [0])
		(MOVHstore [2] ptr (MOVDconst [0])
			(MOVHstore ptr (MOVDconst [0]) mem)))
(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [8] ptr (MOVDconst [0])
		(MOVWstore [4] ptr (MOVDconst [0])
			(MOVWstore ptr (MOVDconst [0]) mem)))
(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 =>
	(MOVDstore [8] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 =>
	(MOVDstore [16] ptr (MOVDconst [0])
		(MOVDstore [8] ptr (MOVDconst [0])
			(MOVDstore ptr (MOVDconst [0]) mem)))
(Zero [32] {t} ptr mem) && t.Alignment()%8 == 0 =>
	(MOVDstore [24] ptr (MOVDconst [0])
		(MOVDstore [16] ptr (MOVDconst [0])
			(MOVDstore [8] ptr (MOVDconst [0])
				(MOVDstore ptr (MOVDconst [0]) mem))))

// Medium 8-aligned zeroing uses a Duff's device
// 8 and 128 are magic constants, see runtime/mkduff.go
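// The offset selects the entry point into the unrolled loop so that exactly
// s/8 doublewords are zeroed.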
(Zero [s] {t} ptr mem)
	&& s%8 == 0 && s <= 8*128
	&& t.Alignment()%8 == 0 && !config.noDuffDevice =>
	(DUFFZERO [8 * (128 - s/8)] ptr mem)

// Generic zeroing uses a loop
(Zero [s] {t} ptr mem) =>
	(LoweredZero [t.Alignment()]
		ptr
		(ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.Alignment(), config)]))
		mem)

// Checks
(IsNonNil ...) => (SNEZ ...)
(IsInBounds ...) => (Less64U ...)
(IsSliceInBounds ...) => (Leq64U ...)

// Trivial lowering
(NilCheck ...) => (LoweredNilCheck ...)
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)

// Write barrier.
(WB ...) => (LoweredWB ...)

// Publication barrier as intrinsic
(PubBarrier ...) => (LoweredPubBarrier ...)

(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)

// Small moves
(Move [0] _ _ mem) => mem
(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore dst (MOVHload src mem) mem)
(Move [2] dst src mem) =>
	(MOVBstore [1] dst (MOVBload [1] src mem)
		(MOVBstore dst (MOVBload src mem) mem))
(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
	(MOVWstore dst (MOVWload src mem) mem)
(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [2] dst (MOVHload [2] src mem)
		(MOVHstore dst (MOVHload src mem) mem))
(Move [4] dst src mem) =>
	(MOVBstore [3] dst (MOVBload [3] src mem)
		(MOVBstore [2] dst (MOVBload [2] src mem)
			(MOVBstore [1] dst (MOVBload [1] src mem)
				(MOVBstore dst (MOVBload src mem) mem))))
(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 =>
	(MOVDstore dst (MOVDload src mem) mem)
(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [4] dst (MOVWload [4] src mem)
		(MOVWstore dst (MOVWload src mem) mem))
(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [6] dst (MOVHload [6] src mem)
		(MOVHstore [4] dst (MOVHload [4] src mem)
			(MOVHstore [2] dst (MOVHload [2] src mem)
				(MOVHstore dst (MOVHload src mem) mem))))

(Move [3] dst src mem) =>
	(MOVBstore [2] dst (MOVBload [2] src mem)
		(MOVBstore [1] dst (MOVBload [1] src mem)
			(MOVBstore dst (MOVBload src mem) mem)))
(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [4] dst (MOVHload [4] src mem)
		(MOVHstore [2] dst (MOVHload [2] src mem)
			(MOVHstore dst (MOVHload src mem) mem)))
(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [8] dst (MOVWload [8] src mem)
		(MOVWstore [4] dst (MOVWload [4] src mem)
			(MOVWstore dst (MOVWload src mem) mem)))
(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 =>
	(MOVDstore [8] dst (MOVDload [8] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 =>
	(MOVDstore [16] dst (MOVDload [16] src mem)
		(MOVDstore [8] dst (MOVDload [8] src mem)
			(MOVDstore dst (MOVDload src mem) mem)))
(Move [32] {t} dst src mem) && t.Alignment()%8 == 0 =>
	(MOVDstore [24] dst (MOVDload [24] src mem)
		(MOVDstore [16] dst (MOVDload [16] src mem)
			(MOVDstore [8] dst (MOVDload [8] src mem)
				(MOVDstore dst (MOVDload src mem) mem))))

// Medium 8-aligned move uses a Duff's device
// 16 and 128 are magic constants, see runtime/mkduff.go
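// As with DUFFZERO, the offset selects the entry point into the unrolled copy
// loop so that exactly s/8 doublewords are copied.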
(Move [s] {t} dst src mem)
	&& s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0
	&& !config.noDuffDevice && logLargeCopy(v, s) =>
	(DUFFCOPY [16 * (128 - s/8)] dst src mem)

// Generic move uses a loop
(Move [s] {t} dst src mem) && (s <= 16 || logLargeCopy(v, s)) =>
	(LoweredMove [t.Alignment()]
		dst
		src
		(ADDI <src.Type> [s-moveSize(t.Alignment(), config)] src)
		mem)

// Boolean ops; 0=false, 1=true
(AndB ...) => (AND ...)
(OrB ...) => (OR ...)
(EqB x y) => (SEQZ (SUB <typ.Bool> x y))
(NeqB x y) => (SNEZ (SUB <typ.Bool> x y))
(Not ...) => (SEQZ ...)

// Lowering pointer arithmetic
// TODO: Special handling for SP offsets, like ARM
(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVaddr [int32(off)] ptr)
(OffPtr [off] ptr) && is32Bit(off) => (ADDI [off] ptr)
(OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr)

(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
(Const32F [val]) => (FMVSX (MOVDconst [int64(math.Float32bits(val))]))
(Const64F [val]) => (FMVDX (MOVDconst [int64(math.Float64bits(val))]))
(ConstNil) => (MOVDconst [0])
(ConstBool [val]) => (MOVDconst [int64(b2i(val))])

(Addr {sym} base) => (MOVaddr {sym} [0] base)
(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVaddr {sym} (SPanchored base mem))
(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (MOVaddr {sym} base)

// Calls
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
(TailCall ...) => (CALLtail ...)

// Atomic Intrinsics
(AtomicLoad(Ptr|64|32|8) ...) => (LoweredAtomicLoad(64|64|32|8) ...)
(AtomicStore(PtrNoWB|64|32|8) ...) => (LoweredAtomicStore(64|64|32|8) ...)
(AtomicAdd(64|32) ...) => (LoweredAtomicAdd(64|32) ...)

// AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3, ^((uint8(val) ^ 0xff) << ((ptr & 3) * 8)))
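// (ptr &^ 3) is the aligned 32-bit word address; (ptr & 3) * 8, computed below
// as SLLI [3] (ANDI [3] ptr), is the byte's bit offset within that word.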
(AtomicAnd8 ptr val mem) =>
	(LoweredAtomicAnd32 (ANDI <typ.Uintptr> [^3] ptr)
		(NOT <typ.UInt32> (SLL <typ.UInt32> (XORI <typ.UInt32> [0xff] (ZeroExt8to32 val))
			(SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr)))) mem)

(AtomicAnd32 ...) => (LoweredAtomicAnd32 ...)

(AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)

(AtomicExchange(64|32) ...) => (LoweredAtomicExchange(64|32) ...)

// AtomicOr8(ptr,val) => LoweredAtomicOr32(ptr&^3, uint32(val)<<((ptr&3)*8))
(AtomicOr8 ptr val mem) =>
	(LoweredAtomicOr32 (ANDI <typ.Uintptr> [^3] ptr)
		(SLL <typ.UInt32> (ZeroExt8to32 val)
			(SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr))) mem)

(AtomicOr32 ...) => (LoweredAtomicOr32 ...)

// Conditional branches
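// Branch on the zero extended boolean; the MOVBUreg is removed later when the
// condition is already known to be extended ("Boolean ops are already extended" below).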
(If cond yes no) => (BNEZ (MOVBUreg <typ.UInt64> cond) yes no)

// Optimizations

// Absorb SEQZ/SNEZ into branch.
(BEQZ (SEQZ x) yes no) => (BNEZ x yes no)
(BEQZ (SNEZ x) yes no) => (BEQZ x yes no)
(BNEZ (SEQZ x) yes no) => (BEQZ x yes no)
(BNEZ (SNEZ x) yes no) => (BNEZ x yes no)

// Remove redundant NEG from BEQZ/BNEZ.
(BEQZ (NEG x) yes no) => (BEQZ x yes no)
(BNEZ (NEG x) yes no) => (BNEZ x yes no)

// Negate comparison with FNES/FNED.
(BEQZ (FNES <t> x y) yes no) => (BNEZ (FEQS <t> x y) yes no)
(BNEZ (FNES <t> x y) yes no) => (BEQZ (FEQS <t> x y) yes no)
(BEQZ (FNED <t> x y) yes no) => (BNEZ (FEQD <t> x y) yes no)
(BNEZ (FNED <t> x y) yes no) => (BEQZ (FEQD <t> x y) yes no)

// Convert BEQZ/BNEZ into more optimal branch conditions.
(BEQZ (SUB x y) yes no) => (BEQ x y yes no)
(BNEZ (SUB x y) yes no) => (BNE x y yes no)
(BEQZ (SLT x y) yes no) => (BGE x y yes no)
(BNEZ (SLT x y) yes no) => (BLT x y yes no)
(BEQZ (SLTU x y) yes no) => (BGEU x y yes no)
(BNEZ (SLTU x y) yes no) => (BLTU x y yes no)
(BEQZ (SLTI [x] y) yes no) => (BGE y (MOVDconst [x]) yes no)
(BNEZ (SLTI [x] y) yes no) => (BLT y (MOVDconst [x]) yes no)
(BEQZ (SLTIU [x] y) yes no) => (BGEU y (MOVDconst [x]) yes no)
(BNEZ (SLTIU [x] y) yes no) => (BLTU y (MOVDconst [x]) yes no)

// Convert branches comparing against a zero constant into the more optimal branch-on-zero forms.
(BEQ (MOVDconst [0]) cond yes no) => (BEQZ cond yes no)
(BEQ cond (MOVDconst [0]) yes no) => (BEQZ cond yes no)
(BNE (MOVDconst [0]) cond yes no) => (BNEZ cond yes no)
(BNE cond (MOVDconst [0]) yes no) => (BNEZ cond yes no)
(BLT (MOVDconst [0]) cond yes no) => (BGTZ cond yes no)
(BLT cond (MOVDconst [0]) yes no) => (BLTZ cond yes no)
(BGE (MOVDconst [0]) cond yes no) => (BLEZ cond yes no)
(BGE cond (MOVDconst [0]) yes no) => (BGEZ cond yes no)

// Remove redundant NEG from SEQZ/SNEZ.
(SEQZ (NEG x)) => (SEQZ x)
(SNEZ (NEG x)) => (SNEZ x)

// Remove redundant SEQZ/SNEZ.
(SEQZ (SEQZ x)) => (SNEZ x)
(SEQZ (SNEZ x)) => (SEQZ x)
(SNEZ (SEQZ x)) => (SEQZ x)
(SNEZ (SNEZ x)) => (SNEZ x)

// Store zero.
(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem)

// Boolean ops are already extended.
(MOVBUreg x:((FLES|FLTS|FEQS|FNES) _ _)) => x
(MOVBUreg x:((FLED|FLTD|FEQD|FNED) _ _)) => x
(MOVBUreg x:((SEQZ|SNEZ) _)) => x
(MOVBUreg x:((SLT|SLTU) _ _)) => x

// Avoid extending when already sufficiently masked.
(MOVBreg x:(ANDI [c] y)) && c >= 0 && int64(int8(c)) == c => x
(MOVHreg x:(ANDI [c] y)) && c >= 0 && int64(int16(c)) == c => x
(MOVWreg x:(ANDI [c] y)) && c >= 0 && int64(int32(c)) == c => x
(MOVBUreg x:(ANDI [c] y)) && c >= 0 && int64(uint8(c)) == c => x
(MOVHUreg x:(ANDI [c] y)) && c >= 0 && int64(uint16(c)) == c => x
(MOVWUreg x:(ANDI [c] y)) && c >= 0 && int64(uint32(c)) == c => x

// Combine masking and zero extension.
(MOVBUreg (ANDI [c] x)) && c < 0 => (ANDI [int64(uint8(c))] x)
(MOVHUreg (ANDI [c] x)) && c < 0 => (ANDI [int64(uint16(c))] x)
(MOVWUreg (ANDI [c] x)) && c < 0 => (AND (MOVDconst [int64(uint32(c))]) x)

// Avoid sign/zero extension for consts.
(MOVBreg (MOVDconst [c])) => (MOVDconst [int64(int8(c))])
(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
(MOVBUreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))])
(MOVHUreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
(MOVWUreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])

// Avoid sign/zero extension after properly typed load.
(MOVBreg x:(MOVBload _ _)) => (MOVDreg x)
(MOVHreg x:(MOVBload _ _)) => (MOVDreg x)
(MOVHreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVHreg x:(MOVHload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVBload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVHload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVHUload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVWload _ _)) => (MOVDreg x)
(MOVBUreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVHUreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVHUreg x:(MOVHUload _ _)) => (MOVDreg x)
(MOVWUreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVWUreg x:(MOVHUload _ _)) => (MOVDreg x)
(MOVWUreg x:(MOVWUload _ _)) => (MOVDreg x)

// Avoid zero extension after properly typed atomic operation.
(MOVBUreg x:(Select0 (LoweredAtomicLoad8 _ _))) => (MOVDreg x)
(MOVBUreg x:(Select0 (LoweredAtomicCas32 _ _ _ _))) => (MOVDreg x)
(MOVBUreg x:(Select0 (LoweredAtomicCas64 _ _ _ _))) => (MOVDreg x)

// Avoid sign extension after word arithmetic.
(MOVWreg x:(ADDIW _)) => (MOVDreg x)
(MOVWreg x:(SUBW _ _)) => (MOVDreg x)
(MOVWreg x:(NEGW _)) => (MOVDreg x)
(MOVWreg x:(MULW _ _)) => (MOVDreg x)
(MOVWreg x:(DIVW _ _)) => (MOVDreg x)
(MOVWreg x:(DIVUW _ _)) => (MOVDreg x)
(MOVWreg x:(REMW _ _)) => (MOVDreg x)
(MOVWreg x:(REMUW _ _)) => (MOVDreg x)
(MOVWreg x:(ROLW _ _)) => (MOVDreg x)
(MOVWreg x:(RORW _ _)) => (MOVDreg x)
(MOVWreg x:(RORIW _)) => (MOVDreg x)

// Fold double extensions.
(MOVBreg x:(MOVBreg _)) => (MOVDreg x)
(MOVHreg x:(MOVBreg _)) => (MOVDreg x)
(MOVHreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVHreg x:(MOVHreg _)) => (MOVDreg x)
(MOVWreg x:(MOVBreg _)) => (MOVDreg x)
(MOVWreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVWreg x:(MOVHreg _)) => (MOVDreg x)
(MOVWreg x:(MOVWreg _)) => (MOVDreg x)
(MOVBUreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVHUreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVHUreg x:(MOVHUreg _)) => (MOVDreg x)
(MOVWUreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVWUreg x:(MOVHUreg _)) => (MOVDreg x)
(MOVWUreg x:(MOVWUreg _)) => (MOVDreg x)

// Do not extend before store.
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)

// Replace extend after load with alternate load where possible.
(MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <t> [off] {sym} ptr mem)
(MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHload <t> [off] {sym} ptr mem)
(MOVWreg <t> x:(MOVWUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <t> [off] {sym} ptr mem)
(MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBUload <t> [off] {sym} ptr mem)
(MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHUload <t> [off] {sym} ptr mem)
(MOVWUreg <t> x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWUload <t> [off] {sym} ptr mem)

// If a register move has only 1 use, just use the same register without emitting an instruction.
// MOVDnop does not emit an instruction; it exists only to preserve the type.
(MOVDreg x) && x.Uses == 1 => (MOVDnop x)

// TODO: we should be able to get rid of MOVDnop altogether.
// But for now, this is enough to get rid of lots of them.
(MOVDnop (MOVDconst [c])) => (MOVDconst [c])

// Avoid unnecessary zero and sign extension when right shifting.
(SRAI <t> [x] (MOVWreg y)) && x >= 0 && x <= 31 => (SRAIW <t> [int64(x)] y)
(SRLI <t> [x] (MOVWUreg y)) && x >= 0 && x <= 31 => (SRLIW <t> [int64(x)] y)

// Replace right shifts that exceed size of signed type.
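// A signed value shifted right by at least its width is just copies of its
// sign bit, so shift the sign bit across the whole value instead.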
(SRAI <t> [x] (MOVBreg y)) && x >= 8 => (SRAI [63] (SLLI <t> [56] y))
(SRAI <t> [x] (MOVHreg y)) && x >= 16 => (SRAI [63] (SLLI <t> [48] y))
(SRAI <t> [x] (MOVWreg y)) && x >= 32 => (SRAIW [31] y)

// Eliminate right shifts that exceed size of unsigned type.
(SRLI <t> [x] (MOVBUreg y)) && x >= 8 => (MOVDconst <t> [0])
(SRLI <t> [x] (MOVHUreg y)) && x >= 16 => (MOVDconst <t> [0])
(SRLI <t> [x] (MOVWUreg y)) && x >= 32 => (MOVDconst <t> [0])

// Fold constant into immediate instructions where possible.
(ADD (MOVDconst <t> [val]) x) && is32Bit(val) && !t.IsPtr() => (ADDI [val] x)
(AND (MOVDconst [val]) x) && is32Bit(val) => (ANDI [val] x)
(OR (MOVDconst [val]) x) && is32Bit(val) => (ORI [val] x)
(XOR (MOVDconst [val]) x) && is32Bit(val) => (XORI [val] x)
(ROL x (MOVDconst [val])) => (RORI [int64(int8(-val)&63)] x)
(ROLW x (MOVDconst [val])) => (RORIW [int64(int8(-val)&31)] x)
(ROR x (MOVDconst [val])) => (RORI [int64(val&63)] x)
(RORW x (MOVDconst [val])) => (RORIW [int64(val&31)] x)
(SLL x (MOVDconst [val])) => (SLLI [int64(val&63)] x)
(SRL x (MOVDconst [val])) => (SRLI [int64(val&63)] x)
(SLLW x (MOVDconst [val])) => (SLLIW [int64(val&31)] x)
(SRLW x (MOVDconst [val])) => (SRLIW [int64(val&31)] x)
(SRA x (MOVDconst [val])) => (SRAI [int64(val&63)] x)
(SRAW x (MOVDconst [val])) => (SRAIW [int64(val&31)] x)
(SLT x (MOVDconst [val])) && val >= -2048 && val <= 2047 => (SLTI [val] x)
(SLTU x (MOVDconst [val])) && val >= -2048 && val <= 2047 => (SLTIU [val] x)

// Replace negated left rotation with right rotation.
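// Rotation amounts are taken modulo the register width, so rotating left by
// -y is the same as rotating right by y.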
(ROL x (NEG y)) => (ROR x y)
(ROLW x (NEG y)) => (RORW x y)

// Convert const subtraction into ADDI with negative immediate, where possible.
(SUB x (MOVDconst [val])) && is32Bit(-val) => (ADDI [-val] x)
(SUB <t> (MOVDconst [val]) y) && is32Bit(-val) => (NEG (ADDI <t> [-val] y))

// Subtraction of zero.
(SUB x (MOVDconst [0])) => x
(SUBW x (MOVDconst [0])) => (ADDIW [0] x)

// Subtraction from zero.
(SUB (MOVDconst [0]) x) => (NEG x)
(SUBW (MOVDconst [0]) x) => (NEGW x)

// Fold negation into subtraction.
(NEG (SUB x y)) => (SUB y x)
(NEG <t> s:(ADDI [val] (SUB x y))) && s.Uses == 1 && is32Bit(-val) => (ADDI [-val] (SUB <t> y x))

// Double negation.
(NEG (NEG x)) => x

// Addition of zero or two constants.
(ADDI [0] x) => x
(ADDI [x] (MOVDconst [y])) && is32Bit(x + y) => (MOVDconst [x + y])

// ANDI with all zeros, all ones or two constants.
(ANDI [0] x) => (MOVDconst [0])
(ANDI [-1] x) => x
(ANDI [x] (MOVDconst [y])) => (MOVDconst [x & y])

// ORI with all zeroes, all ones or two constants.
(ORI [0] x) => x
(ORI [-1] x) => (MOVDconst [-1])
(ORI [x] (MOVDconst [y])) => (MOVDconst [x | y])

// Combine operations with immediate.
(ADDI [x] (ADDI [y] z)) && is32Bit(x + y) => (ADDI [x + y] z)
(ANDI [x] (ANDI [y] z)) => (ANDI [x & y] z)
(ORI [x] (ORI [y] z)) => (ORI [x | y] z)

// Negation of a constant.
(NEG (MOVDconst [x])) => (MOVDconst [-x])
(NEGW (MOVDconst [x])) => (MOVDconst [int64(int32(-x))])

// Shift of a constant.
(SLLI [x] (MOVDconst [y])) && is32Bit(y << uint32(x)) => (MOVDconst [y << uint32(x)])
(SRLI [x] (MOVDconst [y])) => (MOVDconst [int64(uint64(y) >> uint32(x))])
(SRAI [x] (MOVDconst [y])) => (MOVDconst [int64(y) >> uint32(x)])

// SLTI/SLTIU with constants.
(SLTI [x] (MOVDconst [y])) => (MOVDconst [b2i(int64(y) < int64(x))])
(SLTIU [x] (MOVDconst [y])) => (MOVDconst [b2i(uint64(y) < uint64(x))])

// SLTI/SLTIU with known outcomes.
(SLTI [x] (ANDI [y] _)) && y >= 0 && int64(y) < int64(x) => (MOVDconst [1])
(SLTIU [x] (ANDI [y] _)) && y >= 0 && uint64(y) < uint64(x) => (MOVDconst [1])
(SLTI [x] (ORI [y] _)) && y >= 0 && int64(y) >= int64(x) => (MOVDconst [0])
(SLTIU [x] (ORI [y] _)) && y >= 0 && uint64(y) >= uint64(x) => (MOVDconst [0])

// SLT/SLTU with known outcomes.
(SLT x x) => (MOVDconst [0])
(SLTU x x) => (MOVDconst [0])

// Deadcode for LoweredMuluhilo
(Select0 m:(LoweredMuluhilo x y)) && m.Uses == 1 => (MULHU x y)
(Select1 m:(LoweredMuluhilo x y)) && m.Uses == 1 => (MUL x y)

(FADD(S|D) a (FMUL(S|D) x y)) && a.Block.Func.useFMA(v) => (FMADD(S|D) x y a)
(FSUB(S|D) a (FMUL(S|D) x y)) && a.Block.Func.useFMA(v) => (FNMSUB(S|D) x y a)
(FSUB(S|D) (FMUL(S|D) x y) a) && a.Block.Func.useFMA(v) => (FMSUB(S|D) x y a)

// Merge negation into fused multiply-add and multiply-subtract.
//
// Key:
//
//   [+ -](x * y [+ -] z).
//    _ N          A S
//                 D U
//                 D B

// Note: multiplication commutativity handled by rule generator.
(F(MADD|NMADD|MSUB|NMSUB)S neg:(FNEGS x) y z) && neg.Uses == 1 => (F(NMSUB|MSUB|NMADD|MADD)S x y z)
(F(MADD|NMADD|MSUB|NMSUB)S x y neg:(FNEGS z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)S x y z)
(F(MADD|NMADD|MSUB|NMSUB)D neg:(FNEGD x) y z) && neg.Uses == 1 => (F(NMSUB|MSUB|NMADD|MADD)D x y z)
(F(MADD|NMADD|MSUB|NMSUB)D x y neg:(FNEGD z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)D x y z)

//
// Optimisations for rva22u64 and above.
//

// Combine left shift and addition.
(ADD (SLLI [1] x) y) && buildcfg.GORISCV64 >= 22 => (SH1ADD x y)
(ADD (SLLI [2] x) y) && buildcfg.GORISCV64 >= 22 => (SH2ADD x y)
(ADD (SLLI [3] x) y) && buildcfg.GORISCV64 >= 22 => (SH3ADD x y)

// Integer minimum and maximum.
(Min64 x y) && buildcfg.GORISCV64 >= 22 => (MIN x y)
(Max64 x y) && buildcfg.GORISCV64 >= 22 => (MAX x y)
(Min64u x y) && buildcfg.GORISCV64 >= 22 => (MINU x y)
(Max64u x y) && buildcfg.GORISCV64 >= 22 => (MAXU x y)