// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

(Add(Ptr|64|32|16|8) ...) => (ADDV ...)
(Add(32|64)F ...) => (ADD(F|D) ...)

(Sub(Ptr|64|32|16|8) ...) => (SUBV ...)
(Sub(32|64)F ...) => (SUB(F|D) ...)

(Mul(64|32|16|8) ...) => (MULV ...)
(Mul(32|64)F ...) => (MUL(F|D) ...)
(Select0 (Mul64uhilo x y)) => (MULHVU x y)
(Select1 (Mul64uhilo x y)) => (MULV x y)
(Select0 (Mul64uover x y)) => (MULV x y)
(Select1 (Mul64uover x y)) => (SGTU <typ.Bool> (MULHVU x y) (MOVVconst <typ.UInt64> [0]))
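// Select1 of Mul64uover is the overflow flag: an unsigned 64x64-bit
// multiply overflows exactly when its high half (MULHVU) is nonzero.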

(Hmul64 ...) => (MULHV ...)
(Hmul64u ...) => (MULHVU ...)
(Hmul32 x y) => (SRAVconst (MULV (SignExt32to64 x) (SignExt32to64 y)) [32])
(Hmul32u x y) => (SRLVconst (MULV (ZeroExt32to64 x) (ZeroExt32to64 y)) [32])
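// Hmul32(u): form the full 64-bit product of the sign-/zero-extended
// operands, then take its upper 32 bits with a shift by 32.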

(Div64 x y) => (DIVV x y)
(Div64u ...) => (DIVVU ...)
(Div32 x y) => (DIVV (SignExt32to64 x) (SignExt32to64 y))
(Div32u x y) => (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))
(Div16 x y) => (DIVV (SignExt16to64 x) (SignExt16to64 y))
(Div16u x y) => (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))
(Div8 x y) => (DIVV (SignExt8to64 x) (SignExt8to64 y))
(Div8u x y) => (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))
(Div(32|64)F ...) => (DIV(F|D) ...)

(Mod64 x y) => (REMV x y)
(Mod64u ...) => (REMVU ...)
(Mod32 x y) => (REMV (SignExt32to64 x) (SignExt32to64 y))
(Mod32u x y) => (REMVU (ZeroExt32to64 x) (ZeroExt32to64 y))
(Mod16 x y) => (REMV (SignExt16to64 x) (SignExt16to64 y))
(Mod16u x y) => (REMVU (ZeroExt16to64 x) (ZeroExt16to64 y))
(Mod8 x y) => (REMV (SignExt8to64 x) (SignExt8to64 y))
(Mod8u x y) => (REMVU (ZeroExt8to64 x) (ZeroExt8to64 y))

(Select0 <t> (Add64carry x y c)) => (ADDV (ADDV <t> x y) c)
(Select1 <t> (Add64carry x y c)) =>
	(OR (SGTU <t> x s:(ADDV <t> x y)) (SGTU <t> s (ADDV <t> s c)))

(Select0 <t> (Sub64borrow x y c)) => (SUBV (SUBV <t> x y) c)
(Select1 <t> (Sub64borrow x y c)) =>
	(OR (SGTU <t> s:(SUBV <t> x y) x) (SGTU <t> (SUBV <t> s c) s))
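// The carry/borrow bit is computed from unsigned wraparound. In Go terms
// (a sketch), with s = x+y (resp. s = x-y), all comparisons unsigned:
//	carry  = x > s || s > s+c
//	borrow = s > x || s-c > s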

// (x + y) / 2 with x>=y => (x - y) / 2 + y
(Avg64u <t> x y) => (ADDV (SRLVconst <t> (SUBV <t> x y) [1]) y)
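// e.g. x=10, y=4: (10-4)/2 + 4 = 7 = (10+4)/2. Unlike x+y, the
// subtraction x-y cannot overflow when x >= y.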

(And(64|32|16|8) ...) => (AND ...)
(Or(64|32|16|8) ...) => (OR ...)
(Xor(64|32|16|8) ...) => (XOR ...)

// shifts
// hardware instruction uses only the low 6 bits of the shift amount
// we compare to 64 to ensure Go semantics for large shifts
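// In Go terms (a sketch):
//	if s < 64 { r = x << s } else { r = 0 }
// MASKEQZ keeps the SLLV result only when the condition (64 >u s) is
// nonzero and yields 0 otherwise.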
(Lsh64x64 <t> x y) => (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Lsh64x32 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Lsh64x16 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Lsh64x8 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

(Lsh32x64 <t> x y) => (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Lsh32x32 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Lsh32x16 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Lsh32x8 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

(Lsh16x64 <t> x y) => (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Lsh16x32 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Lsh16x16 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Lsh16x8 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

(Lsh8x64 <t> x y) => (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Lsh8x32 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Lsh8x16 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Lsh8x8 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

(Rsh64Ux64 <t> x y) => (MASKEQZ (SRLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Rsh64Ux32 <t> x y) => (MASKEQZ (SRLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Rsh64Ux16 <t> x y) => (MASKEQZ (SRLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Rsh64Ux8 <t> x y) => (MASKEQZ (SRLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

(Rsh32Ux64 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt32to64 x) y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Rsh32Ux32 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Rsh32Ux16 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Rsh32Ux8 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

(Rsh16Ux64 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Rsh16Ux32 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Rsh16Ux16 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Rsh16Ux8 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

(Rsh8Ux64 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Rsh8Ux32 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Rsh8Ux16 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Rsh8Ux8 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

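// For signed right shifts, an over-large shift amount must produce the
// sign fill rather than 0, so instead of masking the result we saturate
// the shift amount: when y > 63, -(y > 63) is all ones, and ORing it in
// forces an effective shift amount of 63.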
(Rsh64x64 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
(Rsh64x32 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
(Rsh64x16 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
(Rsh64x8 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))

(Rsh32x64 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
(Rsh32x32 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
(Rsh32x16 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
(Rsh32x8 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))

(Rsh16x64 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
(Rsh16x32 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
(Rsh16x16 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
(Rsh16x8 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))

(Rsh8x64 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
(Rsh8x32 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
(Rsh8x16 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
(Rsh8x8 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))

// rotates
(RotateLeft8 <t> x (MOVVconst [c])) => (Or8 (Lsh8x64 <t> x (MOVVconst [c&7])) (Rsh8Ux64 <t> x (MOVVconst [-c&7])))
(RotateLeft16 <t> x (MOVVconst [c])) => (Or16 (Lsh16x64 <t> x (MOVVconst [c&15])) (Rsh16Ux64 <t> x (MOVVconst [-c&15])))
(RotateLeft32 x y) => (ROTR x (NEGV <y.Type> y))
(RotateLeft64 x y) => (ROTRV x (NEGV <y.Type> y))
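// Rotating left by y is the same as rotating right by -y (mod the
// rotation width), hence the NEGV.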

// unary ops
(Neg(64|32|16|8) ...) => (NEGV ...)
(Neg(32|64)F ...) => (NEG(F|D) ...)

(Com(64|32|16|8) x) => (NOR (MOVVconst [0]) x)

(Sqrt ...) => (SQRTD ...)
(Sqrt32 ...) => (SQRTF ...)

// boolean ops -- booleans are represented with 0=false, 1=true
(AndB ...) => (AND ...)
(OrB ...) => (OR ...)
(EqB x y) => (XOR (MOVVconst [1]) (XOR <typ.Bool> x y))
(NeqB ...) => (XOR ...)
(Not x) => (XORconst [1] x)

// constants
(Const(64|32|16|8) [val]) => (MOVVconst [int64(val)])
(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)])
(ConstNil) => (MOVVconst [0])
(ConstBool [t]) => (MOVVconst [int64(b2i(t))])

(Slicemask <t> x) => (SRAVconst (NEGV <t> x) [63])
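// Slicemask: 0 if x == 0, all ones otherwise. For x > 0 the sign bit of
// -x is set, and the arithmetic shift by 63 broadcasts it to every bit.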

// truncations
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8 ...) => (Copy ...)
(Trunc32to8 ...) => (Copy ...)
(Trunc32to16 ...) => (Copy ...)
(Trunc64to8 ...) => (Copy ...)
(Trunc64to16 ...) => (Copy ...)
(Trunc64to32 ...) => (Copy ...)

// Zero-/Sign-extensions
(ZeroExt8to16 ...) => (MOVBUreg ...)
(ZeroExt8to32 ...) => (MOVBUreg ...)
(ZeroExt16to32 ...) => (MOVHUreg ...)
(ZeroExt8to64 ...) => (MOVBUreg ...)
(ZeroExt16to64 ...) => (MOVHUreg ...)
(ZeroExt32to64 ...) => (MOVWUreg ...)

(SignExt8to16 ...) => (MOVBreg ...)
(SignExt8to32 ...) => (MOVBreg ...)
(SignExt16to32 ...) => (MOVHreg ...)
(SignExt8to64 ...) => (MOVBreg ...)
(SignExt16to64 ...) => (MOVHreg ...)
(SignExt32to64 ...) => (MOVWreg ...)

// float <=> int conversion
(Cvt32to32F ...) => (MOVWF ...)
(Cvt32to64F ...) => (MOVWD ...)
(Cvt64to32F ...) => (MOVVF ...)
(Cvt64to64F ...) => (MOVVD ...)
(Cvt32Fto32 ...) => (TRUNCFW ...)
(Cvt64Fto32 ...) => (TRUNCDW ...)
(Cvt32Fto64 ...) => (TRUNCFV ...)
(Cvt64Fto64 ...) => (TRUNCDV ...)
(Cvt32Fto64F ...) => (MOVFD ...)
(Cvt64Fto32F ...) => (MOVDF ...)

(CvtBoolToUint8 ...) => (Copy ...)

(Round(32|64)F ...) => (Copy ...)

// comparisons
(Eq8 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Eq16 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Eq32 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Eq64 x y) => (SGTU (MOVVconst [1]) (XOR x y))
(EqPtr x y) => (SGTU (MOVVconst [1]) (XOR x y))
(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y))
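// For the integer cases above: x == y iff x^y == 0 iff 1 >u x^y, hence
// the SGTU against constant 1.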

(Neq8 x y) => (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
(Neq16 x y) => (SGTU (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)) (MOVVconst [0]))
(Neq32 x y) => (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
(Neq64 x y) => (SGTU (XOR x y) (MOVVconst [0]))
(NeqPtr x y) => (SGTU (XOR x y) (MOVVconst [0]))
(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y))
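// Dually, for the integer cases: x != y iff x^y >u 0.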

(Less8 x y) => (SGT (SignExt8to64 y) (SignExt8to64 x))
(Less16 x y) => (SGT (SignExt16to64 y) (SignExt16to64 x))
(Less32 x y) => (SGT (SignExt32to64 y) (SignExt32to64 x))
(Less64 x y) => (SGT y x)
(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN

(Less8U x y) => (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x))
(Less16U x y) => (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x))
(Less32U x y) => (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
(Less64U x y) => (SGTU y x)

(Leq8 x y) => (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y)))
(Leq16 x y) => (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y)))
(Leq32 x y) => (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y)))
(Leq64 x y) => (XOR (MOVVconst [1]) (SGT x y))
(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN

(Leq8U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Leq16U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Leq32U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Leq64U x y) => (XOR (MOVVconst [1]) (SGTU x y))

(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVVaddr [int32(off)] ptr)
(OffPtr [off] ptr) => (ADDVconst [off] ptr)

(Addr {sym} base) => (MOVVaddr {sym} base)
(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVVaddr {sym} (SPanchored base mem))
(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (MOVVaddr {sym} base)

// loads
(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && t.IsSigned()) => (MOVWload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem)
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVVload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) => (MOVFload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) => (MOVDload ptr mem)

// stores
(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVVstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (MOVFstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (MOVDstore ptr val mem)

// zeroing
(Zero [0] _ mem) => mem
(Zero [1] ptr mem) => (MOVBstore ptr (MOVVconst [0]) mem)
(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore ptr (MOVVconst [0]) mem)
(Zero [2] ptr mem) =>
	(MOVBstore [1] ptr (MOVVconst [0])
		(MOVBstore [0] ptr (MOVVconst [0]) mem))
(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
	(MOVWstore ptr (MOVVconst [0]) mem)
(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [2] ptr (MOVVconst [0])
		(MOVHstore [0] ptr (MOVVconst [0]) mem))
(Zero [4] ptr mem) =>
	(MOVBstore [3] ptr (MOVVconst [0])
		(MOVBstore [2] ptr (MOVVconst [0])
			(MOVBstore [1] ptr (MOVVconst [0])
				(MOVBstore [0] ptr (MOVVconst [0]) mem))))
(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 =>
	(MOVVstore ptr (MOVVconst [0]) mem)
(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [4] ptr (MOVVconst [0])
		(MOVWstore [0] ptr (MOVVconst [0]) mem))
(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [6] ptr (MOVVconst [0])
		(MOVHstore [4] ptr (MOVVconst [0])
			(MOVHstore [2] ptr (MOVVconst [0])
				(MOVHstore [0] ptr (MOVVconst [0]) mem))))

(Zero [3] ptr mem) =>
	(MOVBstore [2] ptr (MOVVconst [0])
		(MOVBstore [1] ptr (MOVVconst [0])
			(MOVBstore [0] ptr (MOVVconst [0]) mem)))
(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [4] ptr (MOVVconst [0])
		(MOVHstore [2] ptr (MOVVconst [0])
			(MOVHstore [0] ptr (MOVVconst [0]) mem)))
(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [8] ptr (MOVVconst [0])
		(MOVWstore [4] ptr (MOVVconst [0])
			(MOVWstore [0] ptr (MOVVconst [0]) mem)))
(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 =>
	(MOVVstore [8] ptr (MOVVconst [0])
		(MOVVstore [0] ptr (MOVVconst [0]) mem))
(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 =>
	(MOVVstore [16] ptr (MOVVconst [0])
		(MOVVstore [8] ptr (MOVVconst [0])
			(MOVVstore [0] ptr (MOVVconst [0]) mem)))

// medium zeroing uses a duff device
// 8 and 128 are magic constants; see runtime/mkduff.go
(Zero [s] {t} ptr mem)
	&& s%8 == 0 && s > 24 && s <= 8*128
	&& t.Alignment()%8 == 0 && !config.noDuffDevice =>
	(DUFFZERO [8 * (128 - s/8)] ptr mem)
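// e.g. Zero [32] yields offset 8*(128-4) = 992: execution enters
// duffzero four store/advance blocks (8 bytes of code each) before the
// end, zeroing 32 bytes.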

// large or unaligned zeroing uses a loop
(Zero [s] {t} ptr mem)
	&& (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0 =>
	(LoweredZero [t.Alignment()]
		ptr
		(ADDVconst <ptr.Type> ptr [s-moveSize(t.Alignment(), config)])
		mem)
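// The ADDVconst operand computes the address of the last unit to zero;
// the generated loop advances ptr one unit at a time until it reaches
// that address.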

// moves
(Move [0] _ _ mem) => mem
(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore dst (MOVHload src mem) mem)
(Move [2] dst src mem) =>
	(MOVBstore [1] dst (MOVBload [1] src mem)
		(MOVBstore dst (MOVBload src mem) mem))
(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
	(MOVWstore dst (MOVWload src mem) mem)
(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [2] dst (MOVHload [2] src mem)
		(MOVHstore dst (MOVHload src mem) mem))
(Move [4] dst src mem) =>
	(MOVBstore [3] dst (MOVBload [3] src mem)
		(MOVBstore [2] dst (MOVBload [2] src mem)
			(MOVBstore [1] dst (MOVBload [1] src mem)
				(MOVBstore dst (MOVBload src mem) mem))))
(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 =>
	(MOVVstore dst (MOVVload src mem) mem)
(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [4] dst (MOVWload [4] src mem)
		(MOVWstore dst (MOVWload src mem) mem))
(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [6] dst (MOVHload [6] src mem)
		(MOVHstore [4] dst (MOVHload [4] src mem)
			(MOVHstore [2] dst (MOVHload [2] src mem)
				(MOVHstore dst (MOVHload src mem) mem))))

(Move [3] dst src mem) =>
	(MOVBstore [2] dst (MOVBload [2] src mem)
		(MOVBstore [1] dst (MOVBload [1] src mem)
			(MOVBstore dst (MOVBload src mem) mem)))
(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [4] dst (MOVHload [4] src mem)
		(MOVHstore [2] dst (MOVHload [2] src mem)
			(MOVHstore dst (MOVHload src mem) mem)))
(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [8] dst (MOVWload [8] src mem)
		(MOVWstore [4] dst (MOVWload [4] src mem)
			(MOVWstore dst (MOVWload src mem) mem)))
(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 =>
	(MOVVstore [8] dst (MOVVload [8] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 =>
	(MOVVstore [16] dst (MOVVload [16] src mem)
		(MOVVstore [8] dst (MOVVload [8] src mem)
			(MOVVstore dst (MOVVload src mem) mem)))

// medium move uses a duff device
(Move [s] {t} dst src mem)
	&& s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0
	&& !config.noDuffDevice && logLargeCopy(v, s) =>
	(DUFFCOPY [16 * (128 - s/8)] dst src mem)
// 16 and 128 are magic constants. 16 is the number of bytes to encode one
// copy block of four 4-byte instructions (Rsrc, Rtmp, and Rdst stand in
// for the actual registers used):
//	MOVV	(Rsrc), Rtmp
//	ADDV	$8, Rsrc
//	MOVV	Rtmp, (Rdst)
//	ADDV	$8, Rdst
// and 128 is the number of such blocks. See runtime/duff_loong64.s:duffcopy.

// large or unaligned move uses a loop
(Move [s] {t} dst src mem)
	&& s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0 =>
	(LoweredMove [t.Alignment()]
		dst
		src
		(ADDVconst <src.Type> src [s-moveSize(t.Alignment(), config)])
		mem)

// calls
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
(TailCall ...) => (CALLtail ...)

// atomic intrinsics
(AtomicLoad(8|32|64) ...) => (LoweredAtomicLoad(8|32|64) ...)
(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...)

(AtomicStore(8|32|64) ...) => (LoweredAtomicStore(8|32|64) ...)
(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...)

(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)

(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)

(AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)

// checks
(NilCheck ...) => (LoweredNilCheck ...)
(IsNonNil ptr) => (SGTU ptr (MOVVconst [0]))
(IsInBounds idx len) => (SGTU len idx)
(IsSliceInBounds idx len) => (XOR (MOVVconst [1]) (SGTU idx len))
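// idx <= len iff !(idx >u len); the comparison result is 0 or 1, so XOR
// with 1 negates it.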

// pseudo-ops
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)

(If cond yes no) => (NE (MOVBUreg <typ.UInt64> cond) yes no)

// Write barrier.
(WB ...) => (LoweredWB ...)

(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)

(CondSelect <t> x y cond) => (OR (MASKEQZ <t> x cond) (MASKNEZ <t> y cond))
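// Branch-free select. In Go terms (a sketch):
//	if cond != 0 { r = x } else { r = y }
// MASKEQZ keeps x when cond != 0; MASKNEZ keeps y when cond == 0.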

// Optimizations

// Absorb boolean tests into block
(NE (FPFlagTrue cmp) yes no) => (FPT cmp yes no)
(NE (FPFlagFalse cmp) yes no) => (FPF cmp yes no)
(EQ (FPFlagTrue cmp) yes no) => (FPF cmp yes no)
(EQ (FPFlagFalse cmp) yes no) => (FPT cmp yes no)
(NE (XORconst [1] cmp:(SGT _ _)) yes no) => (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTconst _)) yes no) => (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQ cmp yes no)
(EQ (XORconst [1] cmp:(SGT _ _)) yes no) => (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTconst _)) yes no) => (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NE cmp yes no)
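// XORconst [1] of a 0/1 comparison result is logical negation, so the
// branch sense is inverted instead of materializing the boolean.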
(NE (SGTUconst [1] x) yes no) => (EQ x yes no)
(EQ (SGTUconst [1] x) yes no) => (NE x yes no)
(NE (SGTU x (MOVVconst [0])) yes no) => (NE x yes no)
(EQ (SGTU x (MOVVconst [0])) yes no) => (EQ x yes no)
(NE (SGTconst [0] x) yes no) => (LTZ x yes no)
(EQ (SGTconst [0] x) yes no) => (GEZ x yes no)
(NE (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no)
(EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no)
(MOVBUreg x:((SGT|SGTU) _ _)) => x

// fold offset into address
(ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) => (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr)

// fold address into load/store
// Do not fold global variable accesses in -dynlink mode, where they will
// be rewritten to use the GOT via REGTMP, which currently cannot handle
// large offsets.
(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1+int32(off2)] {sym} ptr mem)

(MOV(B|H|W|V|F|D)store [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|H|W|V|F|D)store [off1+int32(off2)] {sym} ptr val mem)

(MOV(B|H|W|V)storezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|H|W|V)storezero [off1+int32(off2)] {sym} ptr mem)

(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
	&& is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)

(MOV(B|H|W|V|F|D)store [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
	&& is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|H|W|V|F|D)store [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)

(MOV(B|H|W|V)storezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
	&& is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|H|W|V)storezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)

(LoweredAtomicStore(32|64) ptr (MOVVconst [0]) mem) => (LoweredAtomicStorezero(32|64) ptr mem)
(LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst32 [int32(c)] ptr mem)
(LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst64 [c] ptr mem)

// don't extend after a load that already produces the extended value
(MOVBreg x:(MOVBload _ _)) => (MOVVreg x)
(MOVBUreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVHreg x:(MOVBload _ _)) => (MOVVreg x)
(MOVHreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVHreg x:(MOVHload _ _)) => (MOVVreg x)
(MOVHUreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVHUreg x:(MOVHUload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVBload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVHload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVHUload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVWload _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVHUload _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVWUload _ _)) => (MOVVreg x)

// fold double extensions
(MOVBreg x:(MOVBreg _)) => (MOVVreg x)
(MOVBUreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVHreg x:(MOVBreg _)) => (MOVVreg x)
(MOVHreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVHreg x:(MOVHreg _)) => (MOVVreg x)
(MOVHUreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVHUreg x:(MOVHUreg _)) => (MOVVreg x)
(MOVWreg x:(MOVBreg _)) => (MOVVreg x)
(MOVWreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVWreg x:(MOVHreg _)) => (MOVVreg x)
(MOVWreg x:(MOVWreg _)) => (MOVVreg x)
(MOVWUreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVWUreg x:(MOVHUreg _)) => (MOVVreg x)
(MOVWUreg x:(MOVWUreg _)) => (MOVVreg x)

// don't extend before store
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)

// if a register move has only 1 use, just use the same register without
// emitting an instruction
// MOVVnop doesn't emit an instruction; it only ensures the type.
(MOVVreg x) && x.Uses == 1 => (MOVVnop x)

// fold constant into arithmetic ops
(ADDV x (MOVVconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDVconst [c] x)
(SUBV x (MOVVconst [c])) && is32Bit(c) => (SUBVconst [c] x)
(AND x (MOVVconst [c])) && is32Bit(c) => (ANDconst [c] x)
(OR x (MOVVconst [c])) && is32Bit(c) => (ORconst [c] x)
(XOR x (MOVVconst [c])) && is32Bit(c) => (XORconst [c] x)
(NOR x (MOVVconst [c])) && is32Bit(c) => (NORconst [c] x)

(SLLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0])
(SRLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0])
(SRAV x (MOVVconst [c])) && uint64(c)>=64 => (SRAVconst x [63])
(SLLV x (MOVVconst [c])) => (SLLVconst x [c])
(SRLV x (MOVVconst [c])) => (SRLVconst x [c])
(SRAV x (MOVVconst [c])) => (SRAVconst x [c])
(ROTR x (MOVVconst [c])) => (ROTRconst x [c&31])
(ROTRV x (MOVVconst [c])) => (ROTRVconst x [c&63])

(SGT (MOVVconst [c]) x) && is32Bit(c) => (SGTconst [c] x)
(SGTU (MOVVconst [c]) x) && is32Bit(c) => (SGTUconst [c] x)

// mul by constant
(MULV x (MOVVconst [-1])) => (NEGV x)
(MULV _ (MOVVconst [0])) => (MOVVconst [0])
(MULV x (MOVVconst [1])) => x
(MULV x (MOVVconst [c])) && isPowerOfTwo64(c) => (SLLVconst [log64(c)] x)
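// e.g. x*8 => x<<3.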

// div by constant
(DIVVU x (MOVVconst [1])) => x
(DIVVU x (MOVVconst [c])) && isPowerOfTwo64(c) => (SRLVconst [log64(c)] x)
(REMVU _ (MOVVconst [1])) => (MOVVconst [0]) // mod
(REMVU x (MOVVconst [c])) && isPowerOfTwo64(c) => (ANDconst [c-1] x) // mod
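// e.g. for unsigned x: x/8 => x>>3 and x%8 => x&7.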

// generic simplifications
(ADDV x (NEGV y)) => (SUBV x y)
(SUBV x x) => (MOVVconst [0])
(SUBV (MOVVconst [0]) x) => (NEGV x)
(AND x x) => x
(OR x x) => x
(XOR x x) => (MOVVconst [0])

// remove redundant *const ops
(ADDVconst [0] x) => x
(SUBVconst [0] x) => x
(ANDconst [0] _) => (MOVVconst [0])
(ANDconst [-1] x) => x
(ORconst [0] x) => x
(ORconst [-1] _) => (MOVVconst [-1])
(XORconst [0] x) => x
(XORconst [-1] x) => (NORconst [0] x)
(MASKEQZ (MOVVconst [0]) cond) => (MOVVconst [0])
(MASKNEZ (MOVVconst [0]) cond) => (MOVVconst [0])
(MASKEQZ x (MOVVconst [c])) && c == 0 => (MOVVconst [0])
(MASKEQZ x (MOVVconst [c])) && c != 0 => x

// generic constant folding
(ADDVconst [c] (MOVVconst [d])) => (MOVVconst [c+d])
(ADDVconst [c] (ADDVconst [d] x)) && is32Bit(c+d) => (ADDVconst [c+d] x)
(ADDVconst [c] (SUBVconst [d] x)) && is32Bit(c-d) => (ADDVconst [c-d] x)
(SUBVconst [c] (MOVVconst [d])) => (MOVVconst [d-c])
(SUBVconst [c] (SUBVconst [d] x)) && is32Bit(-c-d) => (ADDVconst [-c-d] x)
(SUBVconst [c] (ADDVconst [d] x)) && is32Bit(-c+d) => (ADDVconst [-c+d] x)
(SLLVconst [c] (MOVVconst [d])) => (MOVVconst [d<<uint64(c)])
(SRLVconst [c] (MOVVconst [d])) => (MOVVconst [int64(uint64(d)>>uint64(c))])
(SRAVconst [c] (MOVVconst [d])) => (MOVVconst [d>>uint64(c)])
(MULV (MOVVconst [c]) (MOVVconst [d])) => (MOVVconst [c*d])
(DIVV (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [c/d])
(DIVVU (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [int64(uint64(c)/uint64(d))])
(REMV (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [c%d]) // mod
(REMVU (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [int64(uint64(c)%uint64(d))]) // mod
(ANDconst [c] (MOVVconst [d])) => (MOVVconst [c&d])
(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
(ORconst [c] (MOVVconst [d])) => (MOVVconst [c|d])
(ORconst [c] (ORconst [d] x)) && is32Bit(c|d) => (ORconst [c|d] x)
(XORconst [c] (MOVVconst [d])) => (MOVVconst [c^d])
(XORconst [c] (XORconst [d] x)) && is32Bit(c^d) => (XORconst [c^d] x)
(NORconst [c] (MOVVconst [d])) => (MOVVconst [^(c|d)])
(NEGV (MOVVconst [c])) => (MOVVconst [-c])
(MOVBreg (MOVVconst [c])) => (MOVVconst [int64(int8(c))])
(MOVBUreg (MOVVconst [c])) => (MOVVconst [int64(uint8(c))])
(MOVHreg (MOVVconst [c])) => (MOVVconst [int64(int16(c))])
(MOVHUreg (MOVVconst [c])) => (MOVVconst [int64(uint16(c))])
(MOVWreg (MOVVconst [c])) => (MOVVconst [int64(int32(c))])
(MOVWUreg (MOVVconst [c])) => (MOVVconst [int64(uint32(c))])
(MOVVreg (MOVVconst [c])) => (MOVVconst [c])

// constant comparisons
(SGTconst [c] (MOVVconst [d])) && c>d => (MOVVconst [1])
(SGTconst [c] (MOVVconst [d])) && c<=d => (MOVVconst [0])
(SGTUconst [c] (MOVVconst [d])) && uint64(c)>uint64(d) => (MOVVconst [1])
(SGTUconst [c] (MOVVconst [d])) && uint64(c)<=uint64(d) => (MOVVconst [0])

// other known comparisons
(SGTconst [c] (MOVBreg _)) && 0x7f < c => (MOVVconst [1])
(SGTconst [c] (MOVBreg _)) && c <= -0x80 => (MOVVconst [0])
(SGTconst [c] (MOVBUreg _)) && 0xff < c => (MOVVconst [1])
(SGTconst [c] (MOVBUreg _)) && c < 0 => (MOVVconst [0])
(SGTUconst [c] (MOVBUreg _)) && 0xff < uint64(c) => (MOVVconst [1])
(SGTconst [c] (MOVHreg _)) && 0x7fff < c => (MOVVconst [1])
(SGTconst [c] (MOVHreg _)) && c <= -0x8000 => (MOVVconst [0])
(SGTconst [c] (MOVHUreg _)) && 0xffff < c => (MOVVconst [1])
(SGTconst [c] (MOVHUreg _)) && c < 0 => (MOVVconst [0])
(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint64(c) => (MOVVconst [1])
(SGTconst [c] (MOVWUreg _)) && c < 0 => (MOVVconst [0])
(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c => (MOVVconst [1])
(SGTUconst [c] (ANDconst [m] _)) && uint64(m) < uint64(c) => (MOVVconst [1])
(SGTconst [c] (SRLVconst _ [d])) && 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1])
(SGTUconst [c] (SRLVconst _ [d])) && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1])

// absorb constants into branches
(EQ (MOVVconst [0]) yes no) => (First yes no)
(EQ (MOVVconst [c]) yes no) && c != 0 => (First no yes)
(NE (MOVVconst [0]) yes no) => (First no yes)
(NE (MOVVconst [c]) yes no) && c != 0 => (First yes no)
(LTZ (MOVVconst [c]) yes no) && c < 0 => (First yes no)
(LTZ (MOVVconst [c]) yes no) && c >= 0 => (First no yes)
(LEZ (MOVVconst [c]) yes no) && c <= 0 => (First yes no)
(LEZ (MOVVconst [c]) yes no) && c > 0 => (First no yes)
(GTZ (MOVVconst [c]) yes no) && c > 0 => (First yes no)
(GTZ (MOVVconst [c]) yes no) && c <= 0 => (First no yes)
(GEZ (MOVVconst [c]) yes no) && c >= 0 => (First yes no)
(GEZ (MOVVconst [c]) yes no) && c < 0 => (First no yes)

// SGT/SGTU with known outcomes.
(SGT x x) => (MOVVconst [0])
(SGTU x x) => (MOVVconst [0])
665
View as plain text