// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

(Add(Ptr|64|32|16|8) ...) => (ADDV ...)
(Add(32|64)F ...) => (ADD(F|D) ...)

(Sub(Ptr|64|32|16|8) ...) => (SUBV ...)
(Sub(32|64)F ...) => (SUB(F|D) ...)

(Mul(64|32|16|8) ...) => (MULV ...)
(Mul(32|64)F ...) => (MUL(F|D) ...)
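// Mul64uhilo: Select0 is the high 64 bits of x*y (MULHVU), Select1 the low 64 bits (MULV).
// Mul64uover: Select0 is the low 64 bits; the overflow flag (Select1) is set iff the high
// 64 bits are nonzero.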
(Select0 (Mul64uhilo x y)) => (MULHVU x y)
(Select1 (Mul64uhilo x y)) => (MULV x y)
(Select0 (Mul64uover x y)) => (MULV x y)
(Select1 (Mul64uover x y)) => (SGTU <typ.Bool> (MULHVU x y) (MOVVconst <typ.UInt64> [0]))

(Hmul64 ...) => (MULHV ...)
(Hmul64u ...) => (MULHVU ...)
(Hmul32 ...) => (MULH ...)
(Hmul32u ...) => (MULHU ...)

(Div64 x y) => (DIVV x y)
(Div64u ...) => (DIVVU ...)
(Div32 x y) => (DIVV (SignExt32to64 x) (SignExt32to64 y))
(Div32u x y) => (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))
(Div16 x y) => (DIVV (SignExt16to64 x) (SignExt16to64 y))
(Div16u x y) => (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))
(Div8 x y) => (DIVV (SignExt8to64 x) (SignExt8to64 y))
(Div8u x y) => (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))
(Div(32|64)F ...) => (DIV(F|D) ...)

(Mod64 x y) => (REMV x y)
(Mod64u ...) => (REMVU ...)
(Mod32 x y) => (REMV (SignExt32to64 x) (SignExt32to64 y))
(Mod32u x y) => (REMVU (ZeroExt32to64 x) (ZeroExt32to64 y))
(Mod16 x y) => (REMV (SignExt16to64 x) (SignExt16to64 y))
(Mod16u x y) => (REMVU (ZeroExt16to64 x) (ZeroExt16to64 y))
(Mod8 x y) => (REMV (SignExt8to64 x) (SignExt8to64 y))
(Mod8u x y) => (REMVU (ZeroExt8to64 x) (ZeroExt8to64 y))

(Select0 <t> (Add64carry x y c)) => (ADDV (ADDV <t> x y) c)
(Select1 <t> (Add64carry x y c)) =>
	(OR (SGTU <t> x s:(ADDV <t> x y)) (SGTU <t> s (ADDV <t> s c)))
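// The carry-out is 1 iff x+y wraps (x >u x+y) or adding c to that sum wraps.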

(Select0 <t> (Sub64borrow x y c)) => (SUBV (SUBV <t> x y) c)
(Select1 <t> (Sub64borrow x y c)) =>
	(OR (SGTU <t> s:(SUBV <t> x y) x) (SGTU <t> (SUBV <t> s c) s))
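// The borrow-out is 1 iff x-y wraps (x-y >u x) or subtracting c from that difference wraps.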

// (x + y) / 2 with x>=y => (x - y) / 2 + y
(Avg64u <t> x y) => (ADDV (SRLVconst <t> (SUBV <t> x y) [1]) y)
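// e.g. x=7, y=3: (7-3)>>1 + 3 = 5 = (7+3)/2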

(And(64|32|16|8) ...) => (AND ...)
(Or(64|32|16|8) ...) => (OR ...)
(Xor(64|32|16|8) ...) => (XOR ...)

// shifts
// the 64-bit shift instructions use only the low 6 bits of the shift amount,
// and the 32-bit ones only the low 5 bits; for unbounded shifts we compare
// against the width of the shift instruction used (64 or 32) to ensure Go
// semantics for large shifts

// left shift
(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLLV x y)
(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLLV x y)
(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SLLV x y)

(Lsh64x64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Lsh64x32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Lsh64x16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Lsh64x8 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

(Lsh32x64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLL <t> x y) (SGTU (MOVVconst <typ.UInt64> [32]) y))
(Lsh32x32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLL <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt32to64 y)))
(Lsh32x16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLL <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt16to64 y)))
(Lsh32x8 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLL <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt8to64 y)))

(Lsh16x64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Lsh16x32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Lsh16x16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Lsh16x8 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

(Lsh8x64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Lsh8x32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Lsh8x16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Lsh8x8 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

// unsigned right shift
(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRLV x y)
(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL x y)
(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRLV (ZeroExt16to64 x) y)
(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRLV (ZeroExt8to64 x) y)

(Rsh64Ux64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Rsh64Ux32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Rsh64Ux16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Rsh64Ux8 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

(Rsh32Ux64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRL <t> x y) (SGTU (MOVVconst <typ.UInt64> [32]) y))
(Rsh32Ux32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRL <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt32to64 y)))
(Rsh32Ux16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRL <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt16to64 y)))
(Rsh32Ux8 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRL <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt8to64 y)))

(Rsh16Ux64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Rsh16Ux32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Rsh16Ux16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Rsh16Ux8 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

(Rsh8Ux64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Rsh8Ux32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Rsh8Ux16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Rsh8Ux8 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

// signed right shift
(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAV x y)
(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA x y)
(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAV (SignExt16to64 x) y)
(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAV (SignExt8to64 x) y)

(Rsh64x64 <t> x y) && !shiftIsBounded(v) => (SRAV x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
(Rsh64x32 <t> x y) && !shiftIsBounded(v) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
(Rsh64x16 <t> x y) && !shiftIsBounded(v) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
(Rsh64x8 <t> x y) && !shiftIsBounded(v) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))

(Rsh32x64 <t> x y) && !shiftIsBounded(v) => (SRA x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [31]))) y))
(Rsh32x32 <t> x y) && !shiftIsBounded(v) => (SRA x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [31]))) (ZeroExt32to64 y)))
(Rsh32x16 <t> x y) && !shiftIsBounded(v) => (SRA x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [31]))) (ZeroExt16to64 y)))
(Rsh32x8 <t> x y) && !shiftIsBounded(v) => (SRA x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [31]))) (ZeroExt8to64 y)))

(Rsh16x64 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
(Rsh16x32 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
(Rsh16x16 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
(Rsh16x8 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))

(Rsh8x64 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
(Rsh8x32 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
(Rsh8x16 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
(Rsh8x8 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))


// revb2h
// ((x>>8) | (x<<8)) => (REVB2H x), the type of x is uint16
((OR|XOR|ADDV) <typ.UInt16> (SRLVconst [8] <typ.UInt16> x) (SLLVconst [8] <typ.UInt16> x)) => (REVB2H x)

// ((x & 0xff00ff00)>>8) | ((x & 0x00ff00ff)<<8), the type of x is uint32
((OR|XOR|ADDV) (SRLconst [8] (ANDconst [c1] x)) (SLLconst [8] (ANDconst [c2] x)))
	&& uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff
	=> (REVB2H x)

// revb4h
// ((x & 0xff00ff00ff00ff00)>>8) | ((x & 0x00ff00ff00ff00ff)<<8), the type of x is uint64
((OR|XOR|ADDV) (SRLVconst [8] (AND (MOVVconst [c1]) x)) (SLLVconst [8] (AND (MOVVconst [c2]) x)))
	&& uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff
	=> (REVB4H x)

// ((x & 0xff00ff00)>>8) | ((x & 0x00ff00ff)<<8), the type of x is uint64
((OR|XOR|ADDV) (SRLVconst [8] (AND (MOVVconst [c1]) x)) (SLLVconst [8] (ANDconst [c2] x)))
	&& uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff
	=> (REVB4H (ANDconst <x.Type> [0xffffffff] x))

// bitfield ops

// bstrpickv
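// BSTRPICKV extracts bits [msb:lsb] of its operand into the low bits of the result
// (zero-extended); the auxint in the rules below encodes lsb + msb<<6.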
// (x << lc) >> rc
(SRLVconst [rc] (SLLVconst [lc] x)) && lc <= rc => (BSTRPICKV [rc-lc + ((64-lc)-1)<<6] x)
// uint64(x) >> rc
(SRLVconst [rc] (MOVWUreg x)) && rc < 32 => (BSTRPICKV [rc + 31<<6] x)
(SRLVconst [rc] (MOVHUreg x)) && rc < 16 => (BSTRPICKV [rc + 15<<6] x)
(SRLVconst [rc] (MOVBUreg x)) && rc < 8 => (BSTRPICKV [rc + 7<<6] x)
// uint64(x >> rc)
(MOVWUreg (SRLVconst [rc] x)) && rc < 32 => (BSTRPICKV [rc + (31+rc)<<6] x)
(MOVHUreg (SRLVconst [rc] x)) && rc < 16 => (BSTRPICKV [rc + (15+rc)<<6] x)
(MOVBUreg (SRLVconst [rc] x)) && rc < 8 => (BSTRPICKV [rc + (7+rc)<<6] x)

// rotates
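// ROTR/ROTRV rotate right, so a rotate left by y becomes a rotate right by -y (mod the operand width).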
(RotateLeft8 <t> x (MOVVconst [c])) => (Or8 (Lsh8x64 <t> x (MOVVconst [c&7])) (Rsh8Ux64 <t> x (MOVVconst [-c&7])))
(RotateLeft8 <t> x y) => (OR <t> (SLLV <t> x (ANDconst <typ.Int64> [7] y)) (SRLV <t> (ZeroExt8to64 x) (ANDconst <typ.Int64> [7] (NEGV <typ.Int64> y))))
(RotateLeft16 <t> x (MOVVconst [c])) => (Or16 (Lsh16x64 <t> x (MOVVconst [c&15])) (Rsh16Ux64 <t> x (MOVVconst [-c&15])))
(RotateLeft16 <t> x y) => (ROTR <t> (OR <typ.UInt32> (ZeroExt16to32 x) (SLLVconst <t> (ZeroExt16to32 x) [16])) (NEGV <typ.Int64> y))
(RotateLeft32 x y) => (ROTR x (NEGV <y.Type> y))
(RotateLeft64 x y) => (ROTRV x (NEGV <y.Type> y))

// unary ops
(Neg(64|32|16|8) ...) => (NEGV ...)
(Neg(32|64)F ...) => (NEG(F|D) ...)

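// loong64 has no dedicated NOT instruction; ^x is computed as NOR(0, x).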
(Com(64|32|16|8) x) => (NOR (MOVVconst [0]) x)

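// BitLen64(x) = 64 - CLZ(x): SUBVconst [64] computes CLZV(x) - 64 and NEGV negates it
// (BitLen32 likewise with CLZW and 32).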
(BitLen64 <t> x) => (NEGV <t> (SUBVconst <t> [64] (CLZV <t> x)))
(BitLen32 <t> x) => (NEGV <t> (SUBVconst <t> [32] (CLZW <t> x)))
(BitLen(16|8) x) => (BitLen64 (ZeroExt(16|8)to64 x))
(Bswap(16|32|64) ...) => (REVB(2H|2W|V) ...)
(BitRev8 ...) => (BITREV4B ...)
(BitRev16 <t> x) => (REVB2H (BITREV4B <t> x))
(BitRev32 ...) => (BITREVW ...)
(BitRev64 ...) => (BITREVV ...)
(Ctz(64|32|16|8)NonZero ...) => (Ctz64 ...)
(Ctz(32|64) ...) => (CTZ(W|V) ...)
(Ctz16 x) => (CTZV (OR <typ.UInt64> x (MOVVconst [1<<16])))
(Ctz8 x) => (CTZV (OR <typ.UInt64> x (MOVVconst [1<<8])))
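// ORing in bit 16 (resp. bit 8) makes CTZV return 16 (resp. 8) when x is zero, as Ctz16/Ctz8 require.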

(PopCount64 <t> x) => (MOVVfpgp <t> (VPCNT64 <typ.Float64> (MOVVgpfp <typ.Float64> x)))
(PopCount32 <t> x) => (MOVWfpgp <t> (VPCNT32 <typ.Float32> (MOVWgpfp <typ.Float32> x)))
(PopCount16 <t> x) => (MOVWfpgp <t> (VPCNT16 <typ.Float32> (MOVWgpfp <typ.Float32> (ZeroExt16to32 x))))

// math package intrinsics
(Sqrt ...) => (SQRTD ...)
(Sqrt32 ...) => (SQRTF ...)
(Abs ...) => (ABSD ...)
(Copysign ...) => (FCOPYSGD ...)

(Min(64|32)F ...) => (FMIN(D|F) ...)
(Max(64|32)F ...) => (FMAX(D|F) ...)

// boolean ops -- booleans are represented with 0=false, 1=true
(AndB ...) => (AND ...)
(OrB ...) => (OR ...)
(EqB x y) => (XOR (MOVVconst [1]) (XOR <typ.Bool> x y))
(NeqB ...) => (XOR ...)
(Not x) => (XORconst [1] x)

// constants
(Const(64|32|16|8) [val]) => (MOVVconst [int64(val)])
(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)])
(ConstNil) => (MOVVconst [0])
(ConstBool [t]) => (MOVVconst [int64(b2i(t))])

(Slicemask <t> x) => (SRAVconst (NEGV <t> x) [63])
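// Slicemask(x) is -1 if x > 0 and 0 if x == 0: -x is negative exactly when x > 0, so the
// arithmetic shift by 63 produces the mask.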

// truncations
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8 ...) => (Copy ...)
(Trunc32to8 ...) => (Copy ...)
(Trunc32to16 ...) => (Copy ...)
(Trunc64to8 ...) => (Copy ...)
(Trunc64to16 ...) => (Copy ...)
(Trunc64to32 ...) => (Copy ...)

// Zero-/Sign-extensions
(ZeroExt8to16 ...) => (MOVBUreg ...)
(ZeroExt8to32 ...) => (MOVBUreg ...)
(ZeroExt16to32 ...) => (MOVHUreg ...)
(ZeroExt8to64 ...) => (MOVBUreg ...)
(ZeroExt16to64 ...) => (MOVHUreg ...)
(ZeroExt32to64 ...) => (MOVWUreg ...)

(SignExt8to16 ...) => (MOVBreg ...)
(SignExt8to32 ...) => (MOVBreg ...)
(SignExt16to32 ...) => (MOVHreg ...)
(SignExt8to64 ...) => (MOVBreg ...)
(SignExt16to64 ...) => (MOVHreg ...)
(SignExt32to64 ...) => (MOVWreg ...)

// float <=> int conversion
(Cvt32to32F ...) => (MOVWF ...)
(Cvt32to64F ...) => (MOVWD ...)
(Cvt64to32F ...) => (MOVVF ...)
(Cvt64to64F ...) => (MOVVD ...)
(Cvt32Fto32 ...) => (TRUNCFW ...)
(Cvt64Fto32 ...) => (TRUNCDW ...)
(Cvt32Fto64 ...) => (TRUNCFV ...)
(Cvt64Fto64 ...) => (TRUNCDV ...)
(Cvt32Fto64F ...) => (MOVFD ...)
(Cvt64Fto32F ...) => (MOVDF ...)

(CvtBoolToUint8 ...) => (Copy ...)

(Round(32|64)F ...) => (LoweredRound(32|64)F ...)

// comparisons
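// x == y iff x^y == 0, i.e. 1 >u (x^y); x != y iff (x^y) >u 0 (SGTU is unsigned >).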
(Eq8 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Eq16 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Eq32 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Eq64 x y) => (SGTU (MOVVconst [1]) (XOR x y))
(EqPtr x y) => (SGTU (MOVVconst [1]) (XOR x y))
(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y))

(Neq8 x y) => (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
(Neq16 x y) => (SGTU (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)) (MOVVconst [0]))
(Neq32 x y) => (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
(Neq64 x y) => (SGTU (XOR x y) (MOVVconst [0]))
(NeqPtr x y) => (SGTU (XOR x y) (MOVVconst [0]))
(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y))

(Less8 x y) => (SGT (SignExt8to64 y) (SignExt8to64 x))
(Less16 x y) => (SGT (SignExt16to64 y) (SignExt16to64 x))
(Less32 x y) => (SGT (SignExt32to64 y) (SignExt32to64 x))
(Less64 x y) => (SGT y x)
(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN

(Less8U x y) => (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x))
(Less16U x y) => (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x))
(Less32U x y) => (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
(Less64U x y) => (SGTU y x)

(Leq8 x y) => (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y)))
(Leq16 x y) => (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y)))
(Leq32 x y) => (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y)))
(Leq64 x y) => (XOR (MOVVconst [1]) (SGT x y))
(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN

(Leq8U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Leq16U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Leq32U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Leq64U x y) => (XOR (MOVVconst [1]) (SGTU x y))

(OffPtr [off] ptr:(SP)) => (MOVVaddr [int32(off)] ptr)
(OffPtr [off] ptr) => (ADDVconst [off] ptr)

(Addr {sym} base) => (MOVVaddr {sym} base)
(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVVaddr {sym} (SPanchored base mem))
(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (MOVVaddr {sym} base)

// loads
(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && t.IsSigned()) => (MOVWload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem)
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVVload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) => (MOVFload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) => (MOVDload ptr mem)

// stores
(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVVstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (MOVFstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (MOVDstore ptr val mem)

// zeroing
(Zero [0] _ mem) => mem
(Zero [1] ptr mem) => (MOVBstore ptr (MOVVconst [0]) mem)
(Zero [2] ptr mem) => (MOVHstore ptr (MOVVconst [0]) mem)
(Zero [3] ptr mem) =>
	(MOVBstore [2] ptr (MOVVconst [0])
		(MOVHstore ptr (MOVVconst [0]) mem))
(Zero [4] {t} ptr mem) => (MOVWstore ptr (MOVVconst [0]) mem)
(Zero [5] ptr mem) =>
	(MOVBstore [4] ptr (MOVVconst [0])
		(MOVWstore ptr (MOVVconst [0]) mem))
(Zero [6] ptr mem) =>
	(MOVHstore [4] ptr (MOVVconst [0])
		(MOVWstore ptr (MOVVconst [0]) mem))
(Zero [7] ptr mem) =>
	(MOVWstore [3] ptr (MOVVconst [0])
		(MOVWstore ptr (MOVVconst [0]) mem))
(Zero [8] {t} ptr mem) => (MOVVstore ptr (MOVVconst [0]) mem)
(Zero [9] ptr mem) =>
	(MOVBstore [8] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [10] ptr mem) =>
	(MOVHstore [8] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [11] ptr mem) =>
	(MOVWstore [7] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [12] ptr mem) =>
	(MOVWstore [8] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [13] ptr mem) =>
	(MOVVstore [5] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [14] ptr mem) =>
	(MOVVstore [6] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [15] ptr mem) =>
	(MOVVstore [7] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [16] ptr mem) =>
	(MOVVstore [8] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))

(Zero [s] ptr mem) && s > 16 && s < 192 => (LoweredZero [s] ptr mem)
(Zero [s] ptr mem) && s >= 192 => (LoweredZeroLoop [s] ptr mem)

// moves
(Move [0] _ _ mem) => mem
(Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem)
(Move [2] dst src mem) => (MOVHstore dst (MOVHUload src mem) mem)
(Move [3] dst src mem) =>
	(MOVBstore [2] dst (MOVBUload [2] src mem)
		(MOVHstore dst (MOVHUload src mem) mem))
(Move [4] dst src mem) => (MOVWstore dst (MOVWUload src mem) mem)
(Move [5] dst src mem) =>
	(MOVBstore [4] dst (MOVBUload [4] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
(Move [6] dst src mem) =>
	(MOVHstore [4] dst (MOVHUload [4] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
(Move [7] dst src mem) =>
	(MOVWstore [3] dst (MOVWUload [3] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
(Move [8] dst src mem) => (MOVVstore dst (MOVVload src mem) mem)
(Move [9] dst src mem) =>
	(MOVBstore [8] dst (MOVBUload [8] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [10] dst src mem) =>
	(MOVHstore [8] dst (MOVHUload [8] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [11] dst src mem) =>
	(MOVWstore [7] dst (MOVWload [7] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [12] dst src mem) =>
	(MOVWstore [8] dst (MOVWUload [8] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [13] dst src mem) =>
	(MOVVstore [5] dst (MOVVload [5] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [14] dst src mem) =>
	(MOVVstore [6] dst (MOVVload [6] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [15] dst src mem) =>
	(MOVVstore [7] dst (MOVVload [7] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [16] dst src mem) =>
	(MOVVstore [8] dst (MOVVload [8] src mem)
		(MOVVstore dst (MOVVload src mem) mem))

(Move [s] dst src mem) && s > 16 && s < 192 && logLargeCopy(v, s) => (LoweredMove [s] dst src mem)
(Move [s] dst src mem) && s >= 192 && logLargeCopy(v, s) => (LoweredMoveLoop [s] dst src mem)

// float <=> int register moves, with no conversion.
// These come up when compiling math.{Float64bits, Float64frombits, Float32bits, Float32frombits}.
(MOVVload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _)) => (MOVVfpgp val)
(MOVDload [off] {sym} ptr (MOVVstore [off] {sym} ptr val _)) => (MOVVgpfp val)
(MOVWUload [off] {sym} ptr (MOVFstore [off] {sym} ptr val _)) => (ZeroExt32to64 (MOVWfpgp <typ.Float32> val))
(MOVFload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) => (MOVWgpfp val)

// If the memory load and store operations use the same ptr, they are combined into a direct move operation between registers.
(MOV(V|W|H|B)load [off] {sym} ptr (MOV(V|W|H|B)store [off] {sym} ptr x _)) => (MOV(V|W|H|B)reg x)
(MOV(W|H|B)Uload [off] {sym} ptr (MOV(W|H|B)store [off] {sym} ptr x _)) => (MOV(W|H|B)Ureg x)

// Similarly for stores: if we see a store right after an FPR <=> GPR move, redirect the store to use the other register set.
(MOVVstore [off] {sym} ptr (MOVVfpgp val) mem) => (MOVDstore [off] {sym} ptr val mem)
(MOVDstore [off] {sym} ptr (MOVVgpfp val) mem) => (MOVVstore [off] {sym} ptr val mem)
(MOVWstore [off] {sym} ptr (MOVWfpgp val) mem) => (MOVFstore [off] {sym} ptr val mem)
(MOVFstore [off] {sym} ptr (MOVWgpfp val) mem) => (MOVWstore [off] {sym} ptr val mem)

// calls
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
(TailCall ...) => (CALLtail ...)

// atomic intrinsics
(AtomicLoad(8|32|64) ...) => (LoweredAtomicLoad(8|32|64) ...)
(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...)

(AtomicStore(8|32|64) ...) => (LoweredAtomicStore(8|32|64) ...)
(AtomicStore(8|32|64)Variant ...) => (LoweredAtomicStore(8|32|64)Variant ...)
(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...)

(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)
(AtomicExchange8Variant ...) => (LoweredAtomicExchange8Variant ...)

(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)

// Loong64's 32-bit atomic operation instructions ll.w and amcasw both sign-extend their
// results, so the input parameters need to be sign-extended to 64 bits; otherwise the
// subsequent comparison operations may not produce the expected results.
//
(AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)
(AtomicCompareAndSwap32Variant ptr old new mem) => (LoweredAtomicCas32Variant ptr (SignExt32to64 old) new mem)
(AtomicCompareAndSwap64Variant ...) => (LoweredAtomicCas64Variant ...)

// Atomic memory logical operations (old style).
//
// AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3, ^((uint8(val) ^ 0xff) << ((ptr & 3) * 8)))
// AtomicOr8(ptr,val)  => LoweredAtomicOr32(ptr&^3, uint32(val) << ((ptr & 3) * 8))
//
(AtomicAnd8 ptr val mem) =>
	(LoweredAtomicAnd32 (AND <typ.Uintptr> (MOVVconst [^3]) ptr)
		(NORconst [0] <typ.UInt32> (SLLV <typ.UInt32> (XORconst <typ.UInt32> [0xff] (ZeroExt8to32 val))
			(SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] ptr)))) mem)

(AtomicOr8 ptr val mem) =>
	(LoweredAtomicOr32 (AND <typ.Uintptr> (MOVVconst [^3]) ptr)
		(SLLV <typ.UInt32> (ZeroExt8to32 val)
			(SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] ptr))) mem)

(AtomicAnd32 ...) => (LoweredAtomicAnd32 ...)
(AtomicOr32 ...) => (LoweredAtomicOr32 ...)

// Atomic memory logical operations (new style).
(AtomicAnd(64|32)value ...) => (LoweredAtomicAnd(64|32)value ...)
(AtomicOr(64|32)value ...) => (LoweredAtomicOr(64|32)value ...)

// checks
(NilCheck ...) => (LoweredNilCheck ...)
(IsNonNil ptr) => (SGTU ptr (MOVVconst [0]))
(IsInBounds idx len) => (SGTU len idx)
(IsSliceInBounds idx len) => (XOR (MOVVconst [1]) (SGTU idx len))

// pseudo-ops
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)

(If cond yes no) => (NEZ (MOVBUreg <typ.UInt64> cond) yes no)
(MOVBUreg x:((SGT|SGTU) _ _)) => x
(MOVBUreg x:(XOR (MOVVconst [1]) ((SGT|SGTU) _ _))) => x

// Write barrier.
(WB ...) => (LoweredWB ...)

// Publication barrier as intrinsic
(PubBarrier ...) => (LoweredPubBarrier ...)

(PanicBounds ...) => (LoweredPanicBoundsRR ...)
(LoweredPanicBoundsRR [kind] x (MOVVconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
(LoweredPanicBoundsRR [kind] (MOVVconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
(LoweredPanicBoundsRC [kind] {p} (MOVVconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
(LoweredPanicBoundsCR [kind] {p} (MOVVconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)

(CondSelect <t> x y cond) => (OR (MASKEQZ <t> x cond) (MASKNEZ <t> y cond))
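// MASKEQZ x cond yields x when cond != 0 (else 0); MASKNEZ y cond yields y when cond == 0
// (else 0), so the OR selects x or y based on cond.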

// c > d-x => x > d-c
(SGT (MOVVconst [c]) (NEGV (SUBVconst [d] x))) && is32Bit(d-c) => (SGT x (MOVVconst [d-c]))

(SGT (MOVVconst [c]) x) && is32Bit(c) => (SGTconst [c] x)
(SGTU (MOVVconst [c]) x) && is32Bit(c) => (SGTUconst [c] x)

// fold offset into address
(ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) => (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr)

// fold address into load/store
// Do not fold global variable accesses in -dynlink mode, where they will be rewritten
// to use the GOT via REGTMP, which currently cannot handle large offsets.
(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1+int32(off2)] {sym} ptr mem)

(MOV(B|H|W|V|F|D)store [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|H|W|V|F|D)store [off1+int32(off2)] {sym} ptr val mem)

(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
	&& is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)

(MOV(B|H|W|V|F|D)store [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
	&& is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|H|W|V|F|D)store [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)

// don't extend after proper load
(MOVBreg x:(MOVBload _ _)) => (MOVVreg x)
(MOVBUreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVHreg x:(MOVBload _ _)) => (MOVVreg x)
(MOVHreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVHreg x:(MOVHload _ _)) => (MOVVreg x)
(MOVHUreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVHUreg x:(MOVHUload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVBload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVHload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVHUload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVWload _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVHUload _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVWUload _ _)) => (MOVVreg x)
(MOVBreg x:(MOVBloadidx _ _ _)) => (MOVVreg x)
(MOVBUreg x:(MOVBUloadidx _ _ _)) => (MOVVreg x)
(MOVHreg x:(MOVBloadidx _ _ _)) => (MOVVreg x)
(MOVHreg x:(MOVBUloadidx _ _ _)) => (MOVVreg x)
(MOVHreg x:(MOVHloadidx _ _ _)) => (MOVVreg x)
(MOVHUreg x:(MOVBUloadidx _ _ _)) => (MOVVreg x)
(MOVHUreg x:(MOVHUloadidx _ _ _)) => (MOVVreg x)
(MOVWreg x:(MOVBloadidx _ _ _)) => (MOVVreg x)
(MOVWreg x:(MOVBUloadidx _ _ _)) => (MOVVreg x)
(MOVWreg x:(MOVHloadidx _ _ _)) => (MOVVreg x)
(MOVWreg x:(MOVHUloadidx _ _ _)) => (MOVVreg x)
(MOVWreg x:(MOVWloadidx _ _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVBUloadidx _ _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVHUloadidx _ _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVWUloadidx _ _ _)) => (MOVVreg x)

// fold double extensions
(MOVBreg x:(MOVBreg _)) => (MOVVreg x)
(MOVBUreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVHreg x:(MOVBreg _)) => (MOVVreg x)
(MOVHreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVHreg x:(MOVHreg _)) => (MOVVreg x)
(MOVHUreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVHUreg x:(MOVHUreg _)) => (MOVVreg x)
(MOVWreg x:(MOVBreg _)) => (MOVVreg x)
(MOVWreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVWreg x:(MOVHreg _)) => (MOVVreg x)
(MOVWreg x:(MOVWreg _)) => (MOVVreg x)
(MOVWUreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVWUreg x:(MOVHUreg _)) => (MOVVreg x)
(MOVWUreg x:(MOVWUreg _)) => (MOVVreg x)

// don't extend before store
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)

// register indexed load
(MOVVload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVVloadidx ptr idx mem)
(MOVWUload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx ptr idx mem)
(MOVWload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx ptr idx mem)
(MOVHUload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx ptr idx mem)
(MOVHload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx ptr idx mem)
(MOVBUload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVBUloadidx ptr idx mem)
(MOVBload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVBloadidx ptr idx mem)
(MOVFload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVFloadidx ptr idx mem)
(MOVDload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx ptr idx mem)
(MOVVloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVVload [int32(c)] ptr mem)
(MOVVloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVVload [int32(c)] ptr mem)
(MOVWUloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
(MOVWUloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
(MOVWloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVWload [int32(c)] ptr mem)
(MOVWloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVWload [int32(c)] ptr mem)
(MOVHUloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem)
(MOVHUloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem)
(MOVHloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVHload [int32(c)] ptr mem)
(MOVHloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVHload [int32(c)] ptr mem)
(MOVBUloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem)
(MOVBUloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem)
(MOVBloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVBload [int32(c)] ptr mem)
(MOVBloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVBload [int32(c)] ptr mem)
(MOVFloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVFload [int32(c)] ptr mem)
(MOVFloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVFload [int32(c)] ptr mem)
(MOVDloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVDload [int32(c)] ptr mem)
(MOVDloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVDload [int32(c)] ptr mem)

// register indexed store
(MOVVstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVVstoreidx ptr idx val mem)
(MOVWstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx ptr idx val mem)
(MOVHstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx ptr idx val mem)
(MOVBstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVBstoreidx ptr idx val mem)
(MOVFstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVFstoreidx ptr idx val mem)
(MOVDstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx ptr idx val mem)
(MOVVstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVVstore [int32(c)] ptr val mem)
(MOVVstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVVstore [int32(c)] idx val mem)
(MOVWstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVWstore [int32(c)] ptr val mem)
(MOVWstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVWstore [int32(c)] idx val mem)
(MOVHstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVHstore [int32(c)] ptr val mem)
(MOVHstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVHstore [int32(c)] idx val mem)
(MOVBstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVBstore [int32(c)] ptr val mem)
(MOVBstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVBstore [int32(c)] idx val mem)
(MOVFstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVFstore [int32(c)] ptr val mem)
(MOVFstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVFstore [int32(c)] idx val mem)
(MOVDstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVDstore [int32(c)] ptr val mem)
(MOVDstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVDstore [int32(c)] idx val mem)

// If a register move has only 1 use, just use the same register without emitting an instruction.
// MOVVnop doesn't emit an instruction; it only ensures the type.
(MOVVreg x) && x.Uses == 1 => (MOVVnop x)

// TODO: we should be able to get rid of MOVVnop all together.
// But for now, this is enough to get rid of lots of them.
(MOVVnop (MOVVconst [c])) => (MOVVconst [c])

// fold constant into arithmetic ops
(ADDV x (MOVVconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDVconst [c] x)
(SUBV x (MOVVconst [c])) && is32Bit(c) => (SUBVconst [c] x)
(AND x (MOVVconst [c])) && is32Bit(c) => (ANDconst [c] x)
(OR x (MOVVconst [c])) && is32Bit(c) => (ORconst [c] x)
(XOR x (MOVVconst [c])) && is32Bit(c) => (XORconst [c] x)
(NOR x (MOVVconst [c])) && is32Bit(c) => (NORconst [c] x)

(SLL _ (MOVVconst [c])) && uint64(c)>=32 => (MOVVconst [0])
(SLLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0])
(SRL _ (MOVVconst [c])) && uint64(c)>=32 => (MOVVconst [0])
(SRLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0])
(SRA x (MOVVconst [c])) && uint64(c)>=32 => (SRAconst x [31])
(SRAV x (MOVVconst [c])) && uint64(c)>=64 => (SRAVconst x [63])
(SLL x (MOVVconst [c])) && uint64(c) >=0 && uint64(c) <=31 => (SLLconst x [c])
(SLLV x (MOVVconst [c])) => (SLLVconst x [c])
(SRL x (MOVVconst [c])) && uint64(c) >=0 && uint64(c) <=31 => (SRLconst x [c])
(SRLV x (MOVVconst [c])) => (SRLVconst x [c])
(SRA x (MOVVconst [c])) && uint64(c) >=0 && uint64(c) <=31 => (SRAconst x [c])
(SRAV x (MOVVconst [c])) => (SRAVconst x [c])
(ROTR x (MOVVconst [c])) => (ROTRconst x [c&31])
(ROTRV x (MOVVconst [c])) => (ROTRVconst x [c&63])

// SLLV/SRLV/SRAV only consider the bottom 6 bits of y; similarly, SLL/SRL/SRA only consider the
// bottom 5 bits of y.
(SLL x (ANDconst [31] y)) => (SLL x y)
(SRL x (ANDconst [31] y)) => (SRL x y)
(SRA x (ANDconst [31] y)) => (SRA x y)
(SLLV x (ANDconst [63] y)) => (SLLV x y)
(SRLV x (ANDconst [63] y)) => (SRLV x y)
(SRAV x (ANDconst [63] y)) => (SRAV x y)

// Avoid unnecessary zero and sign extension when right shifting.
(SRLVconst [rc] (MOVWUreg y)) && rc >= 0 && rc <= 31 => (SRLconst [int64(rc)] y)
(SRAVconst [rc] (MOVWreg y)) && rc >= 0 && rc <= 31 => (SRAconst [int64(rc)] y)

// Replace right shifts that exceed the size of the signed type.
(SRAVconst <t> [rc] (MOVBreg y)) && rc >= 8 => (SRAVconst [63] (SLLVconst <t> [56] y))
(SRAVconst <t> [rc] (MOVHreg y)) && rc >= 16 => (SRAVconst [63] (SLLVconst <t> [48] y))
(SRAVconst <t> [rc] (MOVWreg y)) && rc >= 32 => (SRAconst [31] y)

// If the left shift amount is at least the data size (32, 16, 8), all bits kept by the zero extension are zero, so the result is constant 0.
(MOVWUreg (SLLVconst [lc] x)) && lc >= 32 => (MOVVconst [0])
(MOVHUreg (SLLVconst [lc] x)) && lc >= 16 => (MOVVconst [0])
(MOVBUreg (SLLVconst [lc] x)) && lc >= 8 => (MOVVconst [0])

// After zero extension, the upper (64 - data size) bits are zero, so a right shift by at least the data size (32, 16, 8) yields constant 0.
(SRLVconst [rc] (MOVWUreg x)) && rc >= 32 => (MOVVconst [0])
(SRLVconst [rc] (MOVHUreg x)) && rc >= 16 => (MOVVconst [0])
(SRLVconst [rc] (MOVBUreg x)) && rc >= 8 => (MOVVconst [0])

// (x + x) << c -> x << c+1
((SLLV|SLL)const [c] (ADDV x x)) => ((SLLV|SLL)const [c+1] x)

// mul by constant
(MULV _ (MOVVconst [0])) => (MOVVconst [0])
(MULV x (MOVVconst [1])) => x

(MULV x (MOVVconst [c])) && canMulStrengthReduce(config, c) => {mulStrengthReduce(v, x, c)}

(MULV (NEGV x) (MOVVconst [c])) => (MULV x (MOVVconst [-c]))
(MULV (NEGV x) (NEGV y)) => (MULV x y)

(ADDV x0 x1:(SLLVconst [c] y)) && x1.Uses == 1 && c > 0 && c <= 4 => (ADDshiftLLV x0 y [c])

// fold constant in ADDshift op
(ADDshiftLLV x (MOVVconst [c]) [d]) && is12Bit(c<<d) => (ADDVconst x [c<<d])

// div by constant
(DIVVU x (MOVVconst [1])) => x
(DIVVU x (MOVVconst [c])) && isPowerOfTwo(c) => (SRLVconst [log64(c)] x)
(REMVU _ (MOVVconst [1])) => (MOVVconst [0]) // mod
(REMVU x (MOVVconst [c])) && isPowerOfTwo(c) => (ANDconst [c-1] x) // mod
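// e.g. x/8 => x>>3 and x%8 => x&7 for unsigned x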

// FMA
(FMA ...) => (FMADDD ...)
((ADD|SUB)F (MULF x y) z) && z.Block.Func.useFMA(v) => (FM(ADD|SUB)F x y z)
((ADD|SUB)D (MULD x y) z) && z.Block.Func.useFMA(v) => (FM(ADD|SUB)D x y z)
// z - xy -> -(xy - z)
(SUBF z (MULF x y)) && z.Block.Func.useFMA(v) => (FNMSUBF x y z)
(SUBD z (MULD x y)) && z.Block.Func.useFMA(v) => (FNMSUBD x y z)
// z + (-xy) -> -(xy - z)
// z - (-xy) -> xy + z
((ADD|SUB)F z (NEGF (MULF x y))) && z.Block.Func.useFMA(v) => (F(NMSUB|MADD)F x y z)
((ADD|SUB)D z (NEGD (MULD x y))) && z.Block.Func.useFMA(v) => (F(NMSUB|MADD)D x y z)
// -xy - z -> -(xy + z)
(SUBF (NEGF (MULF x y)) z) && z.Block.Func.useFMA(v) => (FNMADDF x y z)
(SUBD (NEGD (MULD x y)) z) && z.Block.Func.useFMA(v) => (FNMADDD x y z)

// generic simplifications
(ADDV x (NEGV y)) => (SUBV x y)
(SUBV x (NEGV y)) => (ADDV x y)
(SUBV x x) => (MOVVconst [0])
(SUBV (MOVVconst [0]) x) => (NEGV x)
(AND x x) => x
(OR x x) => x
(XOR x x) => (MOVVconst [0])
(ORN x (MOVVconst [-1])) => x
(AND x (NORconst [0] y)) => (ANDN x y)
(OR x (NORconst [0] y)) => (ORN x y)

// Fold negation into subtraction.
(NEGV (SUBV x y)) => (SUBV y x)
(NEGV <t> s:(ADDVconst [c] (SUBV x y))) && s.Uses == 1 && is12Bit(-c) => (ADDVconst [-c] (SUBV <t> y x))

// Double negation.
(NEGV (NEGV x)) => x
// Fold NEGV into ADDVconst. Take care to keep c in 12 bit range.
(NEGV <t> s:(ADDVconst [c] (NEGV x))) && s.Uses == 1 && is12Bit(-c) => (ADDVconst [-c] x)

// remove redundant *const ops
(ADDVconst [0] x) => x
(SUBVconst [0] x) => x
(ANDconst [0] _) => (MOVVconst [0])
(ANDconst [-1] x) => x
(ORconst [0] x) => x
(ORconst [-1] _) => (MOVVconst [-1])
(XORconst [0] x) => x
(XORconst [-1] x) => (NORconst [0] x)
(MASKEQZ (MOVVconst [0]) cond) => (MOVVconst [0])
(MASKNEZ (MOVVconst [0]) cond) => (MOVVconst [0])
(MASKEQZ x (MOVVconst [c])) && c == 0 => (MOVVconst [0])
(MASKEQZ x (MOVVconst [c])) && c != 0 => x

// generic constant folding
(ADDVconst [c] (MOVVconst [d])) => (MOVVconst [c+d])
(ADDVconst [c] (ADDVconst [d] x)) && is32Bit(c+d) => (ADDVconst [c+d] x)
(ADDVconst [c] (SUBVconst [d] x)) && is32Bit(c-d) => (ADDVconst [c-d] x)
(SUBVconst [c] (MOVVconst [d])) => (MOVVconst [d-c])
(SUBVconst [c] (SUBVconst [d] x)) && is32Bit(-c-d) => (ADDVconst [-c-d] x)
(SUBVconst [c] (ADDVconst [d] x)) && is32Bit(-c+d) => (ADDVconst [-c+d] x)
(SUBV (MOVVconst [c]) (NEGV (SUBVconst [d] x))) => (ADDVconst [c-d] x)
(ADDVconst [c] x) && is32Bit(c) && c&0xffff == 0 && c != 0 => (ADDV16const [c] x)
(SLLVconst [c] (MOVVconst [d])) => (MOVVconst [d<<uint64(c)])
(SRLVconst [c] (MOVVconst [d])) => (MOVVconst [int64(uint64(d)>>uint64(c))])
(SRAVconst [c] (MOVVconst [d])) => (MOVVconst [d>>uint64(c)])
(MULV (MOVVconst [c]) (MOVVconst [d])) => (MOVVconst [c*d])
(DIVV (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [c/d])
(DIVVU (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [int64(uint64(c)/uint64(d))])
(REMV (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [c%d]) // mod
(REMVU (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [int64(uint64(c)%uint64(d))]) // mod
(ANDconst [c] (MOVVconst [d])) => (MOVVconst [c&d])
(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
(ORconst [c] (MOVVconst [d])) => (MOVVconst [c|d])
(ORconst [c] (ORconst [d] x)) && is32Bit(c|d) => (ORconst [c|d] x)
(XORconst [c] (MOVVconst [d])) => (MOVVconst [c^d])
(XORconst [c] (XORconst [d] x)) && is32Bit(c^d) => (XORconst [c^d] x)
(NORconst [c] (MOVVconst [d])) => (MOVVconst [^(c|d)])
(NEGV (MOVVconst [c])) => (MOVVconst [-c])
(MOVBreg (MOVVconst [c])) => (MOVVconst [int64(int8(c))])
(MOVBUreg (MOVVconst [c])) => (MOVVconst [int64(uint8(c))])
(MOVHreg (MOVVconst [c])) => (MOVVconst [int64(int16(c))])
(MOVHUreg (MOVVconst [c])) => (MOVVconst [int64(uint16(c))])
(MOVWreg (MOVVconst [c])) => (MOVVconst [int64(int32(c))])
(MOVWUreg (MOVVconst [c])) => (MOVVconst [int64(uint32(c))])
(MOVVreg (MOVVconst [c])) => (MOVVconst [c])

(MOVBUreg (ANDconst [c] x)) => (ANDconst [c&0xff] x)

// Avoid extending when already sufficiently masked.
(MOVBreg x:(ANDconst [c] y)) && c >= 0 && int64(int8(c)) == c => x
(MOVHreg x:(ANDconst [c] y)) && c >= 0 && int64(int16(c)) == c => x
(MOVWreg x:(ANDconst [c] y)) && c >= 0 && int64(int32(c)) == c => x
(MOVBUreg x:(ANDconst [c] y)) && c >= 0 && int64(uint8(c)) == c => x
(MOVHUreg x:(ANDconst [c] y)) && c >= 0 && int64(uint16(c)) == c => x
(MOVWUreg x:(ANDconst [c] y)) && c >= 0 && int64(uint32(c)) == c => x

// Prefetch instructions (hint specified using aux field)
// For PRELD{,X}, the value of hint indicates:
//	hint=0 is defined as load prefetch to L1-cache
//	hint=2 is defined as load prefetch to L3-cache
// The PrefetchCacheStreamed implementation prefetches 512 bytes of data
// into L3. The aux field is defined as follows:
//	bit[4:0]:
//		$hint parameter of PRELDX instruction
//	bit[41:5]:
//		$n parameter of PRELDX instruction, bit[0] of $n is the address
//		sequence, bits[11:1] is the block size, bits[20:12] is the block
//		num, bits[36:21] is the stride, for more details about $n, refer
//		to src/cmd/internal/obj/loong64/doc.go
(PrefetchCache addr mem) => (PRELD addr mem [0])
(PrefetchCacheStreamed addr mem) => (PRELDX addr mem [(((512 << 1) + (1 << 12)) << 5) + 2])
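// Decoding the PRELDX aux value above: hint=2 (L3); $n has address sequence=0, block size=512, block num=1, stride=0.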

// constant comparisons
(SGTconst [c] (MOVVconst [d])) && c>d => (MOVVconst [1])
(SGTconst [c] (MOVVconst [d])) && c<=d => (MOVVconst [0])
(SGTUconst [c] (MOVVconst [d])) && uint64(c)>uint64(d) => (MOVVconst [1])
(SGTUconst [c] (MOVVconst [d])) && uint64(c)<=uint64(d) => (MOVVconst [0])

// other known comparisons
(SGTconst [c] (MOVBreg _)) && 0x7f < c => (MOVVconst [1])
(SGTconst [c] (MOVBreg _)) && c <= -0x80 => (MOVVconst [0])
(SGTconst [c] (MOVBUreg _)) && 0xff < c => (MOVVconst [1])
(SGTconst [c] (MOVBUreg _)) && c < 0 => (MOVVconst [0])
(SGTUconst [c] (MOVBUreg _)) && 0xff < uint64(c) => (MOVVconst [1])
(SGTconst [c] (MOVHreg _)) && 0x7fff < c => (MOVVconst [1])
(SGTconst [c] (MOVHreg _)) && c <= -0x8000 => (MOVVconst [0])
(SGTconst [c] (MOVHUreg _)) && 0xffff < c => (MOVVconst [1])
(SGTconst [c] (MOVHUreg _)) && c < 0 => (MOVVconst [0])
(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint64(c) => (MOVVconst [1])
(SGTconst [c] (MOVWUreg _)) && c < 0 => (MOVVconst [0])
(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c => (MOVVconst [1])
(SGTUconst [c] (ANDconst [m] _)) && uint64(m) < uint64(c) => (MOVVconst [1])
(SGTconst [c] (SRLVconst _ [d])) && 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1])
(SGTUconst [c] (SRLVconst _ [d])) && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1])

// SGT/SGTU with known outcomes.
(SGT x x) => (MOVVconst [0])
(SGTU x x) => (MOVVconst [0])

// Optimizations

// Absorb boolean tests into block
(NEZ (FPFlagTrue cmp) yes no) => (FPT cmp yes no)
(NEZ (FPFlagFalse cmp) yes no) => (FPF cmp yes no)
(EQZ (FPFlagTrue cmp) yes no) => (FPF cmp yes no)
(EQZ (FPFlagFalse cmp) yes no) => (FPT cmp yes no)
(NEZ (XORconst [1] cmp:(SGT _ _)) yes no) => (EQZ cmp yes no)
(NEZ (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQZ cmp yes no)
(NEZ (XORconst [1] cmp:(SGTconst _)) yes no) => (EQZ cmp yes no)
(NEZ (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQZ cmp yes no)
(EQZ (XORconst [1] cmp:(SGT _ _)) yes no) => (NEZ cmp yes no)
(EQZ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NEZ cmp yes no)
(EQZ (XORconst [1] cmp:(SGTconst _)) yes no) => (NEZ cmp yes no)
(EQZ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NEZ cmp yes no)
(NEZ (SGTUconst [1] x) yes no) => (EQZ x yes no)
(EQZ (SGTUconst [1] x) yes no) => (NEZ x yes no)
(NEZ (SGTU x (MOVVconst [0])) yes no) => (NEZ x yes no)
(EQZ (SGTU x (MOVVconst [0])) yes no) => (EQZ x yes no)
(NEZ (SGTconst [0] x) yes no) => (LTZ x yes no)
(EQZ (SGTconst [0] x) yes no) => (GEZ x yes no)
(NEZ (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no)
(EQZ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no)

// Convert EQZ/NEZ into more optimal branch conditions.
(EQZ (SGTU (MOVVconst [c]) y) yes no) && c >= -2048 && c <= 2047 => (EQZ (SGTUconst [c] y) yes no)
(NEZ (SGTU (MOVVconst [c]) y) yes no) && c >= -2048 && c <= 2047 => (NEZ (SGTUconst [c] y) yes no)
(EQZ (SUBV x y) yes no) => (BEQ x y yes no)
(NEZ (SUBV x y) yes no) => (BNE x y yes no)
(EQZ (SGT x y) yes no) => (BGE y x yes no)
(NEZ (SGT x y) yes no) => (BLT y x yes no)
(EQZ (SGTU x y) yes no) => (BGEU y x yes no)
(NEZ (SGTU x y) yes no) => (BLTU y x yes no)
(EQZ (SGTconst [c] y) yes no) => (BGE y (MOVVconst [c]) yes no)
(NEZ (SGTconst [c] y) yes no) => (BLT y (MOVVconst [c]) yes no)
(EQZ (SGTUconst [c] y) yes no) => (BGEU y (MOVVconst [c]) yes no)
(NEZ (SGTUconst [c] y) yes no) => (BLTU y (MOVVconst [c]) yes no)

// absorb constants into branches
(EQZ (MOVVconst [0]) yes no) => (First yes no)
(EQZ (MOVVconst [c]) yes no) && c != 0 => (First no yes)
(NEZ (MOVVconst [0]) yes no) => (First no yes)
(NEZ (MOVVconst [c]) yes no) && c != 0 => (First yes no)
(LTZ (MOVVconst [c]) yes no) && c < 0 => (First yes no)
(LTZ (MOVVconst [c]) yes no) && c >= 0 => (First no yes)
(LEZ (MOVVconst [c]) yes no) && c <= 0 => (First yes no)
(LEZ (MOVVconst [c]) yes no) && c > 0 => (First no yes)
(GTZ (MOVVconst [c]) yes no) && c > 0 => (First yes no)
(GTZ (MOVVconst [c]) yes no) && c <= 0 => (First no yes)
(GEZ (MOVVconst [c]) yes no) && c >= 0 => (First yes no)
(GEZ (MOVVconst [c]) yes no) && c < 0 => (First no yes)

// absorb NEGV into branches
(EQZ (NEGV x) yes no) => (EQZ x yes no)
(NEZ (NEGV x) yes no) => (NEZ x yes no)

// Convert branch with zero to more optimal branch zero.
(BEQ (MOVVconst [0]) cond yes no) => (EQZ cond yes no)
(BEQ cond (MOVVconst [0]) yes no) => (EQZ cond yes no)
(BNE (MOVVconst [0]) cond yes no) => (NEZ cond yes no)
(BNE cond (MOVVconst [0]) yes no) => (NEZ cond yes no)
(BLT (MOVVconst [0]) cond yes no) => (GTZ cond yes no)
(BLT cond (MOVVconst [0]) yes no) => (LTZ cond yes no)
(BLTU (MOVVconst [0]) cond yes no) => (NEZ cond yes no)
(BGE (MOVVconst [0]) cond yes no) => (LEZ cond yes no)
(BGE cond (MOVVconst [0]) yes no) => (GEZ cond yes no)
(BGEU (MOVVconst [0]) cond yes no) => (EQZ cond yes no)

// Arch-specific inlining for small or disjoint runtime.memmove
// Match post-lowering calls, register version.
(SelectN [0] call:(CALLstatic {sym} dst src (MOVVconst [sz]) mem))
	&& sz >= 0
	&& isSameCall(sym, "runtime.memmove")
	&& call.Uses == 1
	&& isInlinableMemmove(dst, src, sz, config)
	&& clobber(call)
	=> (Move [sz] dst src mem)

// fold readonly sym load
(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read8(sym, int64(off)))])
(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVWUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVVload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(int8(read8(sym, int64(off))))])
(MOVHload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))])